Commit 24e7b114 authored by luxq

Merge branch 'ori-master' into master

parents b2a8f3b1 c3f9dc19
...@@ -77,6 +77,9 @@ jobs: ...@@ -77,6 +77,9 @@ jobs:
- name: Test pushsync (chunks) - name: Test pushsync (chunks)
id: pushsync-chunks-1 id: pushsync-chunks-1
run: beekeeper check --cluster-name local-dns --checks=ci-pushsync-chunks run: beekeeper check --cluster-name local-dns --checks=ci-pushsync-chunks
- name: Test pushsync (light mode chunks)
id: pushsync-chunks-2
run: beekeeper check --cluster-name local-dns --checks=ci-pushsync-light-chunks
- name: Test retrieval - name: Test retrieval
id: retrieval-1 id: retrieval-1
run: beekeeper check --cluster-name local-dns --checks=ci-retrieval run: beekeeper check --cluster-name local-dns --checks=ci-retrieval
......
name: OpenAPI
on:
push:
branches:
- 'master'
pull_request:
branches:
- '**'
jobs:
build:
name: Preview
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: '0'
- name: Check whether docs have changed
id: checkdocs
run: |
changed=false
git diff --name-only HEAD^ HEAD > files.txt
while IFS= read -r file
do
if [[ $file == openapi/* ]]; then
echo "detected openapi spec change"
changed=true
fi
done < files.txt
if [ $changed == true ]
then
echo "::set-output name=build_docs::true"
else
echo "::set-output name=build_docs::false"
fi
- name: Build the OpenAPI specs
if: steps.checkdocs.outputs.build_docs == 'true'
uses: acud/openapi-dockerized@v1
with:
build-roots: 'openapi/Swarm.yaml openapi/SwarmDebug.yaml'
env:
AWS_ACCESS_KEY_ID: ${{ secrets.DO_AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.DO_AWS_SECRET_ACCESS_KEY }}
AWS_EC2_METADATA_DISABLED: true #needed when pushing to DigitalOcean Spaces
AWS_ENDPOINT: fra1.digitaloceanspaces.com
BUCKET_NAME: openapi-specs
...@@ -11,7 +11,7 @@ builds: ...@@ -11,7 +11,7 @@ builds:
- -v - -v
- -trimpath - -trimpath
ldflags: ldflags:
- -s -w -X github.com/ethersphere/bee.version={{.Version}} -X github.com/ethersphere/bee.commit={{.ShortCommit}} -X github.com/ethersphere/bee.commitTime={{.CommitTimestamp}} - -s -w -X github.com/ethersphere/bee.version={{.Version}} -X github.com/ethersphere/bee.commitHash={{.ShortCommit}} -X github.com/ethersphere/bee.commitTime={{.CommitTimestamp}}
env: env:
- CGO_ENABLED=0 - CGO_ENABLED=0
goos: goos:
......
...@@ -8,7 +8,7 @@ COPY . ./ ...@@ -8,7 +8,7 @@ COPY . ./
RUN make binary RUN make binary
FROM debian:10.9-slim FROM debian:10.10-slim
ENV DEBIAN_FRONTEND noninteractive ENV DEBIAN_FRONTEND noninteractive
......
FROM debian:10.9-slim FROM debian:10.10-slim
ENV DEBIAN_FRONTEND noninteractive ENV DEBIAN_FRONTEND noninteractive
......
FROM debian:10.9-slim FROM debian:10.10-slim
ENV DEBIAN_FRONTEND noninteractive ENV DEBIAN_FRONTEND noninteractive
......
...@@ -9,10 +9,10 @@ BEEKEEPER_CLUSTER ?= local ...@@ -9,10 +9,10 @@ BEEKEEPER_CLUSTER ?= local
BEELOCAL_BRANCH ?= main BEELOCAL_BRANCH ?= main
BEEKEEPER_BRANCH ?= master BEEKEEPER_BRANCH ?= master
COMMIT ?= "$(shell git describe --long --dirty --always --match "" || true)" COMMIT_HASH ?= "$(shell git describe --long --dirty --always --match "" || true)"
CLEAN_COMMIT ?= "$(shell git describe --long --always --match "" || true)" CLEAN_COMMIT ?= "$(shell git describe --long --always --match "" || true)"
COMMIT_TIME ?= "$(shell git show -s --format=%ct $(CLEAN_COMMIT) || true)" COMMIT_TIME ?= "$(shell git show -s --format=%ct $(CLEAN_COMMIT) || true)"
LDFLAGS ?= -s -w -X github.com/ethersphere/bee.commit="$(COMMIT)" -X github.com/ethersphere/bee.commitTime="$(COMMIT_TIME)" LDFLAGS ?= -s -w -X github.com/ethersphere/bee.commitHash="$(COMMIT_HASH)" -X github.com/ethersphere/bee.commitTime="$(COMMIT_TIME)"
.PHONY: all .PHONY: all
all: build lint vet test-race binary all: build lint vet test-race binary
......
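For context, the -s -w -X linker flags in LDFLAGS strip debug information and overwrite package-level string variables at link time. A minimal sketch of the variables the renamed flag now targets; the declarations are assumptions mirroring the flag names above, not code from this commit:

package bee

// Sketch only: values injected at link time via
//   -X github.com/ethersphere/bee.version=...
//   -X github.com/ethersphere/bee.commitHash=...   (renamed from bee.commit)
//   -X github.com/ethersphere/bee.commitTime=...
var (
	version    string
	commitHash string
	commitTime string
)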
...@@ -3,6 +3,8 @@ ...@@ -3,6 +3,8 @@
[![Go](https://github.com/ethersphere/bee/workflows/Go/badge.svg)](https://github.com/ethersphere/bee/actions) [![Go](https://github.com/ethersphere/bee/workflows/Go/badge.svg)](https://github.com/ethersphere/bee/actions)
[![Go Reference](https://pkg.go.dev/badge/github.com/ethersphere/bee.svg)](https://pkg.go.dev/github.com/ethersphere/bee) [![Go Reference](https://pkg.go.dev/badge/github.com/ethersphere/bee.svg)](https://pkg.go.dev/github.com/ethersphere/bee)
[![Coverage Status](https://coveralls.io/repos/github/ethersphere/bee/badge.svg)](https://coveralls.io/github/ethersphere/bee) [![Coverage Status](https://coveralls.io/repos/github/ethersphere/bee/badge.svg)](https://coveralls.io/github/ethersphere/bee)
[![API OpenAPI Specs](https://img.shields.io/badge/openapi-api-blue)](https://docs.ethswarm.org/api/)
[![Debug API OpenAPI Specs](https://img.shields.io/badge/openapi-debugapi-lightblue)](https://docs.ethswarm.org/debug-api/)
## DISCLAIMER ## DISCLAIMER
......
...@@ -40,9 +40,10 @@ const ( ...@@ -40,9 +40,10 @@ const (
optionNameNetworkID = "network-id" optionNameNetworkID = "network-id"
optionWelcomeMessage = "welcome-message" optionWelcomeMessage = "welcome-message"
optionCORSAllowedOrigins = "cors-allowed-origins" optionCORSAllowedOrigins = "cors-allowed-origins"
optionNameStandalone = "standalone"
optionNameTracingEnabled = "tracing-enable" optionNameTracingEnabled = "tracing-enable"
optionNameTracingEndpoint = "tracing-endpoint" optionNameTracingEndpoint = "tracing-endpoint"
optionNameTracingHost = "tracing-host"
optionNameTracingPort = "tracing-port"
optionNameTracingServiceName = "tracing-service-name" optionNameTracingServiceName = "tracing-service-name"
optionNameVerbosity = "verbosity" optionNameVerbosity = "verbosity"
optionNameGlobalPinningEnabled = "global-pinning-enable" optionNameGlobalPinningEnabled = "global-pinning-enable"
...@@ -69,6 +70,7 @@ const ( ...@@ -69,6 +70,7 @@ const (
optionNameBlockTime = "block-time" optionNameBlockTime = "block-time"
optionWarmUpTime = "warmup-time" optionWarmUpTime = "warmup-time"
optionNameMainNet = "mainnet" optionNameMainNet = "mainnet"
optionNameRetrievalCaching = "cache-retrieval"
) )
func init() { func init() {
...@@ -210,14 +212,15 @@ func (c *command) setAllFlags(cmd *cobra.Command) { ...@@ -210,14 +212,15 @@ func (c *command) setAllFlags(cmd *cobra.Command) {
cmd.Flags().String(optionNameNATAddr, "", "NAT exposed address") cmd.Flags().String(optionNameNATAddr, "", "NAT exposed address")
cmd.Flags().Bool(optionNameP2PWSEnable, false, "enable P2P WebSocket transport") cmd.Flags().Bool(optionNameP2PWSEnable, false, "enable P2P WebSocket transport")
cmd.Flags().Bool(optionNameP2PQUICEnable, false, "enable P2P QUIC transport") cmd.Flags().Bool(optionNameP2PQUICEnable, false, "enable P2P QUIC transport")
cmd.Flags().StringSlice(optionNameBootnodes, []string{}, "initial nodes to connect to") cmd.Flags().StringSlice(optionNameBootnodes, []string{"/dnsaddr/testnet.ethswarm.org"}, "initial nodes to connect to")
cmd.Flags().Bool(optionNameDebugAPIEnable, false, "enable debug HTTP API") cmd.Flags().Bool(optionNameDebugAPIEnable, false, "enable debug HTTP API")
cmd.Flags().String(optionNameDebugAPIAddr, ":1635", "debug HTTP API listen address") cmd.Flags().String(optionNameDebugAPIAddr, ":1635", "debug HTTP API listen address")
cmd.Flags().Uint64(optionNameNetworkID, 10, "ID of the Swarm network") cmd.Flags().Uint64(optionNameNetworkID, 10, "ID of the Swarm network")
cmd.Flags().StringSlice(optionCORSAllowedOrigins, []string{}, "origins with CORS headers enabled") cmd.Flags().StringSlice(optionCORSAllowedOrigins, []string{}, "origins with CORS headers enabled")
cmd.Flags().Bool(optionNameStandalone, false, "whether we want the node to start with no listen addresses for p2p")
cmd.Flags().Bool(optionNameTracingEnabled, false, "enable tracing") cmd.Flags().Bool(optionNameTracingEnabled, false, "enable tracing")
cmd.Flags().String(optionNameTracingEndpoint, "127.0.0.1:6831", "endpoint to send tracing data") cmd.Flags().String(optionNameTracingEndpoint, "127.0.0.1:6831", "endpoint to send tracing data")
cmd.Flags().String(optionNameTracingHost, "", "host to send tracing data")
cmd.Flags().String(optionNameTracingPort, "", "port to send tracing data")
cmd.Flags().String(optionNameTracingServiceName, "bee", "service name identifier for tracing") cmd.Flags().String(optionNameTracingServiceName, "bee", "service name identifier for tracing")
cmd.Flags().String(optionNameVerbosity, "info", "log verbosity level 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=trace") cmd.Flags().String(optionNameVerbosity, "info", "log verbosity level 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=trace")
cmd.Flags().String(optionWelcomeMessage, "", "send a welcome message string during handshakes") cmd.Flags().String(optionWelcomeMessage, "", "send a welcome message string during handshakes")
...@@ -245,6 +248,7 @@ func (c *command) setAllFlags(cmd *cobra.Command) { ...@@ -245,6 +248,7 @@ func (c *command) setAllFlags(cmd *cobra.Command) {
cmd.Flags().String(optionNameSwapDeploymentGasPrice, "", "gas price in wei to use for deployment and funding") cmd.Flags().String(optionNameSwapDeploymentGasPrice, "", "gas price in wei to use for deployment and funding")
cmd.Flags().Duration(optionWarmUpTime, time.Minute*20, "time to warmup the node before pull/push protocols can be kicked off.") cmd.Flags().Duration(optionWarmUpTime, time.Minute*20, "time to warmup the node before pull/push protocols can be kicked off.")
cmd.Flags().Bool(optionNameMainNet, false, "triggers connect to main net bootnodes.") cmd.Flags().Bool(optionNameMainNet, false, "triggers connect to main net bootnodes.")
cmd.Flags().Bool(optionNameRetrievalCaching, true, "enable forwarded content caching")
} }
func newLogger(cmd *cobra.Command, verbosity string) (logging.Logger, error) { func newLogger(cmd *cobra.Command, verbosity string) (logging.Logger, error) {
......
...@@ -106,7 +106,7 @@ damage to hardware or loss of funds associated with the Ethereum account connect ...@@ -106,7 +106,7 @@ damage to hardware or loss of funds associated with the Ethereum account connect
No developers or entity involved will be liable for any claims and damages associated with your use, No developers or entity involved will be liable for any claims and damages associated with your use,
inability to use, or your interaction with other nodes or the software.`) inability to use, or your interaction with other nodes or the software.`)
fmt.Printf("\n\nversion: %v - planned to be supported until %v, please follow http://ethswarm.org/\n\n", bee.Version, endSupportDate()) fmt.Printf("\n\nversion: %v - planned to be supported until %v, please follow https://ethswarm.org/\n\n", bee.Version, endSupportDate())
debugAPIAddr := c.config.GetString(optionNameDebugAPIAddr) debugAPIAddr := c.config.GetString(optionNameDebugAPIAddr)
if !c.config.GetBool(optionNameDebugAPIEnable) { if !c.config.GetBool(optionNameDebugAPIEnable) {
...@@ -128,18 +128,34 @@ inability to use, or your interaction with other nodes or the software.`) ...@@ -128,18 +128,34 @@ inability to use, or your interaction with other nodes or the software.`)
} }
mainnet := c.config.GetBool(optionNameMainNet) mainnet := c.config.GetBool(optionNameMainNet)
networkID := c.config.GetUint64(optionNameNetworkID) networkID := c.config.GetUint64(optionNameNetworkID)
-	networkID, err = parseNetworks(mainnet, networkID)
-	if err != nil {
-		return err
-	}
+	if mainnet {
+		userHasSetNetworkID := c.config.IsSet(optionNameNetworkID)
+		if userHasSetNetworkID && networkID != 1 {
+			return errors.New("provided network ID does not match mainnet")
+		}
+		networkID = 1
+	}
 	bootnodes := c.config.GetStringSlice(optionNameBootnodes)
-	bootnodes = parseBootnodes(logger, mainnet, networkID, bootnodes)
 	blockTime := c.config.GetUint64(optionNameBlockTime)
-	blockTime = parseBlockTime(mainnet, blockTime)
+	networkConfig := getConfigByNetworkID(networkID, blockTime)
+	if c.config.IsSet(optionNameBootnodes) {
+		networkConfig.bootNodes = bootnodes
+	}
+	if c.config.IsSet(optionNameBlockTime) && blockTime != 0 {
+		networkConfig.blockTime = blockTime
+	}
+	tracingEndpoint := c.config.GetString(optionNameTracingEndpoint)
+	if c.config.IsSet(optionNameTracingHost) && c.config.IsSet(optionNameTracingPort) {
+		tracingEndpoint = strings.Join([]string{c.config.GetString(optionNameTracingHost), c.config.GetString(optionNameTracingPort)}, ":")
+	}
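The change above makes explicitly supplied flags win over the per-network defaults, and introduces a tracing host/port pair as an alternative to a single tracing endpoint. A minimal, self-contained sketch of the endpoint composition, with hypothetical values:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical values; in the hunk above they come from --tracing-endpoint,
	// --tracing-host and --tracing-port via the command's config.
	endpoint := "127.0.0.1:6831"
	host, port := "jaeger.local", "6831"
	if host != "" && port != "" { // stands in for the c.config.IsSet(...) checks
		endpoint = strings.Join([]string{host, port}, ":")
	}
	fmt.Println(endpoint) // jaeger.local:6831
}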
b, err := node.NewBee(c.config.GetString(optionNameP2PAddr), signerConfig.publicKey, signerConfig.signer, networkID, logger, signerConfig.libp2pPrivateKey, signerConfig.pssPrivateKey, &node.Options{ b, err := node.NewBee(c.config.GetString(optionNameP2PAddr), signerConfig.publicKey, signerConfig.signer, networkID, logger, signerConfig.libp2pPrivateKey, signerConfig.pssPrivateKey, &node.Options{
DataDir: c.config.GetString(optionNameDataDir), DataDir: c.config.GetString(optionNameDataDir),
...@@ -155,11 +171,10 @@ inability to use, or your interaction with other nodes or the software.`) ...@@ -155,11 +171,10 @@ inability to use, or your interaction with other nodes or the software.`)
EnableWS: c.config.GetBool(optionNameP2PWSEnable), EnableWS: c.config.GetBool(optionNameP2PWSEnable),
EnableQUIC: c.config.GetBool(optionNameP2PQUICEnable), EnableQUIC: c.config.GetBool(optionNameP2PQUICEnable),
WelcomeMessage: c.config.GetString(optionWelcomeMessage), WelcomeMessage: c.config.GetString(optionWelcomeMessage),
Bootnodes: bootnodes, Bootnodes: networkConfig.bootNodes,
CORSAllowedOrigins: c.config.GetStringSlice(optionCORSAllowedOrigins), CORSAllowedOrigins: c.config.GetStringSlice(optionCORSAllowedOrigins),
Standalone: c.config.GetBool(optionNameStandalone),
TracingEnabled: c.config.GetBool(optionNameTracingEnabled), TracingEnabled: c.config.GetBool(optionNameTracingEnabled),
TracingEndpoint: c.config.GetString(optionNameTracingEndpoint), TracingEndpoint: tracingEndpoint,
TracingServiceName: c.config.GetString(optionNameTracingServiceName), TracingServiceName: c.config.GetString(optionNameTracingServiceName),
Logger: logger, Logger: logger,
GlobalPinningEnabled: c.config.GetBool(optionNameGlobalPinningEnabled), GlobalPinningEnabled: c.config.GetBool(optionNameGlobalPinningEnabled),
...@@ -179,9 +194,11 @@ inability to use, or your interaction with other nodes or the software.`) ...@@ -179,9 +194,11 @@ inability to use, or your interaction with other nodes or the software.`)
BlockHash: c.config.GetString(optionNameBlockHash), BlockHash: c.config.GetString(optionNameBlockHash),
PostageContractAddress: c.config.GetString(optionNamePostageContractAddress), PostageContractAddress: c.config.GetString(optionNamePostageContractAddress),
PriceOracleAddress: c.config.GetString(optionNamePriceOracleAddress), PriceOracleAddress: c.config.GetString(optionNamePriceOracleAddress),
BlockTime: blockTime, BlockTime: networkConfig.blockTime,
DeployGasPrice: c.config.GetString(optionNameSwapDeploymentGasPrice), DeployGasPrice: c.config.GetString(optionNameSwapDeploymentGasPrice),
WarmupTime: c.config.GetDuration(optionWarmUpTime), WarmupTime: c.config.GetDuration(optionWarmUpTime),
ChainID: networkConfig.chainID,
RetrievalCaching: c.config.GetBool(optionNameRetrievalCaching),
}) })
if err != nil { if err != nil {
return err return err
...@@ -420,36 +437,28 @@ func (c *command) configureSigner(cmd *cobra.Command, logger logging.Logger) (co ...@@ -420,36 +437,28 @@ func (c *command) configureSigner(cmd *cobra.Command, logger logging.Logger) (co
}, nil }, nil
} }
-func parseNetworks(main bool, networkID uint64) (uint64, error) {
-	if main && networkID != 1 {
-		return 0, errors.New("provided network ID does not match mainnet")
-	}
-	return networkID, nil
-}
-
-func parseBootnodes(log logging.Logger, main bool, networkID uint64, bootnodes []string) []string {
-	if len(bootnodes) > 0 {
-		return bootnodes // use provided values
-	}
-	if main {
-		return []string{"/dnsaddr/mainnet.ethswarm.org"}
-	}
-
-	if networkID == 10 {
-		return []string{"/dnsaddr/testnet.ethswarm.org"}
-	}
-
-	log.Warning("no bootnodes defined for network ID", networkID)
-
-	return bootnodes
-}
-
-func parseBlockTime(main bool, blockTime uint64) uint64 {
-	if main {
-		return uint64(5 * time.Second)
-	}
-	return blockTime
-}
+type networkConfig struct {
+	bootNodes []string
+	blockTime uint64
+	chainID   int64
+}
+
+func getConfigByNetworkID(networkID uint64, defaultBlockTime uint64) *networkConfig {
+	var config = networkConfig{
+		blockTime: defaultBlockTime,
+	}
+	switch networkID {
+	case 1:
+		config.bootNodes = []string{"/dnsaddr/mainnet.ethswarm.org"}
+		config.blockTime = 5
+		config.chainID = 100
+	case 5: //staging
+		config.chainID = 5
+	case 10: //test
+		config.chainID = 5
+	default: //will use the value provided by the chain
+		config.chainID = -1
+	}
+
+	return &config
+}
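For orientation, a minimal illustration of what the new mapping resolves to; the block-time argument of 15 is only an example input, not part of the commit:

_ = getConfigByNetworkID(1, 15)  // mainnet: bootNodes = ["/dnsaddr/mainnet.ethswarm.org"], blockTime = 5, chainID = 100
_ = getConfigByNetworkID(5, 15)  // staging: bootNodes = nil, blockTime = 15, chainID = 5
_ = getConfigByNetworkID(10, 15) // testnet: bootNodes = nil, blockTime = 15, chainID = 5
_ = getConfigByNetworkID(99, 15) // any other ID: blockTime = 15, chainID = -1 (chain ID taken from the backend)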
...@@ -3,48 +3,49 @@ module github.com/ethersphere/bee ...@@ -3,48 +3,49 @@ module github.com/ethersphere/bee
go 1.15 go 1.15
require ( require (
github.com/btcsuite/btcd v0.21.0-beta github.com/btcsuite/btcd v0.22.0-beta
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect
github.com/coreos/go-semver v0.3.0 github.com/coreos/go-semver v0.3.0
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
github.com/ethereum/go-ethereum v1.9.23 github.com/ethereum/go-ethereum v1.9.23
github.com/ethersphere/go-price-oracle-abi v0.1.0 github.com/ethersphere/go-price-oracle-abi v0.1.0
github.com/ethersphere/go-storage-incentives-abi v0.3.0 github.com/ethersphere/go-storage-incentives-abi v0.3.0
github.com/ethersphere/go-sw3-abi v0.4.0 github.com/ethersphere/go-sw3-abi v0.4.0
github.com/ethersphere/langos v1.0.0 github.com/ethersphere/langos v1.0.0
github.com/gogo/protobuf v1.3.1 github.com/gogo/protobuf v1.3.2
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect github.com/golang/protobuf v1.5.2 // indirect
github.com/google/go-cmp v0.5.0 github.com/google/go-cmp v0.5.5
github.com/google/gopacket v1.1.19 // indirect
github.com/google/uuid v1.1.4 // indirect github.com/google/uuid v1.1.4 // indirect
github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 // indirect github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 // indirect
github.com/gorilla/handlers v1.4.2 github.com/gorilla/handlers v1.4.2
github.com/gorilla/mux v1.7.4 github.com/gorilla/mux v1.7.4
github.com/gorilla/websocket v1.4.2 github.com/gorilla/websocket v1.4.2
github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/go-multierror v1.1.1
github.com/huin/goupnp v1.0.1 // indirect
github.com/ipfs/go-log v1.0.5 // indirect
github.com/ipfs/go-log/v2 v2.3.0 // indirect
github.com/kardianos/service v1.2.0 github.com/kardianos/service v1.2.0
github.com/klauspost/cpuid/v2 v2.0.8 // indirect
github.com/koron/go-ssdp v0.0.2 // indirect github.com/koron/go-ssdp v0.0.2 // indirect
github.com/kr/text v0.2.0 // indirect github.com/kr/text v0.2.0 // indirect
github.com/libp2p/go-libp2p v0.13.0 github.com/libp2p/go-libp2p v0.14.3
github.com/libp2p/go-libp2p-autonat v0.4.0 github.com/libp2p/go-libp2p-autonat v0.4.2
github.com/libp2p/go-libp2p-core v0.8.0 github.com/libp2p/go-libp2p-core v0.8.5
github.com/libp2p/go-libp2p-noise v0.1.2 // indirect github.com/libp2p/go-libp2p-discovery v0.5.1 // indirect
github.com/libp2p/go-libp2p-peerstore v0.2.6 github.com/libp2p/go-libp2p-peerstore v0.2.7
github.com/libp2p/go-libp2p-quic-transport v0.10.0 github.com/libp2p/go-libp2p-quic-transport v0.10.0
github.com/libp2p/go-libp2p-transport-upgrader v0.4.0 github.com/libp2p/go-libp2p-transport-upgrader v0.4.2
github.com/libp2p/go-netroute v0.1.4 // indirect github.com/libp2p/go-tcp-transport v0.2.3
github.com/libp2p/go-sockaddr v0.1.0 // indirect
github.com/libp2p/go-tcp-transport v0.2.1
github.com/libp2p/go-ws-transport v0.4.0 github.com/libp2p/go-ws-transport v0.4.0
github.com/mattn/go-colorable v0.1.2 // indirect github.com/mattn/go-colorable v0.1.2 // indirect
github.com/miekg/dns v1.1.43 // indirect
github.com/mitchellh/mapstructure v1.3.2 // indirect github.com/mitchellh/mapstructure v1.3.2 // indirect
github.com/multiformats/go-multiaddr v0.3.1 github.com/multiformats/go-multiaddr v0.3.3
github.com/multiformats/go-multiaddr-dns v0.2.0 github.com/multiformats/go-multiaddr-dns v0.3.1
github.com/multiformats/go-multistream v0.2.0 github.com/multiformats/go-multistream v0.2.2
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
github.com/opentracing/opentracing-go v1.2.0 github.com/opentracing/opentracing-go v1.2.0
github.com/pelletier/go-toml v1.8.0 // indirect github.com/pelletier/go-toml v1.8.0 // indirect
github.com/prometheus/client_golang v1.7.1 github.com/prometheus/client_golang v1.11.0
github.com/prometheus/common v0.29.0 // indirect
github.com/prometheus/procfs v0.7.0 // indirect
github.com/sirupsen/logrus v1.6.0 github.com/sirupsen/logrus v1.6.0
github.com/smartystreets/assertions v1.1.1 // indirect github.com/smartystreets/assertions v1.1.1 // indirect
github.com/spf13/afero v1.3.1 // indirect github.com/spf13/afero v1.3.1 // indirect
...@@ -59,22 +60,18 @@ require ( ...@@ -59,22 +60,18 @@ require (
github.com/vmihailenco/msgpack/v5 v5.3.4 github.com/vmihailenco/msgpack/v5 v5.3.4
github.com/wealdtech/go-ens/v3 v3.4.4 github.com/wealdtech/go-ens/v3 v3.4.4
gitlab.com/nolash/go-mockbytes v0.0.7 gitlab.com/nolash/go-mockbytes v0.0.7
go.opencensus.io v0.22.5 // indirect go.uber.org/atomic v1.8.0
go.uber.org/atomic v1.7.0 go.uber.org/multierr v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.18.1 // indirect
go.uber.org/zap v1.16.0 // indirect golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad golang.org/x/net v0.0.0-20210614182718-04defd469f4e
golang.org/x/net v0.0.0-20201224014010-6772e930b67b golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c
golang.org/x/sys v0.0.0-20210108172913-0df2131ae363
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf
golang.org/x/text v0.3.4 // indirect
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 golang.org/x/time v0.0.0-20191024005414-555d28b269f0
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect google.golang.org/protobuf v1.27.1 // indirect
gopkg.in/ini.v1 v1.57.0 // indirect gopkg.in/ini.v1 v1.57.0 // indirect
gopkg.in/yaml.v2 v2.3.0 gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 // indirect
honnef.co/go/tools v0.0.1-2020.1.4 // indirect
resenje.org/singleflight v0.2.0 resenje.org/singleflight v0.2.0
resenje.org/web v0.4.3 resenje.org/web v0.4.3
) )
openapi: 3.0.3 openapi: 3.0.3
info: info:
version: 0.6.0 version: 1.0.0
title: Swarm API title: Swarm API
description: "A list of the currently provided Interfaces to interact with the swarm, implementing file operations and sending messages" description: "A list of the currently provided Interfaces to interact with the swarm, implementing file operations and sending messages"
...@@ -10,7 +10,7 @@ security: ...@@ -10,7 +10,7 @@ security:
externalDocs: externalDocs:
description: Browse the documentation @ the Swarm Docs description: Browse the documentation @ the Swarm Docs
url: "https://docs.swarm.eth" url: "https://docs.ethswarm.org"
servers: servers:
- url: "http://{apiRoot}:{port}/v1" - url: "http://{apiRoot}:{port}/v1"
...@@ -62,7 +62,7 @@ paths: ...@@ -62,7 +62,7 @@ paths:
"402": "402":
$ref: "SwarmCommon.yaml#/components/responses/402" $ref: "SwarmCommon.yaml#/components/responses/402"
"403": "403":
$ref: "SwarmCommon.yaml#/components/responses/403" $ref: "SwarmCommon.yaml#/components/responses/GatewayForbidden"
"500": "500":
$ref: "SwarmCommon.yaml#/components/responses/500" $ref: "SwarmCommon.yaml#/components/responses/500"
default: default:
...@@ -157,6 +157,8 @@ paths: ...@@ -157,6 +157,8 @@ paths:
$ref: "SwarmCommon.yaml#/components/responses/400" $ref: "SwarmCommon.yaml#/components/responses/400"
"402": "402":
$ref: "SwarmCommon.yaml#/components/responses/402" $ref: "SwarmCommon.yaml#/components/responses/402"
"403":
$ref: "SwarmCommon.yaml#/components/responses/GatewayForbidden"
"500": "500":
$ref: "SwarmCommon.yaml#/components/responses/500" $ref: "SwarmCommon.yaml#/components/responses/500"
default: default:
...@@ -171,8 +173,7 @@ paths: ...@@ -171,8 +173,7 @@ paths:
A multipart request is treated as a collection regardless of whether the swarm-collection header is present. This means in order to serve single files A multipart request is treated as a collection regardless of whether the swarm-collection header is present. This means in order to serve single files
uploaded as a multipart request, the swarm-index-document header should be used with the name of the file." uploaded as a multipart request, the swarm-index-document header should be used with the name of the file."
tags: tags:
- File - BZZ
- Collection
parameters: parameters:
- in: query - in: query
name: name name: name
...@@ -223,7 +224,7 @@ paths: ...@@ -223,7 +224,7 @@ paths:
"402": "402":
$ref: "SwarmCommon.yaml#/components/responses/402" $ref: "SwarmCommon.yaml#/components/responses/402"
"403": "403":
$ref: "SwarmCommon.yaml#/components/responses/403" $ref: "SwarmCommon.yaml#/components/responses/GatewayForbidden"
"500": "500":
$ref: "SwarmCommon.yaml#/components/responses/500" $ref: "SwarmCommon.yaml#/components/responses/500"
default: default:
...@@ -233,15 +234,14 @@ paths: ...@@ -233,15 +234,14 @@ paths:
patch: patch:
summary: "Reupload a root hash to the network" summary: "Reupload a root hash to the network"
tags: tags:
- Reupload file - BZZ
- Reupload collection
parameters: parameters:
- in: path - in: path
name: reference name: reference
schema: schema:
$ref: "SwarmCommon.yaml#/components/schemas/SwarmReference" $ref: "SwarmCommon.yaml#/components/schemas/SwarmReference"
required: true required: true
description: Root hash of content description: "Root hash of content (can be of any type: collection, file, chunk)"
responses: responses:
"200": "200":
description: Ok description: Ok
...@@ -252,7 +252,7 @@ paths: ...@@ -252,7 +252,7 @@ paths:
get: get:
summary: "Get file or index document from a collection of files" summary: "Get file or index document from a collection of files"
tags: tags:
- Collection - BZZ
parameters: parameters:
- in: path - in: path
name: reference name: reference
...@@ -287,7 +287,7 @@ paths: ...@@ -287,7 +287,7 @@ paths:
get: get:
summary: "Get referenced file from a collection of files" summary: "Get referenced file from a collection of files"
tags: tags:
- Collection - BZZ
parameters: parameters:
- in: path - in: path
name: reference name: reference
...@@ -354,7 +354,7 @@ paths: ...@@ -354,7 +354,7 @@ paths:
schema: schema:
$ref: "SwarmCommon.yaml#/components/schemas/TagsList" $ref: "SwarmCommon.yaml#/components/schemas/TagsList"
"403": "403":
$ref: "SwarmCommon.yaml#/components/responses/403" $ref: "SwarmCommon.yaml#/components/responses/GatewayForbidden"
"500": "500":
$ref: "SwarmCommon.yaml#/components/responses/500" $ref: "SwarmCommon.yaml#/components/responses/500"
default: default:
...@@ -377,7 +377,7 @@ paths: ...@@ -377,7 +377,7 @@ paths:
schema: schema:
$ref: "SwarmCommon.yaml#/components/schemas/NewTagResponse" $ref: "SwarmCommon.yaml#/components/schemas/NewTagResponse"
"403": "403":
$ref: "SwarmCommon.yaml#/components/responses/403" $ref: "SwarmCommon.yaml#/components/responses/GatewayForbidden"
"500": "500":
$ref: "SwarmCommon.yaml#/components/responses/500" $ref: "SwarmCommon.yaml#/components/responses/500"
default: default:
...@@ -405,7 +405,7 @@ paths: ...@@ -405,7 +405,7 @@ paths:
"400": "400":
$ref: "SwarmCommon.yaml#/components/responses/400" $ref: "SwarmCommon.yaml#/components/responses/400"
"403": "403":
$ref: "SwarmCommon.yaml#/components/responses/403" $ref: "SwarmCommon.yaml#/components/responses/GatewayForbidden"
"404": "404":
$ref: "SwarmCommon.yaml#/components/responses/404" $ref: "SwarmCommon.yaml#/components/responses/404"
"500": "500":
...@@ -429,7 +429,7 @@ paths: ...@@ -429,7 +429,7 @@ paths:
"400": "400":
$ref: "SwarmCommon.yaml#/components/responses/400" $ref: "SwarmCommon.yaml#/components/responses/400"
"403": "403":
$ref: "SwarmCommon.yaml#/components/responses/403" $ref: "SwarmCommon.yaml#/components/responses/GatewayForbidden"
"404": "404":
$ref: "SwarmCommon.yaml#/components/responses/404" $ref: "SwarmCommon.yaml#/components/responses/404"
"500": "500":
...@@ -462,7 +462,7 @@ paths: ...@@ -462,7 +462,7 @@ paths:
schema: schema:
$ref: "SwarmCommon.yaml#/components/schemas/Status" $ref: "SwarmCommon.yaml#/components/schemas/Status"
"403": "403":
$ref: "SwarmCommon.yaml#/components/responses/403" $ref: "SwarmCommon.yaml#/components/responses/GatewayForbidden"
"404": "404":
$ref: "SwarmCommon.yaml#/components/responses/404" $ref: "SwarmCommon.yaml#/components/responses/404"
"500": "500":
...@@ -481,7 +481,7 @@ paths: ...@@ -481,7 +481,7 @@ paths:
post: post:
summary: Pin the root hash with the given reference summary: Pin the root hash with the given reference
tags: tags:
- Root hash pinning - Pinning
responses: responses:
"200": "200":
description: Pin already exists, so no operation description: Pin already exists, so no operation
...@@ -498,7 +498,7 @@ paths: ...@@ -498,7 +498,7 @@ paths:
"400": "400":
$ref: "SwarmCommon.yaml#/components/responses/400" $ref: "SwarmCommon.yaml#/components/responses/400"
"403": "403":
$ref: "SwarmCommon.yaml#/components/responses/403" $ref: "SwarmCommon.yaml#/components/responses/GatewayForbidden"
"404": "404":
$ref: "SwarmCommon.yaml#/components/responses/404" $ref: "SwarmCommon.yaml#/components/responses/404"
"500": "500":
...@@ -508,7 +508,7 @@ paths: ...@@ -508,7 +508,7 @@ paths:
delete: delete:
summary: Unpin the root hash with the given reference summary: Unpin the root hash with the given reference
tags: tags:
- Root hash pinning - Pinning
responses: responses:
"200": "200":
description: Unpinning root hash with reference description: Unpinning root hash with reference
...@@ -519,7 +519,7 @@ paths: ...@@ -519,7 +519,7 @@ paths:
"400": "400":
$ref: "SwarmCommon.yaml#/components/responses/400" $ref: "SwarmCommon.yaml#/components/responses/400"
"403": "403":
$ref: "SwarmCommon.yaml#/components/responses/403" $ref: "SwarmCommon.yaml#/components/responses/GatewayForbidden"
"500": "500":
$ref: "SwarmCommon.yaml#/components/responses/500" $ref: "SwarmCommon.yaml#/components/responses/500"
default: default:
...@@ -527,7 +527,7 @@ paths: ...@@ -527,7 +527,7 @@ paths:
get: get:
summary: Get pinning status of the root hash with the given reference summary: Get pinning status of the root hash with the given reference
tags: tags:
- Root hash pinning - Pinning
responses: responses:
"200": "200":
description: Reference of the pinned root hash description: Reference of the pinned root hash
...@@ -538,7 +538,7 @@ paths: ...@@ -538,7 +538,7 @@ paths:
"400": "400":
$ref: "SwarmCommon.yaml#/components/responses/400" $ref: "SwarmCommon.yaml#/components/responses/400"
"403": "403":
$ref: "SwarmCommon.yaml#/components/responses/403" $ref: "SwarmCommon.yaml#/components/responses/GatewayForbidden"
"404": "404":
$ref: "SwarmCommon.yaml#/components/responses/404" $ref: "SwarmCommon.yaml#/components/responses/404"
"500": "500":
...@@ -550,7 +550,7 @@ paths: ...@@ -550,7 +550,7 @@ paths:
get: get:
summary: Get the list of pinned root hash references summary: Get the list of pinned root hash references
tags: tags:
- Root hash pinning - Pinning
responses: responses:
"200": "200":
description: List of pinned root hash references description: List of pinned root hash references
...@@ -559,7 +559,7 @@ paths: ...@@ -559,7 +559,7 @@ paths:
schema: schema:
$ref: "SwarmCommon.yaml#/components/schemas/SwarmOnlyReferencesList" $ref: "SwarmCommon.yaml#/components/schemas/SwarmOnlyReferencesList"
"403": "403":
$ref: "SwarmCommon.yaml#/components/responses/403" $ref: "SwarmCommon.yaml#/components/responses/GatewayForbidden"
"500": "500":
$ref: "SwarmCommon.yaml#/components/responses/500" $ref: "SwarmCommon.yaml#/components/responses/500"
default: default:
...@@ -597,6 +597,8 @@ paths: ...@@ -597,6 +597,8 @@ paths:
$ref: "SwarmCommon.yaml#/components/responses/400" $ref: "SwarmCommon.yaml#/components/responses/400"
"402": "402":
$ref: "SwarmCommon.yaml#/components/responses/402" $ref: "SwarmCommon.yaml#/components/responses/402"
"403":
$ref: "SwarmCommon.yaml#/components/responses/GatewayForbidden"
"500": "500":
$ref: "SwarmCommon.yaml#/components/responses/500" $ref: "SwarmCommon.yaml#/components/responses/500"
default: default:
...@@ -617,6 +619,8 @@ paths: ...@@ -617,6 +619,8 @@ paths:
responses: responses:
"200": "200":
description: Returns a WebSocket with a subscription for incoming message data on the requested topic. description: Returns a WebSocket with a subscription for incoming message data on the requested topic.
"403":
$ref: "SwarmCommon.yaml#/components/responses/GatewayForbidden"
"500": "500":
$ref: "SwarmCommon.yaml#/components/responses/500" $ref: "SwarmCommon.yaml#/components/responses/500"
default: default:
...@@ -660,6 +664,8 @@ paths: ...@@ -660,6 +664,8 @@ paths:
$ref: "SwarmCommon.yaml#/components/responses/401" $ref: "SwarmCommon.yaml#/components/responses/401"
"402": "402":
$ref: "SwarmCommon.yaml#/components/responses/402" $ref: "SwarmCommon.yaml#/components/responses/402"
"403":
$ref: "SwarmCommon.yaml#/components/responses/GatewayForbidden"
"500": "500":
$ref: "SwarmCommon.yaml#/components/responses/500" $ref: "SwarmCommon.yaml#/components/responses/500"
default: default:
...@@ -704,6 +710,8 @@ paths: ...@@ -704,6 +710,8 @@ paths:
$ref: "SwarmCommon.yaml#/components/responses/401" $ref: "SwarmCommon.yaml#/components/responses/401"
"402": "402":
$ref: "SwarmCommon.yaml#/components/responses/402" $ref: "SwarmCommon.yaml#/components/responses/402"
"403":
$ref: "SwarmCommon.yaml#/components/responses/GatewayForbidden"
"500": "500":
$ref: "SwarmCommon.yaml#/components/responses/500" $ref: "SwarmCommon.yaml#/components/responses/500"
default: default:
...@@ -757,3 +765,87 @@ paths: ...@@ -757,3 +765,87 @@ paths:
$ref: "SwarmCommon.yaml#/components/responses/500" $ref: "SwarmCommon.yaml#/components/responses/500"
default: default:
description: Default response description: Default response
"/stamps":
get:
summary: Get all available stamps for this node
deprecated: true
tags:
- Postage Stamps
responses:
"200":
description: Returns an array of all available postage batches.
content:
application/json:
schema:
$ref: "SwarmCommon.yaml#/components/schemas/PostageBatchesResponse"
default:
description: Default response
"/stamps/{id}":
parameters:
- in: path
name: id
schema:
$ref: "SwarmCommon.yaml#/components/schemas/BatchID"
required: true
description: Swarm address of the stamp
get:
summary: Get an individual postage batch status
deprecated: true
tags:
- Postage Stamps
responses:
"200":
description: Returns an individual postage batch state
content:
application/json:
schema:
$ref: "SwarmCommon.yaml#/components/schemas/PostageBatch"
"400":
$ref: "SwarmCommon.yaml#/components/responses/400"
default:
description: Default response
"/stamps/{amount}/{depth}":
post:
summary: Buy a new postage batch.
description: Be aware, this endpoint creates an on-chain transaction and transfers BZZ from the node's Ethereum account and hence directly manipulates the wallet balance!
deprecated: true
tags:
- Postage Stamps
parameters:
- in: path
name: amount
schema:
type: integer
required: true
description: Amount of BZZ added that the postage batch will have.
- in: path
name: depth
schema:
type: integer
required: true
description: Batch depth which specifies how many chunks can be signed with the batch. It is a logarithm. Must be higher than default bucket depth (16)
- in: query
name: label
schema:
type: string
required: false
description: An optional label for this batch
- $ref: "SwarmCommon.yaml#/components/parameters/GasPriceParameter"
responses:
"201":
description: Returns the newly created postage batch ID
content:
application/json:
schema:
$ref: "SwarmCommon.yaml#/components/schemas/BatchIDResponse"
"400":
$ref: "SwarmCommon.yaml#/components/responses/400"
"500":
$ref: "SwarmCommon.yaml#/components/responses/500"
default:
description: Default response
openapi: 3.0.3 openapi: 3.0.3
info: info:
version: 0.6.0 version: 1.0.0
title: Common Data Types title: Common Data Types
description: | description: |
\*****bzzz***** \*****bzzz*****
...@@ -333,6 +333,31 @@ components: ...@@ -333,6 +333,31 @@ components:
type: integer type: integer
immutableFlag: immutableFlag:
type: boolean type: boolean
exists:
type: boolean
StampBucketData:
type: object
properties:
bucketID:
type: integer
collisions:
type: integer
PostageStampBuckets:
type: object
properties:
depth:
type: integer
bucketDepth:
type: integer
bucketUpperBound:
type: integer
buckets:
type: array
nullable: true
items:
$ref: "#/components/schemas/StampBucketData"
Settlement: Settlement:
type: object type: object
...@@ -449,17 +474,17 @@ components: ...@@ -449,17 +474,17 @@ components:
nonce: nonce:
type: integer type: integer
gasPrice: gasPrice:
type: "#/components/schemas/BigInt" $ref: "#/components/schemas/BigInt"
gasLimit: gasLimit:
type: integer type: integer
data: data:
type: string type: string
created: created:
type: "#/components/schemas/DateTime" $ref: "#/components/schemas/DateTime"
description: description:
type: string type: string
value: value:
type: "#/components/schemas/BigInt" $ref: "#/components/schemas/BigInt"
PendingTransactionReponse: PendingTransactionReponse:
type: object type: object
...@@ -554,7 +579,10 @@ components: ...@@ -554,7 +579,10 @@ components:
schema: schema:
type: boolean type: boolean
required: false required: false
description: Represents the pinning state of the chunk description: >
Represents if the uploaded data should be also locally pinned on the node.
Warning! Not available for nodes that run in Gateway mode!
SwarmEncryptParameter: SwarmEncryptParameter:
in: header in: header
...@@ -562,7 +590,10 @@ components: ...@@ -562,7 +590,10 @@ components:
schema: schema:
type: boolean type: boolean
required: false required: false
description: Represents the encrypting state of the file description: >
Represents the encrypting state of the file
Warning! Not available for nodes that run in Gateway mode!
ContentTypePreserved: ContentTypePreserved:
in: header in: header
...@@ -603,7 +634,7 @@ components: ...@@ -603,7 +634,7 @@ components:
description: "ID of Postage Batch that is used to upload data with" description: "ID of Postage Batch that is used to upload data with"
required: true required: true
schema: schema:
$ref: "SwarmCommon.yaml#/components/schemas/SwarmAddress" $ref: "#/components/schemas/SwarmAddress"
responses: responses:
"204": "204":
...@@ -626,12 +657,6 @@ components: ...@@ -626,12 +657,6 @@ components:
application/problem+json: application/problem+json:
schema: schema:
$ref: "#/components/schemas/ProblemDetails" $ref: "#/components/schemas/ProblemDetails"
"403":
description: Forbidden
content:
application/problem+json:
schema:
$ref: "#/components/schemas/ProblemDetails"
"404": "404":
description: Not Found description: Not Found
content: content:
...@@ -644,3 +669,10 @@ components: ...@@ -644,3 +669,10 @@ components:
application/problem+json: application/problem+json:
schema: schema:
$ref: "#/components/schemas/ProblemDetails" $ref: "#/components/schemas/ProblemDetails"
"GatewayForbidden":
description: "Endpoint or header (pinning or encryption headers) forbidden in Gateway mode"
content:
application/problem+json:
schema:
$ref: "#/components/schemas/ProblemDetails"
\ No newline at end of file
openapi: 3.0.3 openapi: 3.0.3
info: info:
version: 0.6.0 version: 1.0.0
title: Bee Debug API title: Bee Debug API
description: "A list of the currently provided debug interfaces to interact with the bee node" description: "A list of the currently provided debug interfaces to interact with the bee node"
...@@ -9,7 +9,7 @@ security: ...@@ -9,7 +9,7 @@ security:
externalDocs: externalDocs:
description: Browse the documentation @ the Swarm Docs description: Browse the documentation @ the Swarm Docs
url: "https://docs.swarm.eth" url: "https://docs.ethswarm.org"
servers: servers:
- url: "http://{apiRoot}:{port}" - url: "http://{apiRoot}:{port}"
...@@ -663,8 +663,6 @@ paths: ...@@ -663,8 +663,6 @@ paths:
$ref: "SwarmCommon.yaml#/components/schemas/NewTagDebugResponse" $ref: "SwarmCommon.yaml#/components/schemas/NewTagDebugResponse"
"400": "400":
$ref: "SwarmCommon.yaml#/components/responses/400" $ref: "SwarmCommon.yaml#/components/responses/400"
"403":
$ref: "SwarmCommon.yaml#/components/responses/403"
"500": "500":
$ref: "SwarmCommon.yaml#/components/responses/500" $ref: "SwarmCommon.yaml#/components/responses/500"
default: default:
...@@ -802,9 +800,34 @@ paths: ...@@ -802,9 +800,34 @@ paths:
default: default:
description: Default response description: Default response
"/stamps/{id}/buckets":
parameters:
- in: path
name: id
schema:
$ref: "SwarmCommon.yaml#/components/schemas/BatchID"
required: true
description: Swarm address of the stamp
get:
summary: Get extended bucket data of a batch
tags:
- Postage Stamps
responses:
"200":
description: Returns extended bucket data of the provided batch ID
content:
application/json:
schema:
$ref: "SwarmCommon.yaml#/components/schemas/PostageStampBuckets"
"400":
$ref: "SwarmCommon.yaml#/components/responses/400"
default:
description: Default response
"/stamps/{amount}/{depth}": "/stamps/{amount}/{depth}":
post: post:
summary: Buy a new postage batch. Be aware, this endpoint create an on-chain transactions and transfers BZZ from the node's Ethereum account and hence directly manipulates the wallet balance! summary: Buy a new postage batch.
description: Be aware, this endpoint creates an on-chain transaction and transfers BZZ from the node's Ethereum account and hence directly manipulates the wallet balance!
tags: tags:
- Postage Stamps - Postage Stamps
parameters: parameters:
...@@ -826,6 +849,11 @@ paths: ...@@ -826,6 +849,11 @@ paths:
type: string type: string
required: false required: false
description: An optional label for this batch description: An optional label for this batch
- in: header
name: immutable
schema:
type: boolean
required: false
- $ref: "SwarmCommon.yaml#/components/parameters/GasPriceParameter" - $ref: "SwarmCommon.yaml#/components/parameters/GasPriceParameter"
responses: responses:
"201": "201":
...@@ -840,25 +868,3 @@ paths: ...@@ -840,25 +868,3 @@ paths:
$ref: "SwarmCommon.yaml#/components/responses/500" $ref: "SwarmCommon.yaml#/components/responses/500"
default: default:
description: Default response description: Default response
"/stamps/default/{id}":
parameters:
- in: path
name: id
schema:
$ref: "SwarmCommon.yaml#/components/schemas/BatchID"
required: true
description: Swarm address of the stamp
put:
summary: Set the default postage stamp issuer
tags:
- Postage Stamps
responses:
"204":
description: The default stamp issuer was updated successfully
"400":
$ref: "SwarmCommon.yaml#/components/responses/400"
"500":
$ref: "SwarmCommon.yaml#/components/responses/500"
default:
description: Default response
\ No newline at end of file
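A minimal client-side sketch of the batch purchase endpoint documented above, against the Debug API (the listen address comes from the default debug-api-addr ":1635"; amount, depth and label are hypothetical example values; the Immutable and Gas-Price headers are the ones introduced in this diff):

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// POST /stamps/{amount}/{depth}?label=... on the Debug API.
	url := "http://localhost:1635/stamps/10000000/20?label=example"
	req, err := http.NewRequest(http.MethodPost, url, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Immutable", "true") // optional header added in this diff
	// req.Header.Set("Gas-Price", "1000000000") // optional gas price in wei

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// A 201 response carries {"batchID":"<hex>"} per the BatchIDResponse schema.
	var out struct {
		BatchID string `json:"batchID"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(resp.StatusCode, out.BatchID)
}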
...@@ -24,7 +24,7 @@ The node's output was: ...@@ -24,7 +24,7 @@ The node's output was:
*) *)
ETH_ADDRESS=$(echo "$RESP" | grep ethereum | cut -d' ' -f6 | tr -d '"') ETH_ADDRESS=$(echo "$RESP" | grep ethereum | cut -d' ' -f6 | tr -d '"')
echo " echo "
Please make sure there is sufficient ETH and BZZ available on the node's Ethereum address: $ETH_ADDRESS. Please make sure there is sufficient native tokens and BZZ available on the node's Ethereum address at the APPROPRIATE BLOCKCHAIN: 0x$ETH_ADDRESS.
Learn how to fund your node by visiting our docs at at https://docs.ethswarm.org/docs/installation/fund-your-node Learn how to fund your node by visiting our docs at at https://docs.ethswarm.org/docs/installation/fund-your-node
......
...@@ -62,8 +62,6 @@ password-file: /var/lib/bee/password ...@@ -62,8 +62,6 @@ password-file: /var/lib/bee/password
# postage-stamp-address: "" # postage-stamp-address: ""
## ENS compatible API endpoint for a TLD and with contract address, can be repeated, format [tld:][contract-addr@]url ## ENS compatible API endpoint for a TLD and with contract address, can be repeated, format [tld:][contract-addr@]url
# resolver-options: [] # resolver-options: []
## whether we want the node to start with no listen addresses for p2p
# standalone: false
## enable swap (default true) ## enable swap (default true)
# swap-enable: true # swap-enable: true
## swap ethereum blockchain endpoint (default "ws://localhost:8546") ## swap ethereum blockchain endpoint (default "ws://localhost:8546")
......
...@@ -45,7 +45,6 @@ services: ...@@ -45,7 +45,6 @@ services:
- BEE_PAYMENT_TOLERANCE - BEE_PAYMENT_TOLERANCE
- BEE_POSTAGE_STAMP_ADDRESS - BEE_POSTAGE_STAMP_ADDRESS
- BEE_RESOLVER_OPTIONS - BEE_RESOLVER_OPTIONS
- BEE_STANDALONE
- BEE_SWAP_ENABLE - BEE_SWAP_ENABLE
- BEE_SWAP_ENDPOINT - BEE_SWAP_ENDPOINT
- BEE_SWAP_FACTORY_ADDRESS - BEE_SWAP_FACTORY_ADDRESS
......
...@@ -69,8 +69,6 @@ BEE_CLEF_SIGNER_ENABLE=true ...@@ -69,8 +69,6 @@ BEE_CLEF_SIGNER_ENABLE=true
# BEE_POSTAGE_STAMP_ADDRESS= # BEE_POSTAGE_STAMP_ADDRESS=
## ENS compatible API endpoint for a TLD and with contract address, can be repeated, format [tld:][contract-addr@]url ## ENS compatible API endpoint for a TLD and with contract address, can be repeated, format [tld:][contract-addr@]url
# BEE_RESOLVER_OPTIONS=[] # BEE_RESOLVER_OPTIONS=[]
## whether we want the node to start with no listen addresses for p2p
# BEE_STANDALONE=false
## enable swap (default true) ## enable swap (default true)
# BEE_SWAP_ENABLE=true # BEE_SWAP_ENABLE=true
## swap ethereum blockchain endpoint (default ws://localhost:8546) ## swap ethereum blockchain endpoint (default ws://localhost:8546)
......
...@@ -62,8 +62,6 @@ password-file: /usr/local/var/lib/swarm-bee/password ...@@ -62,8 +62,6 @@ password-file: /usr/local/var/lib/swarm-bee/password
# postage-stamp-address: "" # postage-stamp-address: ""
## ENS compatible API endpoint for a TLD and with contract address, can be repeated, format [tld:][contract-addr@]url ## ENS compatible API endpoint for a TLD and with contract address, can be repeated, format [tld:][contract-addr@]url
# resolver-options: [] # resolver-options: []
## whether we want the node to start with no listen addresses for p2p
# standalone: false
## enable swap (default true) ## enable swap (default true)
# swap-enable: true # swap-enable: true
## swap ethereum blockchain endpoint (default "ws://localhost:8546") ## swap ethereum blockchain endpoint (default "ws://localhost:8546")
......
...@@ -52,8 +52,6 @@ password-file: ./password ...@@ -52,8 +52,6 @@ password-file: ./password
# postage-stamp-address: "" # postage-stamp-address: ""
## ENS compatible API endpoint for a TLD and with contract address, can be repeated, format [tld:][contract-addr@]url ## ENS compatible API endpoint for a TLD and with contract address, can be repeated, format [tld:][contract-addr@]url
# resolver-options: [] # resolver-options: []
## whether we want the node to start with no listen addresses for p2p
# standalone: false
## enable swap (default true) ## enable swap (default true)
# swap-enable: true # swap-enable: true
## swap ethereum blockchain endpoint (default "ws://localhost:8546") ## swap ethereum blockchain endpoint (default "ws://localhost:8546")
......
...@@ -69,14 +69,13 @@ const ( ...@@ -69,14 +69,13 @@ const (
) )
var ( var (
errInvalidNameOrAddress = errors.New("invalid name or bzz address") errInvalidNameOrAddress = errors.New("invalid name or bzz address")
errNoResolver = errors.New("no resolver connected") errNoResolver = errors.New("no resolver connected")
errInvalidRequest = errors.New("could not validate request") errInvalidRequest = errors.New("could not validate request")
errInvalidContentType = errors.New("invalid content-type") errInvalidContentType = errors.New("invalid content-type")
errDirectoryStore = errors.New("could not store directory") errDirectoryStore = errors.New("could not store directory")
errFileStore = errors.New("could not store file") errFileStore = errors.New("could not store file")
errInvalidPostageBatch = errors.New("invalid postage batch id") errInvalidPostageBatch = errors.New("invalid postage batch id")
errSwarmPostageBatchIDHeaderNotFound = fmt.Errorf("header %s not found", SwarmPostageBatchIdHeader)
) )
// Service is the API service interface. // Service is the API service interface.
...@@ -238,7 +237,7 @@ func requestPostageBatchId(r *http.Request) ([]byte, error) { ...@@ -238,7 +237,7 @@ func requestPostageBatchId(r *http.Request) ([]byte, error) {
return b, nil return b, nil
} }
return nil, errSwarmPostageBatchIDHeaderNotFound return nil, errInvalidPostageBatch
} }
func (s *server) newTracingHandler(spanName string) func(h http.Handler) http.Handler { func (s *server) newTracingHandler(spanName string) func(h http.Handler) http.Handler {
......
...@@ -284,7 +284,7 @@ func TestPostageHeaderError(t *testing.T) { ...@@ -284,7 +284,7 @@ func TestPostageHeaderError(t *testing.T) {
for _, endpoint := range endpoints { for _, endpoint := range endpoints {
t.Run(endpoint+": empty batch", func(t *testing.T) { t.Run(endpoint+": empty batch", func(t *testing.T) {
hexbatch := hex.EncodeToString(batchEmpty) hexbatch := hex.EncodeToString(batchEmpty)
expCode := http.StatusCreated expCode := http.StatusBadRequest
jsonhttptest.Request(t, client, http.MethodPost, "/"+endpoint, expCode, jsonhttptest.Request(t, client, http.MethodPost, "/"+endpoint, expCode,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, hexbatch), jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, hexbatch),
jsonhttptest.WithRequestHeader(api.ContentTypeHeader, "application/octet-stream"), jsonhttptest.WithRequestHeader(api.ContentTypeHeader, "application/octet-stream"),
......
...@@ -52,10 +52,7 @@ func (s *server) bytesUploadHandler(w http.ResponseWriter, r *http.Request) { ...@@ -52,10 +52,7 @@ func (s *server) bytesUploadHandler(w http.ResponseWriter, r *http.Request) {
ctx := sctx.SetTag(r.Context(), tag) ctx := sctx.SetTag(r.Context(), tag)
batch, err := requestPostageBatchId(r) batch, err := requestPostageBatchId(r)
switch { if err != nil {
case errors.Is(err, errSwarmPostageBatchIDHeaderNotFound) && s.post.DefaultIssuer() != nil:
batch = s.post.DefaultIssuer().ID()
case err != nil:
logger.Debugf("bytes upload: postage batch id:%v", err) logger.Debugf("bytes upload: postage batch id:%v", err)
logger.Error("bytes upload: postage batch id") logger.Error("bytes upload: postage batch id")
jsonhttp.BadRequest(w, nil) jsonhttp.BadRequest(w, nil)
......
...@@ -46,10 +46,7 @@ func (s *server) bzzUploadHandler(w http.ResponseWriter, r *http.Request) { ...@@ -46,10 +46,7 @@ func (s *server) bzzUploadHandler(w http.ResponseWriter, r *http.Request) {
} }
batch, err := requestPostageBatchId(r) batch, err := requestPostageBatchId(r)
switch { if err != nil {
case errors.Is(err, errSwarmPostageBatchIDHeaderNotFound) && s.post.DefaultIssuer() != nil:
batch = s.post.DefaultIssuer().ID()
case err != nil:
logger.Debugf("bzz upload: postage batch id: %v", err) logger.Debugf("bzz upload: postage batch id: %v", err)
logger.Error("bzz upload: postage batch id") logger.Error("bzz upload: postage batch id")
jsonhttp.BadRequest(w, "invalid postage batch id") jsonhttp.BadRequest(w, "invalid postage batch id")
......
...@@ -86,10 +86,7 @@ func (s *server) chunkUploadHandler(w http.ResponseWriter, r *http.Request) { ...@@ -86,10 +86,7 @@ func (s *server) chunkUploadHandler(w http.ResponseWriter, r *http.Request) {
} }
batch, err := requestPostageBatchId(r) batch, err := requestPostageBatchId(r)
switch { if err != nil {
case errors.Is(err, errSwarmPostageBatchIDHeaderNotFound) && s.post.DefaultIssuer() != nil:
batch = s.post.DefaultIssuer().ID()
case err != nil:
s.logger.Debugf("chunk upload: postage batch id: %v", err) s.logger.Debugf("chunk upload: postage batch id: %v", err)
s.logger.Error("chunk upload: postage batch id") s.logger.Error("chunk upload: postage batch id")
jsonhttp.BadRequest(w, "invalid postage batch id") jsonhttp.BadRequest(w, "invalid postage batch id")
......
...@@ -17,6 +17,9 @@ type ( ...@@ -17,6 +17,9 @@ type (
TagResponse = tagResponse TagResponse = tagResponse
TagRequest = tagRequest TagRequest = tagRequest
ListTagsResponse = listTagsResponse ListTagsResponse = listTagsResponse
PostageCreateResponse = postageCreateResponse
PostageStampResponse = postageStampResponse
PostageStampsResponse = postageStampsResponse
) )
var ( var (
......
...@@ -142,10 +142,7 @@ func (s *server) feedPostHandler(w http.ResponseWriter, r *http.Request) { ...@@ -142,10 +142,7 @@ func (s *server) feedPostHandler(w http.ResponseWriter, r *http.Request) {
} }
batch, err := requestPostageBatchId(r) batch, err := requestPostageBatchId(r)
switch { if err != nil {
case errors.Is(err, errSwarmPostageBatchIDHeaderNotFound) && s.post.DefaultIssuer() != nil:
batch = s.post.DefaultIssuer().ID()
case err != nil:
s.logger.Debugf("feed put: postage batch id: %v", err) s.logger.Debugf("feed put: postage batch id: %v", err)
s.logger.Error("feed put: postage batch id") s.logger.Error("feed put: postage batch id")
jsonhttp.BadRequest(w, "invalid postage batch id") jsonhttp.BadRequest(w, "invalid postage batch id")
......
...@@ -212,9 +212,9 @@ func TestFeed_Post(t *testing.T) { ...@@ -212,9 +212,9 @@ func TestFeed_Post(t *testing.T) {
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, hexbatch), jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, hexbatch),
) )
}) })
t.Run("ok - batch empty", func(t *testing.T) { t.Run("bad request - batch empty", func(t *testing.T) {
hexbatch := hex.EncodeToString(batchEmpty) hexbatch := hex.EncodeToString(batchEmpty)
jsonhttptest.Request(t, client, http.MethodPost, url, http.StatusCreated, jsonhttptest.Request(t, client, http.MethodPost, url, http.StatusBadRequest,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, hexbatch), jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, hexbatch),
) )
}) })
......
// Copyright 2021 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package api
import (
"encoding/hex"
"encoding/json"
"errors"
"math/big"
"net/http"
"strconv"
"github.com/ethersphere/bee/pkg/bigint"
"github.com/ethersphere/bee/pkg/jsonhttp"
"github.com/ethersphere/bee/pkg/postage/postagecontract"
"github.com/ethersphere/bee/pkg/sctx"
"github.com/gorilla/mux"
)
const (
gasPriceHeader = "Gas-Price"
immutableHeader = "Immutable"
errBadGasPrice = "bad gas price"
)
type batchID []byte
func (b batchID) MarshalJSON() ([]byte, error) {
return json.Marshal(hex.EncodeToString(b))
}
type postageCreateResponse struct {
BatchID batchID `json:"batchID"`
}
func (s *server) postageCreateHandler(w http.ResponseWriter, r *http.Request) {
depthStr := mux.Vars(r)["depth"]
amount, ok := big.NewInt(0).SetString(mux.Vars(r)["amount"], 10)
if !ok {
s.logger.Error("create batch: invalid amount")
jsonhttp.BadRequest(w, "invalid postage amount")
return
}
depth, err := strconv.ParseUint(depthStr, 10, 8)
if err != nil {
s.logger.Debugf("create batch: invalid depth: %v", err)
s.logger.Error("create batch: invalid depth")
jsonhttp.BadRequest(w, "invalid depth")
return
}
label := r.URL.Query().Get("label")
ctx := r.Context()
if price, ok := r.Header[gasPriceHeader]; ok {
p, ok := big.NewInt(0).SetString(price[0], 10)
if !ok {
s.logger.Error("create batch: bad gas price")
jsonhttp.BadRequest(w, errBadGasPrice)
return
}
ctx = sctx.SetGasPrice(ctx, p)
}
var immutable bool
if val, ok := r.Header[immutableHeader]; ok {
immutable, _ = strconv.ParseBool(val[0])
}
batchID, err := s.postageContract.CreateBatch(ctx, amount, uint8(depth), immutable, label)
if err != nil {
if errors.Is(err, postagecontract.ErrInsufficientFunds) {
s.logger.Debugf("create batch: out of funds: %v", err)
s.logger.Error("create batch: out of funds")
jsonhttp.BadRequest(w, "out of funds")
return
}
if errors.Is(err, postagecontract.ErrInvalidDepth) {
s.logger.Debugf("create batch: invalid depth: %v", err)
s.logger.Error("create batch: invalid depth")
jsonhttp.BadRequest(w, "invalid depth")
return
}
s.logger.Debugf("create batch: failed to create: %v", err)
s.logger.Error("create batch: failed to create")
jsonhttp.InternalServerError(w, "cannot create batch")
return
}
jsonhttp.Created(w, &postageCreateResponse{
BatchID: batchID,
})
}
type postageStampResponse struct {
BatchID batchID `json:"batchID"`
Utilization uint32 `json:"utilization"`
Usable bool `json:"usable"`
Label string `json:"label"`
Depth uint8 `json:"depth"`
Amount *bigint.BigInt `json:"amount"`
BucketDepth uint8 `json:"bucketDepth"`
BlockNumber uint64 `json:"blockNumber"`
ImmutableFlag bool `json:"immutableFlag"`
}
type postageStampsResponse struct {
Stamps []postageStampResponse `json:"stamps"`
}
func (s *server) postageGetStampsHandler(w http.ResponseWriter, _ *http.Request) {
resp := postageStampsResponse{}
for _, v := range s.post.StampIssuers() {
resp.Stamps = append(resp.Stamps, postageStampResponse{
BatchID: v.ID(),
Utilization: v.Utilization(),
Usable: s.post.IssuerUsable(v),
Label: v.Label(),
Depth: v.Depth(),
Amount: bigint.Wrap(v.Amount()),
BucketDepth: v.BucketDepth(),
BlockNumber: v.BlockNumber(),
ImmutableFlag: v.ImmutableFlag(),
})
}
jsonhttp.OK(w, resp)
}
func (s *server) postageGetStampHandler(w http.ResponseWriter, r *http.Request) {
idStr := mux.Vars(r)["id"]
if idStr == "" || len(idStr) != 64 {
s.logger.Error("get stamp issuer: invalid batchID")
jsonhttp.BadRequest(w, "invalid batchID")
return
}
id, err := hex.DecodeString(idStr)
if err != nil {
s.logger.Error("get stamp issuer: invalid batchID: %v", err)
s.logger.Error("get stamp issuer: invalid batchID")
jsonhttp.BadRequest(w, "invalid batchID")
return
}
issuer, err := s.post.GetStampIssuer(id)
if err != nil {
s.logger.Error("get stamp issuer: get issuer: %v", err)
s.logger.Error("get stamp issuer: get issuer")
jsonhttp.BadRequest(w, "cannot get issuer")
return
}
resp := postageStampResponse{
BatchID: id,
Utilization: issuer.Utilization(),
Usable: s.post.IssuerUsable(issuer),
Label: issuer.Label(),
Depth: issuer.Depth(),
Amount: bigint.Wrap(issuer.Amount()),
BucketDepth: issuer.BucketDepth(),
BlockNumber: issuer.BlockNumber(),
ImmutableFlag: issuer.ImmutableFlag(),
}
jsonhttp.OK(w, &resp)
}
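For orientation only: a minimal, hedged sketch of exercising these handlers over HTTP once the routes registered further down expose them under /stamps. The localhost address, the default API port 1633, and the amount/depth values are illustrative assumptions, not part of this change.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Create a batch with an illustrative amount and depth; Gas-Price is the
	// optional header parsed by postageCreateHandler.
	req, err := http.NewRequest(http.MethodPost, "http://localhost:1633/stamps/10000000/20?label=demo", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Gas-Price", "10000000000")
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()

	var created struct {
		BatchID string `json:"batchID"` // mirrors postageCreateResponse
	}
	if err := json.NewDecoder(res.Body).Decode(&created); err != nil {
		panic(err)
	}
	fmt.Println("created batch", created.BatchID)

	// List all stamp issuers known to the node.
	list, err := http.Get("http://localhost:1633/stamps")
	if err != nil {
		panic(err)
	}
	defer list.Body.Close()
	var stamps struct {
		Stamps []struct {
			BatchID string `json:"batchID"`
			Usable  bool   `json:"usable"`
		} `json:"stamps"`
	}
	if err := json.NewDecoder(list.Body).Decode(&stamps); err != nil {
		panic(err)
	}
	fmt.Printf("%d stamps known\n", len(stamps.Stamps))
}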
// Copyright 2021 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package api_test
import (
"context"
"encoding/hex"
"errors"
"fmt"
"math/big"
"net/http"
"testing"
"github.com/ethersphere/bee/pkg/api"
"github.com/ethersphere/bee/pkg/bigint"
"github.com/ethersphere/bee/pkg/jsonhttp"
"github.com/ethersphere/bee/pkg/jsonhttp/jsonhttptest"
"github.com/ethersphere/bee/pkg/postage"
mockpost "github.com/ethersphere/bee/pkg/postage/mock"
"github.com/ethersphere/bee/pkg/postage/postagecontract"
contractMock "github.com/ethersphere/bee/pkg/postage/postagecontract/mock"
"github.com/ethersphere/bee/pkg/sctx"
)
func TestPostageCreateStamp(t *testing.T) {
batchID := []byte{1, 2, 3, 4}
initialBalance := int64(1000)
depth := uint8(1)
label := "label"
createBatch := func(amount int64, depth uint8, label string) string {
return fmt.Sprintf("/stamps/%d/%d?label=%s", amount, depth, label)
}
t.Run("ok", func(t *testing.T) {
contract := contractMock.New(
contractMock.WithCreateBatchFunc(func(ctx context.Context, ib *big.Int, d uint8, i bool, l string) ([]byte, error) {
if ib.Cmp(big.NewInt(initialBalance)) != 0 {
return nil, fmt.Errorf("called with wrong initial balance. wanted %d, got %d", initialBalance, ib)
}
if d != depth {
return nil, fmt.Errorf("called with wrong depth. wanted %d, got %d", depth, d)
}
if l != label {
return nil, fmt.Errorf("called with wrong label. wanted %s, got %s", label, l)
}
return batchID, nil
}),
)
client, _, _ := newTestServer(t, testServerOptions{
PostageContract: contract,
})
jsonhttptest.Request(t, client, http.MethodPost, createBatch(initialBalance, depth, label), http.StatusCreated,
jsonhttptest.WithExpectedJSONResponse(&api.PostageCreateResponse{
BatchID: batchID,
}),
)
})
t.Run("with-custom-gas", func(t *testing.T) {
contract := contractMock.New(
contractMock.WithCreateBatchFunc(func(ctx context.Context, ib *big.Int, d uint8, i bool, l string) ([]byte, error) {
if ib.Cmp(big.NewInt(initialBalance)) != 0 {
return nil, fmt.Errorf("called with wrong initial balance. wanted %d, got %d", initialBalance, ib)
}
if d != depth {
return nil, fmt.Errorf("called with wrong depth. wanted %d, got %d", depth, d)
}
if l != label {
return nil, fmt.Errorf("called with wrong label. wanted %s, got %s", label, l)
}
if sctx.GetGasPrice(ctx).Cmp(big.NewInt(10000)) != 0 {
return nil, fmt.Errorf("called with wrong gas price. wanted %d, got %d", 10000, sctx.GetGasPrice(ctx))
}
return batchID, nil
}),
)
client, _, _ := newTestServer(t, testServerOptions{
PostageContract: contract,
})
jsonhttptest.Request(t, client, http.MethodPost, createBatch(initialBalance, depth, label), http.StatusCreated,
jsonhttptest.WithRequestHeader("Gas-Price", "10000"),
jsonhttptest.WithExpectedJSONResponse(&api.PostageCreateResponse{
BatchID: batchID,
}),
)
})
t.Run("with-error", func(t *testing.T) {
contract := contractMock.New(
contractMock.WithCreateBatchFunc(func(ctx context.Context, ib *big.Int, d uint8, i bool, l string) ([]byte, error) {
return nil, errors.New("err")
}),
)
client, _, _ := newTestServer(t, testServerOptions{
PostageContract: contract,
})
jsonhttptest.Request(t, client, http.MethodPost, createBatch(initialBalance, depth, label), http.StatusInternalServerError,
jsonhttptest.WithExpectedJSONResponse(&jsonhttp.StatusResponse{
Code: http.StatusInternalServerError,
Message: "cannot create batch",
}),
)
})
t.Run("out-of-funds", func(t *testing.T) {
contract := contractMock.New(
contractMock.WithCreateBatchFunc(func(ctx context.Context, ib *big.Int, d uint8, i bool, l string) ([]byte, error) {
return nil, postagecontract.ErrInsufficientFunds
}),
)
client, _, _ := newTestServer(t, testServerOptions{
PostageContract: contract,
})
jsonhttptest.Request(t, client, http.MethodPost, createBatch(initialBalance, depth, label), http.StatusBadRequest,
jsonhttptest.WithExpectedJSONResponse(&jsonhttp.StatusResponse{
Code: http.StatusBadRequest,
Message: "out of funds",
}),
)
})
t.Run("invalid depth", func(t *testing.T) {
client, _, _ := newTestServer(t, testServerOptions{})
jsonhttptest.Request(t, client, http.MethodPost, "/stamps/1000/ab", http.StatusBadRequest,
jsonhttptest.WithExpectedJSONResponse(&jsonhttp.StatusResponse{
Code: http.StatusBadRequest,
Message: "invalid depth",
}),
)
})
t.Run("depth less than bucket depth", func(t *testing.T) {
contract := contractMock.New(
contractMock.WithCreateBatchFunc(func(ctx context.Context, ib *big.Int, d uint8, i bool, l string) ([]byte, error) {
return nil, postagecontract.ErrInvalidDepth
}),
)
client, _, _ := newTestServer(t, testServerOptions{
PostageContract: contract,
})
jsonhttptest.Request(t, client, http.MethodPost, "/stamps/1000/9", http.StatusBadRequest,
jsonhttptest.WithExpectedJSONResponse(&jsonhttp.StatusResponse{
Code: http.StatusBadRequest,
Message: "invalid depth",
}),
)
})
t.Run("invalid balance", func(t *testing.T) {
client, _, _ := newTestServer(t, testServerOptions{})
jsonhttptest.Request(t, client, http.MethodPost, "/stamps/abcd/2", http.StatusBadRequest,
jsonhttptest.WithExpectedJSONResponse(&jsonhttp.StatusResponse{
Code: http.StatusBadRequest,
Message: "invalid postage amount",
}),
)
})
t.Run("immutable header", func(t *testing.T) {
var immutable bool
contract := contractMock.New(
contractMock.WithCreateBatchFunc(func(ctx context.Context, _ *big.Int, _ uint8, i bool, _ string) ([]byte, error) {
immutable = i
return batchID, nil
}),
)
client, _, _ := newTestServer(t, testServerOptions{
PostageContract: contract,
})
jsonhttptest.Request(t, client, http.MethodPost, "/stamps/1000/24", http.StatusCreated,
jsonhttptest.WithRequestHeader("Immutable", "true"),
jsonhttptest.WithExpectedJSONResponse(&api.PostageCreateResponse{
BatchID: batchID,
}),
)
if !immutable {
t.Fatalf("want true, got %v", immutable)
}
})
}
func TestPostageGetStamps(t *testing.T) {
si := postage.NewStampIssuer("", "", batchOk, big.NewInt(3), 11, 10, 1000, true)
mp := mockpost.New(mockpost.WithIssuer(si))
client, _, _ := newTestServer(t, testServerOptions{Post: mp})
jsonhttptest.Request(t, client, http.MethodGet, "/stamps", http.StatusOK,
jsonhttptest.WithExpectedJSONResponse(&api.PostageStampsResponse{
Stamps: []api.PostageStampResponse{
{
BatchID: batchOk,
Utilization: si.Utilization(),
Usable: true,
Label: si.Label(),
Depth: si.Depth(),
Amount: bigint.Wrap(si.Amount()),
BucketDepth: si.BucketDepth(),
BlockNumber: si.BlockNumber(),
ImmutableFlag: si.ImmutableFlag(),
},
},
}),
)
}
func TestPostageGetStamp(t *testing.T) {
si := postage.NewStampIssuer("", "", batchOk, big.NewInt(3), 11, 10, 1000, true)
mp := mockpost.New(mockpost.WithIssuer(si))
client, _, _ := newTestServer(t, testServerOptions{Post: mp})
t.Run("ok", func(t *testing.T) {
jsonhttptest.Request(t, client, http.MethodGet, "/stamps/"+batchOkStr, http.StatusOK,
jsonhttptest.WithExpectedJSONResponse(&api.PostageStampResponse{
BatchID: batchOk,
Utilization: si.Utilization(),
Usable: true,
Label: si.Label(),
Depth: si.Depth(),
Amount: bigint.Wrap(si.Amount()),
BucketDepth: si.BucketDepth(),
BlockNumber: si.BlockNumber(),
ImmutableFlag: si.ImmutableFlag(),
}),
)
})
t.Run("ok", func(t *testing.T) {
badBatch := []byte{0, 1, 2}
jsonhttptest.Request(t, client, http.MethodGet, "/stamps/"+hex.EncodeToString(badBatch), http.StatusBadRequest,
jsonhttptest.WithExpectedJSONResponse(&jsonhttp.StatusResponse{
Code: http.StatusBadRequest,
Message: "invalid batchID",
}),
)
})
t.Run("ok", func(t *testing.T) {
badBatch := []byte{0, 1, 2, 4}
jsonhttptest.Request(t, client, http.MethodGet, "/stamps/"+hex.EncodeToString(badBatch), http.StatusBadRequest,
jsonhttptest.WithExpectedJSONResponse(&jsonhttp.StatusResponse{
Code: http.StatusBadRequest,
Message: "invalid batchID",
}),
)
})
}
...@@ -78,10 +78,7 @@ func (s *server) pssPostHandler(w http.ResponseWriter, r *http.Request) { ...@@ -78,10 +78,7 @@ func (s *server) pssPostHandler(w http.ResponseWriter, r *http.Request) {
return return
} }
batch, err := requestPostageBatchId(r) batch, err := requestPostageBatchId(r)
switch { if err != nil {
case errors.Is(err, errSwarmPostageBatchIDHeaderNotFound) && s.post.DefaultIssuer() != nil:
batch = s.post.DefaultIssuer().ID()
case err != nil:
s.logger.Debugf("pss: postage batch id: %v", err) s.logger.Debugf("pss: postage batch id: %v", err)
s.logger.Error("pss: postage batch id") s.logger.Error("pss: postage batch id")
jsonhttp.BadRequest(w, "invalid postage batch id") jsonhttp.BadRequest(w, "invalid postage batch id")
......
...@@ -235,9 +235,9 @@ func TestPssSend(t *testing.T) { ...@@ -235,9 +235,9 @@ func TestPssSend(t *testing.T) {
jsonhttptest.WithRequestBody(bytes.NewReader(payload)), jsonhttptest.WithRequestBody(bytes.NewReader(payload)),
) )
}) })
t.Run("ok batch - batch empty", func(t *testing.T) { t.Run("bad request - batch empty", func(t *testing.T) {
hexbatch := hex.EncodeToString(batchEmpty) hexbatch := hex.EncodeToString(batchEmpty)
jsonhttptest.Request(t, client, http.MethodPost, "/pss/send/to/12", http.StatusCreated, jsonhttptest.Request(t, client, http.MethodPost, "/pss/send/to/12", http.StatusBadRequest,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, hexbatch), jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, hexbatch),
jsonhttptest.WithRequestBody(bytes.NewReader(payload)), jsonhttptest.WithRequestBody(bytes.NewReader(payload)),
) )
......
...@@ -156,6 +156,27 @@ func (s *server) setupRouting() { ...@@ -156,6 +156,27 @@ func (s *server) setupRouting() {
})), })),
) )
handle("/stamps", web.ChainHandlers(
s.gatewayModeForbidEndpointHandler,
web.FinalHandler(jsonhttp.MethodHandler{
"GET": http.HandlerFunc(s.postageGetStampsHandler),
})),
)
handle("/stamps/{id}", web.ChainHandlers(
s.gatewayModeForbidEndpointHandler,
web.FinalHandler(jsonhttp.MethodHandler{
"GET": http.HandlerFunc(s.postageGetStampHandler),
})),
)
handle("/stamps/{amount}/{depth}", web.ChainHandlers(
s.gatewayModeForbidEndpointHandler,
web.FinalHandler(jsonhttp.MethodHandler{
"POST": http.HandlerFunc(s.postageCreateHandler),
})),
)
s.Handler = web.ChainHandlers( s.Handler = web.ChainHandlers(
httpaccess.NewHTTPAccessLogHandler(s.logger, logrus.InfoLevel, s.tracer, "api access"), httpaccess.NewHTTPAccessLogHandler(s.logger, logrus.InfoLevel, s.tracer, "api access"),
handlers.CompressHandler, handlers.CompressHandler,
......
...@@ -128,10 +128,7 @@ func (s *server) socUploadHandler(w http.ResponseWriter, r *http.Request) { ...@@ -128,10 +128,7 @@ func (s *server) socUploadHandler(w http.ResponseWriter, r *http.Request) {
return return
} }
batch, err := requestPostageBatchId(r) batch, err := requestPostageBatchId(r)
switch { if err != nil {
case errors.Is(err, errSwarmPostageBatchIDHeaderNotFound) && s.post.DefaultIssuer() != nil:
batch = s.post.DefaultIssuer().ID()
case err != nil:
s.logger.Debugf("soc upload: postage batch id: %v", err) s.logger.Debugf("soc upload: postage batch id: %v", err)
s.logger.Error("soc upload: postage batch id") s.logger.Error("soc upload: postage batch id")
jsonhttp.BadRequest(w, "invalid postage batch id") jsonhttp.BadRequest(w, "invalid postage batch id")
......
...@@ -161,10 +161,10 @@ func TestSOC(t *testing.T) { ...@@ -161,10 +161,10 @@ func TestSOC(t *testing.T) {
jsonhttptest.WithRequestBody(bytes.NewReader(s.WrappedChunk.Data())), jsonhttptest.WithRequestBody(bytes.NewReader(s.WrappedChunk.Data())),
) )
}) })
t.Run("ok - batch empty", func(t *testing.T) { t.Run("err - batch empty", func(t *testing.T) {
s := testingsoc.GenerateMockSOC(t, testData) s := testingsoc.GenerateMockSOC(t, testData)
hexbatch := hex.EncodeToString(batchEmpty) hexbatch := hex.EncodeToString(batchEmpty)
jsonhttptest.Request(t, client, http.MethodPost, socResource(hex.EncodeToString(s.Owner), hex.EncodeToString(s.ID), hex.EncodeToString(s.Signature)), http.StatusCreated, jsonhttptest.Request(t, client, http.MethodPost, socResource(hex.EncodeToString(s.Owner), hex.EncodeToString(s.ID), hex.EncodeToString(s.Signature)), http.StatusBadRequest,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, hexbatch), jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, hexbatch),
jsonhttptest.WithRequestBody(bytes.NewReader(s.WrappedChunk.Data())), jsonhttptest.WithRequestBody(bytes.NewReader(s.WrappedChunk.Data())),
) )
......
package config
import (
"github.com/ethereum/go-ethereum/common"
)
var (
// chain ID
goerliChainID = int64(5)
xdaiChainID = int64(100)
// start block
goerliStartBlock = uint64(4933174)
xdaiStartBlock = uint64(16515648)
// factory address
goerliContractAddress = common.HexToAddress("0x0c9de531dcb38b758fe8a2c163444a5e54ee0db2")
xdaiContractAddress = common.HexToAddress("0x0FDc5429C50e2a39066D8A94F3e2D2476fcc3b85")
goerliFactoryAddress = common.HexToAddress("0x73c412512E1cA0be3b89b77aB3466dA6A1B9d273")
xdaiFactoryAddress = common.HexToAddress("0xc2d5a532cf69aa9a1378737d8ccdef884b6e7420")
goerliLegacyFactoryAddress = common.HexToAddress("0xf0277caffea72734853b834afc9892461ea18474")
// postage stamp
goerliPostageStampContractAddress = common.HexToAddress("0x621e455C4a139f5C4e4A8122Ce55Dc21630769E4")
xdaiPostageStampContractAddress = common.HexToAddress("0x6a1a21eca3ab28be85c7ba22b2d6eae5907c900e")
)
type ChainConfig struct {
StartBlock uint64
LegacyFactories []common.Address
PostageStamp common.Address
CurrentFactory common.Address
PriceOracleAddress common.Address
}
func GetChainConfig(chainID int64) (*ChainConfig, bool) {
var cfg ChainConfig
switch chainID {
case goerliChainID:
cfg.PostageStamp = goerliPostageStampContractAddress
cfg.StartBlock = goerliStartBlock
cfg.CurrentFactory = goerliFactoryAddress
cfg.LegacyFactories = []common.Address{
goerliLegacyFactoryAddress,
}
cfg.PriceOracleAddress = goerliContractAddress
return &cfg, true
case xdaiChainID:
cfg.PostageStamp = xdaiPostageStampContractAddress
cfg.StartBlock = xdaiStartBlock
cfg.CurrentFactory = xdaiFactoryAddress
cfg.LegacyFactories = []common.Address{}
cfg.PriceOracleAddress = xdaiContractAddress
return &cfg, true
default:
return &cfg, false
}
}
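A minimal sketch (not part of this change) of how a caller might consume GetChainConfig; the chain id constant mirrors the xDAI value defined above, and an unknown id returns found == false together with a zero-valued ChainConfig.

package main

import (
	"fmt"

	"github.com/ethersphere/bee/pkg/config"
)

func main() {
	cfg, found := config.GetChainConfig(100) // 100 is xdaiChainID above
	if !found {
		fmt.Println("unknown chain: caller must supply contract addresses explicitly")
		return
	}
	fmt.Printf("postage stamp contract: %s, sync from block %d\n",
		cfg.PostageStamp.Hex(), cfg.StartBlock)
}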
...@@ -7,6 +7,7 @@ package clef ...@@ -7,6 +7,7 @@ package clef
import ( import (
"crypto/ecdsa" "crypto/ecdsa"
"errors" "errors"
"fmt"
"math/big" "math/big"
"os" "os"
"path/filepath" "path/filepath"
...@@ -129,7 +130,16 @@ func (c *clefSigner) Sign(data []byte) ([]byte, error) { ...@@ -129,7 +130,16 @@ func (c *clefSigner) Sign(data []byte) ([]byte, error) {
// SignTx signs an ethereum transaction. // SignTx signs an ethereum transaction.
func (c *clefSigner) SignTx(transaction *types.Transaction, chainID *big.Int) (*types.Transaction, error) { func (c *clefSigner) SignTx(transaction *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
// chainId is nil here because it is set on the clef side // chainId is nil here because it is set on the clef side
return c.clef.SignTx(c.account, transaction, nil) tx, err := c.clef.SignTx(c.account, transaction, nil)
if err != nil {
return nil, err
}
if chainID.Cmp(tx.ChainId()) != 0 {
return nil, fmt.Errorf("misconfigured signer: wrong chain id %d; wanted %d", tx.ChainId(), chainID)
}
return tx, nil
} }
// EthereumAddress returns the ethereum address this signer uses. // EthereumAddress returns the ethereum address this signer uses.
......
...@@ -34,6 +34,8 @@ type ( ...@@ -34,6 +34,8 @@ type (
PostageCreateResponse = postageCreateResponse PostageCreateResponse = postageCreateResponse
PostageStampResponse = postageStampResponse PostageStampResponse = postageStampResponse
PostageStampsResponse = postageStampsResponse PostageStampsResponse = postageStampsResponse
PostageStampBucketsResponse = postageStampBucketsResponse
BucketData = bucketData
) )
var ( var (
......
...@@ -8,6 +8,7 @@ import ( ...@@ -8,6 +8,7 @@ import (
"github.com/ethersphere/bee" "github.com/ethersphere/bee"
"github.com/ethersphere/bee/pkg/metrics" "github.com/ethersphere/bee/pkg/metrics"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
) )
func newMetricsRegistry() (r *prometheus.Registry) { func newMetricsRegistry() (r *prometheus.Registry) {
...@@ -15,10 +16,10 @@ func newMetricsRegistry() (r *prometheus.Registry) { ...@@ -15,10 +16,10 @@ func newMetricsRegistry() (r *prometheus.Registry) {
// register standard metrics // register standard metrics
r.MustRegister( r.MustRegister(
prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{ collectors.NewProcessCollector(collectors.ProcessCollectorOpts{
Namespace: metrics.Namespace, Namespace: metrics.Namespace,
}), }),
prometheus.NewGoCollector(), collectors.NewGoCollector(),
prometheus.NewGauge(prometheus.GaugeOpts{ prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: metrics.Namespace, Namespace: metrics.Namespace,
Name: "info", Name: "info",
......
...@@ -104,7 +104,7 @@ func TestConnect(t *testing.T) { ...@@ -104,7 +104,7 @@ func TestConnect(t *testing.T) {
func TestDisconnect(t *testing.T) { func TestDisconnect(t *testing.T) {
address := swarm.MustParseHexAddress("ca1e9f3938cc1425c6061b96ad9eb93e134dfe8734ad490164ef20af9d1cf59c") address := swarm.MustParseHexAddress("ca1e9f3938cc1425c6061b96ad9eb93e134dfe8734ad490164ef20af9d1cf59c")
unknownAdddress := swarm.MustParseHexAddress("ca1e9f3938cc1425c6061b96ad9eb93e134dfe8734ad490164ef20af9d1cf59e") unknownAddress := swarm.MustParseHexAddress("ca1e9f3938cc1425c6061b96ad9eb93e134dfe8734ad490164ef20af9d1cf59e")
errorAddress := swarm.MustParseHexAddress("ca1e9f3938cc1425c6061b96ad9eb93e134dfe8734ad490164ef20af9d1cf59a") errorAddress := swarm.MustParseHexAddress("ca1e9f3938cc1425c6061b96ad9eb93e134dfe8734ad490164ef20af9d1cf59a")
testErr := errors.New("test error") testErr := errors.New("test error")
...@@ -132,7 +132,7 @@ func TestDisconnect(t *testing.T) { ...@@ -132,7 +132,7 @@ func TestDisconnect(t *testing.T) {
}) })
t.Run("unknown", func(t *testing.T) { t.Run("unknown", func(t *testing.T) {
jsonhttptest.Request(t, testServer.Client, http.MethodDelete, "/peers/"+unknownAdddress.String(), http.StatusBadRequest, jsonhttptest.Request(t, testServer.Client, http.MethodDelete, "/peers/"+unknownAddress.String(), http.StatusBadRequest,
jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{ jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{
Code: http.StatusBadRequest, Code: http.StatusBadRequest,
Message: "peer not found", Message: "peer not found",
......
...@@ -14,7 +14,6 @@ import ( ...@@ -14,7 +14,6 @@ import (
"github.com/ethersphere/bee/pkg/bigint" "github.com/ethersphere/bee/pkg/bigint"
"github.com/ethersphere/bee/pkg/jsonhttp" "github.com/ethersphere/bee/pkg/jsonhttp"
"github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/postage/postagecontract" "github.com/ethersphere/bee/pkg/postage/postagecontract"
"github.com/ethersphere/bee/pkg/sctx" "github.com/ethersphere/bee/pkg/sctx"
"github.com/gorilla/mux" "github.com/gorilla/mux"
...@@ -102,15 +101,35 @@ type postageStampResponse struct { ...@@ -102,15 +101,35 @@ type postageStampResponse struct {
BucketDepth uint8 `json:"bucketDepth"` BucketDepth uint8 `json:"bucketDepth"`
BlockNumber uint64 `json:"blockNumber"` BlockNumber uint64 `json:"blockNumber"`
ImmutableFlag bool `json:"immutableFlag"` ImmutableFlag bool `json:"immutableFlag"`
Exists bool `json:"exists"`
} }
type postageStampsResponse struct { type postageStampsResponse struct {
Stamps []postageStampResponse `json:"stamps"` Stamps []postageStampResponse `json:"stamps"`
} }
type postageStampBucketsResponse struct {
Depth uint8 `json:"depth"`
BucketDepth uint8 `json:"bucketDepth"`
BucketUpperBound uint32 `json:"bucketUpperBound"`
Buckets []bucketData `json:"buckets"`
}
type bucketData struct {
BucketID uint32 `json:"bucketID"`
Collisions uint32 `json:"collisions"`
}
func (s *Service) postageGetStampsHandler(w http.ResponseWriter, _ *http.Request) { func (s *Service) postageGetStampsHandler(w http.ResponseWriter, _ *http.Request) {
resp := postageStampsResponse{} resp := postageStampsResponse{}
for _, v := range s.post.StampIssuers() { for _, v := range s.post.StampIssuers() {
exists, err := s.post.BatchExists(v.ID())
if err != nil {
s.logger.Errorf("get stamp issuer: check batch: %v", err)
s.logger.Error("get stamp issuer: check batch")
jsonhttp.InternalServerError(w, "unable to check batch")
return
}
resp.Stamps = append(resp.Stamps, postageStampResponse{ resp.Stamps = append(resp.Stamps, postageStampResponse{
BatchID: v.ID(), BatchID: v.ID(),
Utilization: v.Utilization(), Utilization: v.Utilization(),
...@@ -121,14 +140,15 @@ func (s *Service) postageGetStampsHandler(w http.ResponseWriter, _ *http.Request ...@@ -121,14 +140,15 @@ func (s *Service) postageGetStampsHandler(w http.ResponseWriter, _ *http.Request
BucketDepth: v.BucketDepth(), BucketDepth: v.BucketDepth(),
BlockNumber: v.BlockNumber(), BlockNumber: v.BlockNumber(),
ImmutableFlag: v.ImmutableFlag(), ImmutableFlag: v.ImmutableFlag(),
Exists: exists,
}) })
} }
jsonhttp.OK(w, resp) jsonhttp.OK(w, resp)
} }
func (s *Service) postageGetStampHandler(w http.ResponseWriter, r *http.Request) { func (s *Service) postageGetStampBucketsHandler(w http.ResponseWriter, r *http.Request) {
idStr := mux.Vars(r)["id"] idStr := mux.Vars(r)["id"]
if idStr == "" || len(idStr) != 64 { if len(idStr) != 64 {
s.logger.Error("get stamp issuer: invalid batchID") s.logger.Error("get stamp issuer: invalid batchID")
jsonhttp.BadRequest(w, "invalid batchID") jsonhttp.BadRequest(w, "invalid batchID")
return return
...@@ -145,47 +165,67 @@ func (s *Service) postageGetStampHandler(w http.ResponseWriter, r *http.Request) ...@@ -145,47 +165,67 @@ func (s *Service) postageGetStampHandler(w http.ResponseWriter, r *http.Request)
if err != nil { if err != nil {
s.logger.Error("get stamp issuer: get issuer: %v", err) s.logger.Error("get stamp issuer: get issuer: %v", err)
s.logger.Error("get stamp issuer: get issuer") s.logger.Error("get stamp issuer: get issuer")
jsonhttp.BadRequest(w, "cannot get issuer") jsonhttp.BadRequest(w, "cannot get batch")
return return
} }
resp := postageStampResponse{
BatchID: id, b := issuer.Buckets()
Utilization: issuer.Utilization(), resp := postageStampBucketsResponse{
Usable: s.post.IssuerUsable(issuer), Depth: issuer.Depth(),
Label: issuer.Label(), BucketDepth: issuer.BucketDepth(),
Depth: issuer.Depth(), BucketUpperBound: issuer.BucketUpperBound(),
Amount: bigint.Wrap(issuer.Amount()), Buckets: make([]bucketData, len(b)),
BucketDepth: issuer.BucketDepth(),
BlockNumber: issuer.BlockNumber(),
ImmutableFlag: issuer.ImmutableFlag(),
} }
jsonhttp.OK(w, &resp)
for i, v := range b {
resp.Buckets[i] = bucketData{BucketID: uint32(i), Collisions: v}
}
jsonhttp.OK(w, resp)
} }
// postageSetDefaultStampIssuerHandler sets the default postage stamps issuer. func (s *Service) postageGetStampHandler(w http.ResponseWriter, r *http.Request) {
func (s *Service) postageSetDefaultStampIssuerHandler(w http.ResponseWriter, r *http.Request) {
idStr := mux.Vars(r)["id"] idStr := mux.Vars(r)["id"]
if idStr == "" || len(idStr) != 64 { if len(idStr) != 64 {
s.logger.Error("get stamp issuer: invalid batchID") s.logger.Error("get stamp issuer: invalid batchID")
jsonhttp.BadRequest(w, "invalid batchID") jsonhttp.BadRequest(w, "invalid batchID")
return return
} }
id, err := hex.DecodeString(idStr) id, err := hex.DecodeString(idStr)
if err != nil { if err != nil {
s.logger.Error("set stamp issuer: invalid batchID: %v", err) s.logger.Errorf("get stamp issuer: invalid batchID: %v", err)
s.logger.Error("set stamp issuer: invalid batchID") s.logger.Error("get stamp issuer: invalid batchID")
jsonhttp.BadRequest(w, "invalid batchID") jsonhttp.BadRequest(w, "invalid batchID")
return return
} }
switch err := s.post.SetDefaultIssuer(id); { issuer, err := s.post.GetStampIssuer(id)
case errors.Is(err, postage.ErrNotFound): if err != nil {
jsonhttp.NotFound(w, nil) s.logger.Errorf("get stamp issuer: get issuer: %v", err)
case err != nil: s.logger.Error("get stamp issuer: get issuer")
s.logger.Debugf("debug api: set default stamp issuer: %v", err) jsonhttp.BadRequest(w, "cannot get issuer")
jsonhttp.InternalServerError(w, err) return
}
exists, err := s.post.BatchExists(id)
if err != nil {
s.logger.Errorf("get stamp issuer: check batch: %v", err)
s.logger.Error("get stamp issuer: check batch")
jsonhttp.InternalServerError(w, "unable to check batch")
return
} }
resp := postageStampResponse{
BatchID: id,
Utilization: issuer.Utilization(),
Usable: s.post.IssuerUsable(issuer),
Label: issuer.Label(),
Depth: issuer.Depth(),
Amount: bigint.Wrap(issuer.Amount()),
BucketDepth: issuer.BucketDepth(),
BlockNumber: issuer.BlockNumber(),
ImmutableFlag: issuer.ImmutableFlag(),
Exists: exists,
}
jsonhttp.OK(w, &resp)
} }
type reserveStateResponse struct { type reserveStateResponse struct {
......
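A hedged consumer-side sketch of the new buckets endpoint; the struct fields mirror the JSON tags of postageStampBucketsResponse above, while the localhost address and the default debug API port 1635 are assumptions for illustration.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"os"
)

// bucket and bucketsResponse mirror the JSON emitted by postageGetStampBucketsHandler.
type bucket struct {
	BucketID   uint32 `json:"bucketID"`
	Collisions uint32 `json:"collisions"`
}

type bucketsResponse struct {
	Depth            uint8    `json:"depth"`
	BucketDepth      uint8    `json:"bucketDepth"`
	BucketUpperBound uint32   `json:"bucketUpperBound"`
	Buckets          []bucket `json:"buckets"`
}

func main() {
	batchID := os.Args[1] // hex-encoded batch id supplied by the caller
	res, err := http.Get("http://localhost:1635/stamps/" + batchID + "/buckets")
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()

	var br bucketsResponse
	if err := json.NewDecoder(res.Body).Decode(&br); err != nil {
		panic(err)
	}
	// The fullest bucket bounds how many more chunks this batch can still stamp.
	var fullest uint32
	for _, b := range br.Buckets {
		if b.Collisions > fullest {
			fullest = b.Collisions
		}
	}
	fmt.Printf("depth %d, fullest bucket %d of %d\n", br.Depth, fullest, br.BucketUpperBound)
}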
...@@ -211,6 +211,7 @@ func TestPostageGetStamps(t *testing.T) { ...@@ -211,6 +211,7 @@ func TestPostageGetStamps(t *testing.T) {
BucketDepth: si.BucketDepth(), BucketDepth: si.BucketDepth(),
BlockNumber: si.BlockNumber(), BlockNumber: si.BlockNumber(),
ImmutableFlag: si.ImmutableFlag(), ImmutableFlag: si.ImmutableFlag(),
Exists: true,
}, },
}, },
}), }),
...@@ -234,10 +235,11 @@ func TestPostageGetStamp(t *testing.T) { ...@@ -234,10 +235,11 @@ func TestPostageGetStamp(t *testing.T) {
BucketDepth: si.BucketDepth(), BucketDepth: si.BucketDepth(),
BlockNumber: si.BlockNumber(), BlockNumber: si.BlockNumber(),
ImmutableFlag: si.ImmutableFlag(), ImmutableFlag: si.ImmutableFlag(),
Exists: true,
}), }),
) )
}) })
t.Run("ok", func(t *testing.T) { t.Run("bad request", func(t *testing.T) {
badBatch := []byte{0, 1, 2} badBatch := []byte{0, 1, 2}
jsonhttptest.Request(t, ts.Client, http.MethodGet, "/stamps/"+hex.EncodeToString(badBatch), http.StatusBadRequest, jsonhttptest.Request(t, ts.Client, http.MethodGet, "/stamps/"+hex.EncodeToString(badBatch), http.StatusBadRequest,
...@@ -247,7 +249,7 @@ func TestPostageGetStamp(t *testing.T) { ...@@ -247,7 +249,7 @@ func TestPostageGetStamp(t *testing.T) {
}), }),
) )
}) })
t.Run("ok", func(t *testing.T) { t.Run("bad request", func(t *testing.T) {
badBatch := []byte{0, 1, 2, 4} badBatch := []byte{0, 1, 2, 4}
jsonhttptest.Request(t, ts.Client, http.MethodGet, "/stamps/"+hex.EncodeToString(badBatch), http.StatusBadRequest, jsonhttptest.Request(t, ts.Client, http.MethodGet, "/stamps/"+hex.EncodeToString(badBatch), http.StatusBadRequest,
...@@ -259,6 +261,47 @@ func TestPostageGetStamp(t *testing.T) { ...@@ -259,6 +261,47 @@ func TestPostageGetStamp(t *testing.T) {
}) })
} }
func TestPostageGetBuckets(t *testing.T) {
si := postage.NewStampIssuer("", "", batchOk, big.NewInt(3), 11, 10, 1000, true)
mp := mockpost.New(mockpost.WithIssuer(si))
ts := newTestServer(t, testServerOptions{Post: mp})
buckets := make([]debugapi.BucketData, 1024)
for i := range buckets {
buckets[i] = debugapi.BucketData{BucketID: uint32(i)}
}
t.Run("ok", func(t *testing.T) {
jsonhttptest.Request(t, ts.Client, http.MethodGet, "/stamps/"+batchOkStr+"/buckets", http.StatusOK,
jsonhttptest.WithExpectedJSONResponse(&debugapi.PostageStampBucketsResponse{
Depth: si.Depth(),
BucketDepth: si.BucketDepth(),
BucketUpperBound: si.BucketUpperBound(),
Buckets: buckets,
}),
)
})
t.Run("bad batch", func(t *testing.T) {
badBatch := []byte{0, 1, 2}
jsonhttptest.Request(t, ts.Client, http.MethodGet, "/stamps/"+hex.EncodeToString(badBatch)+"/buckets", http.StatusBadRequest,
jsonhttptest.WithExpectedJSONResponse(&jsonhttp.StatusResponse{
Code: http.StatusBadRequest,
Message: "invalid batchID",
}),
)
})
t.Run("bad batch", func(t *testing.T) {
badBatch := []byte{0, 1, 2, 4}
jsonhttptest.Request(t, ts.Client, http.MethodGet, "/stamps/"+hex.EncodeToString(badBatch)+"/buckets", http.StatusBadRequest,
jsonhttptest.WithExpectedJSONResponse(&jsonhttp.StatusResponse{
Code: http.StatusBadRequest,
Message: "invalid batchID",
}),
)
})
}
func TestReserveState(t *testing.T) { func TestReserveState(t *testing.T) {
t.Run("ok", func(t *testing.T) { t.Run("ok", func(t *testing.T) {
ts := newTestServer(t, testServerOptions{ ts := newTestServer(t, testServerOptions{
......
...@@ -198,15 +198,15 @@ func (s *Service) newRouter() *mux.Router { ...@@ -198,15 +198,15 @@ func (s *Service) newRouter() *mux.Router {
})), })),
) )
router.Handle("/stamps/{amount}/{depth}", web.ChainHandlers( router.Handle("/stamps/{id}/buckets", web.ChainHandlers(
web.FinalHandler(jsonhttp.MethodHandler{ web.FinalHandler(jsonhttp.MethodHandler{
"POST": http.HandlerFunc(s.postageCreateHandler), "GET": http.HandlerFunc(s.postageGetStampBucketsHandler),
})), })),
) )
router.Handle("/stamps/default/{id}", web.ChainHandlers( router.Handle("/stamps/{amount}/{depth}", web.ChainHandlers(
web.FinalHandler(jsonhttp.MethodHandler{ web.FinalHandler(jsonhttp.MethodHandler{
"PUT": http.HandlerFunc(s.postageSetDefaultStampIssuerHandler), "POST": http.HandlerFunc(s.postageCreateHandler),
})), })),
) )
......
...@@ -14,6 +14,7 @@ import ( ...@@ -14,6 +14,7 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"golang.org/x/sync/semaphore"
"sync" "sync"
"time" "time"
...@@ -23,10 +24,9 @@ import ( ...@@ -23,10 +24,9 @@ import (
"github.com/ethersphere/bee/pkg/logging" "github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/p2p" "github.com/ethersphere/bee/pkg/p2p"
"github.com/ethersphere/bee/pkg/p2p/protobuf" "github.com/ethersphere/bee/pkg/p2p/protobuf"
"github.com/ethersphere/bee/pkg/ratelimit"
"github.com/ethersphere/bee/pkg/swarm" "github.com/ethersphere/bee/pkg/swarm"
ma "github.com/multiformats/go-multiaddr" ma "github.com/multiformats/go-multiaddr"
"golang.org/x/time/rate"
) )
const ( const (
...@@ -38,31 +38,43 @@ const ( ...@@ -38,31 +38,43 @@ const (
) )
var ( var (
limitBurst = 4 * int(swarm.MaxBins)
limitRate = time.Minute
ErrRateLimitExceeded = errors.New("rate limit exceeded") ErrRateLimitExceeded = errors.New("rate limit exceeded")
limitBurst = 4 * int(swarm.MaxBins)
limitRate = rate.Every(time.Minute)
) )
type Service struct { type Service struct {
streamer p2p.Streamer streamer p2p.StreamerPinger
addressBook addressbook.GetPutter addressBook addressbook.GetPutter
addPeersHandler func(...swarm.Address) addPeersHandler func(...swarm.Address)
networkID uint64 networkID uint64
logger logging.Logger logger logging.Logger
metrics metrics metrics metrics
limiter map[string]*rate.Limiter inLimiter *ratelimit.Limiter
limiterLock sync.Mutex outLimiter *ratelimit.Limiter
clearMtx sync.Mutex
quit chan struct{}
wg sync.WaitGroup
peersChan chan pb.Peers
sem *semaphore.Weighted
} }
func New(streamer p2p.Streamer, addressbook addressbook.GetPutter, networkID uint64, logger logging.Logger) *Service { func New(streamer p2p.StreamerPinger, addressbook addressbook.GetPutter, networkID uint64, logger logging.Logger) *Service {
return &Service{ svc := &Service{
streamer: streamer, streamer: streamer,
logger: logger, logger: logger,
addressBook: addressbook, addressBook: addressbook,
networkID: networkID, networkID: networkID,
metrics: newMetrics(), metrics: newMetrics(),
limiter: make(map[string]*rate.Limiter), inLimiter: ratelimit.New(limitRate, limitBurst),
outLimiter: ratelimit.New(limitRate, limitBurst),
quit: make(chan struct{}),
peersChan: make(chan pb.Peers),
sem: semaphore.NewWeighted(int64(31)),
} }
svc.startCheckPeersHandler()
return svc
} }
func (s *Service) Protocol() p2p.ProtocolSpec { func (s *Service) Protocol() p2p.ProtocolSpec {
...@@ -89,6 +101,12 @@ func (s *Service) BroadcastPeers(ctx context.Context, addressee swarm.Address, p ...@@ -89,6 +101,12 @@ func (s *Service) BroadcastPeers(ctx context.Context, addressee swarm.Address, p
if max > len(peers) { if max > len(peers) {
max = len(peers) max = len(peers)
} }
// If broadcasting limit is exceeded, return early
if !s.outLimiter.Allow(addressee.ByteString(), max) {
return nil
}
if err := s.sendPeers(ctx, addressee, peers[:max]); err != nil { if err := s.sendPeers(ctx, addressee, peers[:max]); err != nil {
return err return err
} }
...@@ -103,6 +121,23 @@ func (s *Service) SetAddPeersHandler(h func(addr ...swarm.Address)) { ...@@ -103,6 +121,23 @@ func (s *Service) SetAddPeersHandler(h func(addr ...swarm.Address)) {
s.addPeersHandler = h s.addPeersHandler = h
} }
func (s *Service) Close() error {
close(s.quit)
stopped := make(chan struct{})
go func() {
defer close(stopped)
s.wg.Wait()
}()
select {
case <-stopped:
return nil
case <-time.After(time.Second * 5):
return errors.New("hive: waited 5 seconds to close active goroutines")
}
}
func (s *Service) sendPeers(ctx context.Context, peer swarm.Address, peers []swarm.Address) (err error) { func (s *Service) sendPeers(ctx context.Context, peer swarm.Address, peers []swarm.Address) (err error) {
s.metrics.BroadcastPeersSends.Inc() s.metrics.BroadcastPeersSends.Inc()
stream, err := s.streamer.NewStream(ctx, peer, nil, protocolName, protocolVersion, peersStreamName) stream, err := s.streamer.NewStream(ctx, peer, nil, protocolName, protocolVersion, peersStreamName)
...@@ -158,9 +193,9 @@ func (s *Service) peersHandler(ctx context.Context, peer p2p.Peer, stream p2p.St ...@@ -158,9 +193,9 @@ func (s *Service) peersHandler(ctx context.Context, peer p2p.Peer, stream p2p.St
s.metrics.PeersHandlerPeers.Add(float64(len(peersReq.Peers))) s.metrics.PeersHandlerPeers.Add(float64(len(peersReq.Peers)))
if err := s.rateLimitPeer(peer.Address, len(peersReq.Peers)); err != nil { if !s.inLimiter.Allow(peer.Address.ByteString(), len(peersReq.Peers)) {
_ = stream.Reset() _ = stream.Reset()
return err return ErrRateLimitExceeded
} }
// close the stream before processing in order to unblock the sending side // close the stream before processing in order to unblock the sending side
...@@ -168,63 +203,108 @@ func (s *Service) peersHandler(ctx context.Context, peer p2p.Peer, stream p2p.St ...@@ -168,63 +203,108 @@ func (s *Service) peersHandler(ctx context.Context, peer p2p.Peer, stream p2p.St
// but we still want to handle not closed stream from the other side to avoid zombie stream // but we still want to handle not closed stream from the other side to avoid zombie stream
go stream.FullClose() go stream.FullClose()
var peers []swarm.Address select {
for _, newPeer := range peersReq.Peers { case s.peersChan <- peersReq:
case <-s.quit:
multiUnderlay, err := ma.NewMultiaddrBytes(newPeer.Underlay) return errors.New("failed to process peers, shutting down hive")
if err != nil { }
s.logger.Errorf("hive: multi address underlay err: %v", err)
continue
}
bzzAddress := bzz.Address{ return nil
Overlay: swarm.NewAddress(newPeer.Overlay), }
Underlay: multiUnderlay,
Signature: newPeer.Signature,
Transaction: newPeer.Transaction,
}
err = s.addressBook.Put(bzzAddress.Overlay, bzzAddress) func (s *Service) disconnect(peer p2p.Peer) error {
if err != nil {
s.logger.Warningf("skipping peer in response %s: %v", newPeer.String(), err)
continue
}
peers = append(peers, bzzAddress.Overlay) s.clearMtx.Lock()
} defer s.clearMtx.Unlock()
if s.addPeersHandler != nil { s.inLimiter.Clear(peer.Address.ByteString())
s.addPeersHandler(peers...) s.outLimiter.Clear(peer.Address.ByteString())
}
return nil return nil
} }
func (s *Service) rateLimitPeer(peer swarm.Address, count int) error { func (s *Service) startCheckPeersHandler() {
ctx, cancel := context.WithCancel(context.Background())
s.wg.Add(1)
go func() {
defer s.wg.Done()
<-s.quit
cancel()
}()
s.limiterLock.Lock() s.wg.Add(1)
defer s.limiterLock.Unlock() go func() {
defer s.wg.Done()
for {
select {
case <-ctx.Done():
return
case newPeers := <-s.peersChan:
s.wg.Add(1)
go func() {
defer s.wg.Done()
s.checkAndAddPeers(ctx, newPeers)
}()
}
}
}()
}
addr := peer.ByteString() func (s *Service) checkAndAddPeers(ctx context.Context, peers pb.Peers) {
limiter, ok := s.limiter[addr] var peersToAdd []swarm.Address
if !ok { mtx := sync.Mutex{}
limiter = rate.NewLimiter(limitRate, limitBurst) wg := sync.WaitGroup{}
s.limiter[addr] = limiter
}
if limiter.AllowN(time.Now(), count) { for _, p := range peers.Peers {
return nil err := s.sem.Acquire(ctx, 1)
} if err != nil {
return
}
return ErrRateLimitExceeded wg.Add(1)
} go func(newPeer *pb.BzzAddress) {
defer func() {
s.sem.Release(1)
wg.Done()
}()
multiUnderlay, err := ma.NewMultiaddrBytes(newPeer.Underlay)
if err != nil {
s.logger.Errorf("hive: multi address underlay err: %v", err)
return
}
func (s *Service) disconnect(peer p2p.Peer) error { // check if the underlay is usable by doing a raw ping using libp2p
s.limiterLock.Lock() _, err = s.streamer.Ping(ctx, multiUnderlay)
defer s.limiterLock.Unlock() if err != nil {
s.metrics.UnreachablePeers.Inc()
s.logger.Warningf("hive: multi address underlay %s not reachable err: %w", multiUnderlay, err)
return
}
delete(s.limiter, peer.Address.String()) bzzAddress := bzz.Address{
Overlay: swarm.NewAddress(newPeer.Overlay),
Underlay: multiUnderlay,
Signature: newPeer.Signature,
Transaction: newPeer.Transaction,
}
return nil err = s.addressBook.Put(bzzAddress.Overlay, bzzAddress)
if err != nil {
s.logger.Warningf("skipping peer in response %s: %v", newPeer.String(), err)
return
}
mtx.Lock()
peersToAdd = append(peersToAdd, bzzAddress.Overlay)
mtx.Unlock()
}(p)
}
wg.Wait()
if s.addPeersHandler != nil && len(peersToAdd) > 0 {
s.addPeersHandler(peersToAdd...)
}
} }
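A self-contained sketch of the bounded fan-out pattern checkAndAddPeers relies on: a weighted semaphore caps the number of checks in flight while a mutex guards the shared result slice. The integer items and the failing-check stand-in are illustrative placeholders, not bee APIs.

package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/semaphore"
)

func main() {
	ctx := context.Background()
	sem := semaphore.NewWeighted(4) // at most 4 checks running concurrently
	var (
		mtx  sync.Mutex
		kept []int
		wg   sync.WaitGroup
	)
	for i := 0; i < 20; i++ {
		if err := sem.Acquire(ctx, 1); err != nil {
			break // context cancelled: stop scheduling new checks
		}
		wg.Add(1)
		go func(n int) {
			defer func() { sem.Release(1); wg.Done() }()
			if n%3 == 0 { // stand-in for a failed reachability check
				return
			}
			mtx.Lock()
			kept = append(kept, n)
			mtx.Unlock()
		}(i)
	}
	wg.Wait()
	fmt.Println("kept", len(kept), "of 20")
}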
...@@ -46,8 +46,10 @@ func TestHandlerRateLimit(t *testing.T) { ...@@ -46,8 +46,10 @@ func TestHandlerRateLimit(t *testing.T) {
addressbookclean := ab.New(mock.NewStateStore()) addressbookclean := ab.New(mock.NewStateStore())
// new recorder for handling Ping
streamer := streamtest.New()
// create a hive server that handles the incoming stream // create a hive server that handles the incoming stream
server := hive.New(nil, addressbookclean, networkID, logger) server := hive.New(streamer, addressbookclean, networkID, logger)
serverAddress := test.RandomAddress() serverAddress := test.RandomAddress()
...@@ -98,8 +100,9 @@ func TestHandlerRateLimit(t *testing.T) { ...@@ -98,8 +100,9 @@ func TestHandlerRateLimit(t *testing.T) {
} }
lastRec := rec[len(rec)-1] lastRec := rec[len(rec)-1]
if !errors.Is(lastRec.Err(), hive.ErrRateLimitExceeded) {
t.Fatal(err) if lastRec.Err() != nil {
t.Fatal("want nil error")
} }
} }
...@@ -160,6 +163,7 @@ func TestBroadcastPeers(t *testing.T) { ...@@ -160,6 +163,7 @@ func TestBroadcastPeers(t *testing.T) {
wantMsgs []pb.Peers wantMsgs []pb.Peers
wantOverlays []swarm.Address wantOverlays []swarm.Address
wantBzzAddresses []bzz.Address wantBzzAddresses []bzz.Address
pingErr func(addr ma.Multiaddr) (time.Duration, error)
}{ }{
"OK - single record": { "OK - single record": {
addresee: swarm.MustParseHexAddress("ca1e9f3938cc1425c6061b96ad9eb93e134dfe8734ad490164ef20af9d1cf59c"), addresee: swarm.MustParseHexAddress("ca1e9f3938cc1425c6061b96ad9eb93e134dfe8734ad490164ef20af9d1cf59c"),
...@@ -196,14 +200,36 @@ func TestBroadcastPeers(t *testing.T) { ...@@ -196,14 +200,36 @@ func TestBroadcastPeers(t *testing.T) {
wantOverlays: overlays[:2*hive.MaxBatchSize], wantOverlays: overlays[:2*hive.MaxBatchSize],
wantBzzAddresses: bzzAddresses[:2*hive.MaxBatchSize], wantBzzAddresses: bzzAddresses[:2*hive.MaxBatchSize],
}, },
"OK - single batch - skip ping failures": {
addresee: swarm.MustParseHexAddress("ca1e9f3938cc1425c6061b96ad9eb93e134dfe8734ad490164ef20af9d1cf59c"),
peers: overlays[:15],
wantMsgs: []pb.Peers{{Peers: wantMsgs[0].Peers[:15]}},
wantOverlays: overlays[:10],
wantBzzAddresses: bzzAddresses[:10],
pingErr: func(addr ma.Multiaddr) (rtt time.Duration, err error) {
for _, v := range bzzAddresses[10:15] {
if v.Underlay.Equal(addr) {
return rtt, errors.New("ping failure")
}
}
return rtt, nil
},
},
} }
for name, tc := range testCases { for name, tc := range testCases {
t.Run(name, func(t *testing.T) { t.Run(name, func(t *testing.T) {
addressbookclean := ab.New(mock.NewStateStore()) addressbookclean := ab.New(mock.NewStateStore())
// new recorder for handling Ping
var streamer *streamtest.Recorder
if tc.pingErr != nil {
streamer = streamtest.New(streamtest.WithPingErr(tc.pingErr))
} else {
streamer = streamtest.New()
}
// create a hive server that handles the incoming stream // create a hive server that handles the incoming stream
server := hive.New(nil, addressbookclean, networkID, logger) server := hive.New(streamer, addressbookclean, networkID, logger)
// setup the stream recorder to record stream data // setup the stream recorder to record stream data
recorder := streamtest.New( recorder := streamtest.New(
......
...@@ -16,6 +16,7 @@ type metrics struct { ...@@ -16,6 +16,7 @@ type metrics struct {
PeersHandler prometheus.Counter PeersHandler prometheus.Counter
PeersHandlerPeers prometheus.Counter PeersHandlerPeers prometheus.Counter
UnreachablePeers prometheus.Counter
} }
func newMetrics() metrics { func newMetrics() metrics {
...@@ -52,6 +53,12 @@ func newMetrics() metrics { ...@@ -52,6 +53,12 @@ func newMetrics() metrics {
Name: "peers_handler_peers_count", Name: "peers_handler_peers_count",
Help: "Number of peers received in peer messages.", Help: "Number of peers received in peer messages.",
}), }),
UnreachablePeers: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: m.Namespace,
Subsystem: subsystem,
Name: "unreachable_peers_count",
Help: "Number of peers that are unreachable.",
}),
} }
} }
......
...@@ -137,8 +137,8 @@ func (db *DB) put(mode storage.ModePut, chs ...swarm.Chunk) (exist []bool, err e ...@@ -137,8 +137,8 @@ func (db *DB) put(mode storage.ModePut, chs ...swarm.Chunk) (exist []bool, err e
if err != nil { if err != nil {
return nil, err return nil, err
} }
gcSizeChange += c
} }
gcSizeChange += c
} }
case storage.ModePutSync: case storage.ModePutSync:
......
...@@ -260,6 +260,7 @@ func TestModePutUpload(t *testing.T) { ...@@ -260,6 +260,7 @@ func TestModePutUpload(t *testing.T) {
newPinIndexTest(db, ch, leveldb.ErrNotFound)(t) newPinIndexTest(db, ch, leveldb.ErrNotFound)(t)
} }
newItemsCountTest(db.postageIndexIndex, tc.count)(t) newItemsCountTest(db.postageIndexIndex, tc.count)(t)
newIndexGCSizeTest(db)(t)
}) })
} }
} }
...@@ -298,6 +299,7 @@ func TestModePutUploadPin(t *testing.T) { ...@@ -298,6 +299,7 @@ func TestModePutUploadPin(t *testing.T) {
newPinIndexTest(db, ch, nil)(t) newPinIndexTest(db, ch, nil)(t)
} }
newItemsCountTest(db.postageIndexIndex, tc.count)(t) newItemsCountTest(db.postageIndexIndex, tc.count)(t)
newIndexGCSizeTest(db)(t)
}) })
} }
} }
...@@ -484,6 +486,7 @@ func TestModePut_sameChunk(t *testing.T) { ...@@ -484,6 +486,7 @@ func TestModePut_sameChunk(t *testing.T) {
newItemsCountTest(db.retrievalDataIndex, tc.count)(t) newItemsCountTest(db.retrievalDataIndex, tc.count)(t)
newItemsCountTest(db.pullIndex, count(tcn.pullIndex))(t) newItemsCountTest(db.pullIndex, count(tcn.pullIndex))(t)
newItemsCountTest(db.pushIndex, count(tcn.pushIndex))(t) newItemsCountTest(db.pushIndex, count(tcn.pushIndex))(t)
newIndexGCSizeTest(db)(t)
} }
}) })
} }
......
...@@ -261,7 +261,11 @@ func (db *DB) setRemove(batch *leveldb.Batch, item shed.Item, check bool) (gcSiz ...@@ -261,7 +261,11 @@ func (db *DB) setRemove(batch *leveldb.Batch, item shed.Item, check bool) (gcSiz
func (db *DB) setPin(batch *leveldb.Batch, item shed.Item) (gcSizeChange int64, err error) { func (db *DB) setPin(batch *leveldb.Batch, item shed.Item) (gcSizeChange int64, err error) {
// Get the existing pin counter of the chunk // Get the existing pin counter of the chunk
i, err := db.pinIndex.Get(item) i, err := db.pinIndex.Get(item)
// this will not panic because shed.Index.Get returns an instance, not a pointer.
// we therefore leverage the default value of the pin counter on the item (zero).
item.PinCounter = i.PinCounter item.PinCounter = i.PinCounter
if err != nil { if err != nil {
if !errors.Is(err, leveldb.ErrNotFound) { if !errors.Is(err, leveldb.ErrNotFound) {
return 0, err return 0, err
......
...@@ -14,6 +14,7 @@ import ( ...@@ -14,6 +14,7 @@ import (
"fmt" "fmt"
"github.com/ethersphere/bee/pkg/logging" "github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/recovery" "github.com/ethersphere/bee/pkg/recovery"
"github.com/ethersphere/bee/pkg/retrieval" "github.com/ethersphere/bee/pkg/retrieval"
"github.com/ethersphere/bee/pkg/sctx" "github.com/ethersphere/bee/pkg/sctx"
...@@ -25,7 +26,7 @@ type store struct { ...@@ -25,7 +26,7 @@ type store struct {
storage.Storer storage.Storer
retrieval retrieval.Interface retrieval retrieval.Interface
logger logging.Logger logger logging.Logger
validStamp func(swarm.Chunk, []byte) (swarm.Chunk, error) validStamp postage.ValidStampFn
recoveryCallback recovery.Callback // this is the callback to be executed when a chunk fails to be retrieved recoveryCallback recovery.Callback // this is the callback to be executed when a chunk fails to be retrieved
} }
...@@ -34,7 +35,7 @@ var ( ...@@ -34,7 +35,7 @@ var (
) )
// New returns a new NetStore that wraps a given Storer. // New returns a new NetStore that wraps a given Storer.
func New(s storage.Storer, validStamp func(swarm.Chunk, []byte) (swarm.Chunk, error), rcb recovery.Callback, r retrieval.Interface, logger logging.Logger) storage.Storer { func New(s storage.Storer, validStamp postage.ValidStampFn, rcb recovery.Callback, r retrieval.Interface, logger logging.Logger) storage.Storer {
return &store{Storer: s, validStamp: validStamp, recoveryCallback: rcb, retrieval: r, logger: logger} return &store{Storer: s, validStamp: validStamp, recoveryCallback: rcb, retrieval: r, logger: logger}
} }
......
...@@ -16,6 +16,7 @@ import ( ...@@ -16,6 +16,7 @@ import (
"github.com/ethersphere/bee/pkg/logging" "github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/netstore" "github.com/ethersphere/bee/pkg/netstore"
"github.com/ethersphere/bee/pkg/postage"
postagetesting "github.com/ethersphere/bee/pkg/postage/testing" postagetesting "github.com/ethersphere/bee/pkg/postage/testing"
"github.com/ethersphere/bee/pkg/pss" "github.com/ethersphere/bee/pkg/pss"
"github.com/ethersphere/bee/pkg/recovery" "github.com/ethersphere/bee/pkg/recovery"
...@@ -186,7 +187,7 @@ func TestInvalidPostageStamp(t *testing.T) { ...@@ -186,7 +187,7 @@ func TestInvalidPostageStamp(t *testing.T) {
} }
// returns a mock retrieval protocol, a mock local storage and a netstore // returns a mock retrieval protocol, a mock local storage and a netstore
func newRetrievingNetstore(rec recovery.Callback, validStamp func(swarm.Chunk, []byte) (swarm.Chunk, error)) (ret *retrievalMock, mockStore *mock.MockStorer, ns storage.Storer) { func newRetrievingNetstore(rec recovery.Callback, validStamp postage.ValidStampFn) (ret *retrievalMock, mockStore *mock.MockStorer, ns storage.Storer) {
retrieve := &retrievalMock{} retrieve := &retrievalMock{}
store := mock.NewStorer() store := mock.NewStorer()
logger := logging.New(ioutil.Discard, 0) logger := logging.New(ioutil.Discard, 0)
......
...@@ -15,6 +15,7 @@ import ( ...@@ -15,6 +15,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethersphere/bee/pkg/config"
"github.com/ethersphere/bee/pkg/crypto" "github.com/ethersphere/bee/pkg/crypto"
"github.com/ethersphere/bee/pkg/logging" "github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/p2p/libp2p" "github.com/ethersphere/bee/pkg/p2p/libp2p"
...@@ -82,7 +83,9 @@ func InitChequebookFactory( ...@@ -82,7 +83,9 @@ func InitChequebookFactory(
var currentFactory common.Address var currentFactory common.Address
var legacyFactories []common.Address var legacyFactories []common.Address
foundFactory, foundLegacyFactories, found := chequebook.DiscoverFactoryAddress(chainID) chainCfg, found := config.GetChainConfig(chainID)
foundFactory, foundLegacyFactories := chainCfg.CurrentFactory, chainCfg.LegacyFactories
if factoryAddress == "" { if factoryAddress == "" {
if !found { if !found {
return nil, fmt.Errorf("no known factory address for this network (chain id: %d)", chainID) return nil, fmt.Errorf("no known factory address for this network (chain id: %d)", chainID)
...@@ -211,8 +214,8 @@ func InitSwap( ...@@ -211,8 +214,8 @@ func InitSwap(
var currentPriceOracleAddress common.Address var currentPriceOracleAddress common.Address
if priceOracleAddress == "" { if priceOracleAddress == "" {
var found bool chainCfg, found := config.GetChainConfig(chainID)
currentPriceOracleAddress, found = priceoracle.DiscoverPriceOracleAddress(chainID) currentPriceOracleAddress = chainCfg.PriceOracleAddress
if !found { if !found {
return nil, nil, errors.New("no known price oracle address for this network") return nil, nil, errors.New("no known price oracle address for this network")
} }
......
...@@ -29,6 +29,7 @@ import ( ...@@ -29,6 +29,7 @@ import (
"github.com/ethersphere/bee/pkg/accounting" "github.com/ethersphere/bee/pkg/accounting"
"github.com/ethersphere/bee/pkg/addressbook" "github.com/ethersphere/bee/pkg/addressbook"
"github.com/ethersphere/bee/pkg/api" "github.com/ethersphere/bee/pkg/api"
"github.com/ethersphere/bee/pkg/config"
"github.com/ethersphere/bee/pkg/crypto" "github.com/ethersphere/bee/pkg/crypto"
"github.com/ethersphere/bee/pkg/debugapi" "github.com/ethersphere/bee/pkg/debugapi"
"github.com/ethersphere/bee/pkg/feeds/factory" "github.com/ethersphere/bee/pkg/feeds/factory"
...@@ -75,6 +76,7 @@ import ( ...@@ -75,6 +76,7 @@ import (
"github.com/hashicorp/go-multierror" "github.com/hashicorp/go-multierror"
ma "github.com/multiformats/go-multiaddr" ma "github.com/multiformats/go-multiaddr"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"golang.org/x/crypto/sha3"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
) )
...@@ -106,6 +108,7 @@ type Bee struct { ...@@ -106,6 +108,7 @@ type Bee struct {
listenerCloser io.Closer listenerCloser io.Closer
postageServiceCloser io.Closer postageServiceCloser io.Closer
priceOracleCloser io.Closer priceOracleCloser io.Closer
hiveCloser io.Closer
shutdownInProgress bool shutdownInProgress bool
shutdownMutex sync.Mutex shutdownMutex sync.Mutex
} }
...@@ -127,7 +130,6 @@ type Options struct { ...@@ -127,7 +130,6 @@ type Options struct {
Bootnodes []string Bootnodes []string
CORSAllowedOrigins []string CORSAllowedOrigins []string
Logger logging.Logger Logger logging.Logger
Standalone bool
TracingEnabled bool TracingEnabled bool
TracingEndpoint string TracingEndpoint string
TracingServiceName string TracingServiceName string
...@@ -136,6 +138,7 @@ type Options struct { ...@@ -136,6 +138,7 @@ type Options struct {
PaymentTolerance string PaymentTolerance string
PaymentEarly string PaymentEarly string
ResolverConnectionCfgs []multiresolver.ConnectionConfig ResolverConnectionCfgs []multiresolver.ConnectionConfig
RetrievalCaching bool
GatewayMode bool GatewayMode bool
BootnodeMode bool BootnodeMode bool
SwapEndpoint string SwapEndpoint string
...@@ -151,6 +154,7 @@ type Options struct { ...@@ -151,6 +154,7 @@ type Options struct {
BlockTime uint64 BlockTime uint64
DeployGasPrice string DeployGasPrice string
WarmupTime time.Duration WarmupTime time.Duration
ChainID int64
} }
const ( const (
...@@ -211,21 +215,23 @@ func NewBee(addr string, publicKey *ecdsa.PublicKey, signer crypto.Signer, netwo ...@@ -211,21 +215,23 @@ func NewBee(addr string, publicKey *ecdsa.PublicKey, signer crypto.Signer, netwo
cashoutService chequebook.CashoutService cashoutService chequebook.CashoutService
pollingInterval = time.Duration(o.BlockTime) * time.Second pollingInterval = time.Duration(o.BlockTime) * time.Second
) )
if !o.Standalone { swapBackend, overlayEthAddress, chainID, transactionMonitor, transactionService, err = InitChain(
swapBackend, overlayEthAddress, chainID, transactionMonitor, transactionService, err = InitChain( p2pCtx,
p2pCtx, logger,
logger, stateStore,
stateStore, o.SwapEndpoint,
o.SwapEndpoint, signer,
signer, pollingInterval,
pollingInterval, )
) if err != nil {
if err != nil { return nil, fmt.Errorf("init chain: %w", err)
return nil, fmt.Errorf("init chain: %w", err) }
} b.ethClientCloser = swapBackend.Close
b.ethClientCloser = swapBackend.Close b.transactionCloser = tracerCloser
b.transactionCloser = tracerCloser b.transactionMonitorCloser = transactionMonitor
b.transactionMonitorCloser = transactionMonitor
if o.ChainID != -1 && o.ChainID != chainID {
return nil, fmt.Errorf("connected to wrong ethereum network: got chainID %d, want %d", chainID, o.ChainID)
} }
var debugAPIService *debugapi.Service var debugAPIService *debugapi.Service
...@@ -261,19 +267,17 @@ func NewBee(addr string, publicKey *ecdsa.PublicKey, signer crypto.Signer, netwo ...@@ -261,19 +267,17 @@ func NewBee(addr string, publicKey *ecdsa.PublicKey, signer crypto.Signer, netwo
b.debugAPIServer = debugAPIServer b.debugAPIServer = debugAPIServer
} }
if !o.Standalone { // Sync with the given Ethereum backend:
// Sync with the given Ethereum backend: isSynced, _, err := transaction.IsSynced(p2pCtx, swapBackend, maxDelay)
isSynced, _, err := transaction.IsSynced(p2pCtx, swapBackend, maxDelay) if err != nil {
if err != nil { return nil, fmt.Errorf("is synced: %w", err)
return nil, fmt.Errorf("is synced: %w", err) }
} if !isSynced {
if !isSynced { logger.Infof("waiting to sync with the Ethereum backend")
logger.Infof("waiting to sync with the Ethereum backend")
err := transaction.WaitSynced(logger, p2pCtx, swapBackend, maxDelay) err := transaction.WaitSynced(p2pCtx, logger, swapBackend, maxDelay)
if err != nil { if err != nil {
return nil, fmt.Errorf("waiting backend sync: %w", err) return nil, fmt.Errorf("waiting backend sync: %w", err)
}
} }
} }
...@@ -365,7 +369,6 @@ func NewBee(addr string, publicKey *ecdsa.PublicKey, signer crypto.Signer, netwo ...@@ -365,7 +369,6 @@ func NewBee(addr string, publicKey *ecdsa.PublicKey, signer crypto.Signer, netwo
NATAddr: o.NATAddr, NATAddr: o.NATAddr,
EnableWS: o.EnableWS, EnableWS: o.EnableWS,
EnableQUIC: o.EnableQUIC, EnableQUIC: o.EnableQUIC,
Standalone: o.Standalone,
WelcomeMessage: o.WelcomeMessage, WelcomeMessage: o.WelcomeMessage,
FullNode: o.FullNodeMode, FullNode: o.FullNodeMode,
Transaction: txHash, Transaction: txHash,
...@@ -425,52 +428,52 @@ func NewBee(addr string, publicKey *ecdsa.PublicKey, signer crypto.Signer, netwo ...@@ -425,52 +428,52 @@ func NewBee(addr string, publicKey *ecdsa.PublicKey, signer crypto.Signer, netwo
) )
var postageSyncStart uint64 = 0 var postageSyncStart uint64 = 0
if !o.Standalone { chainCfg, found := config.GetChainConfig(chainID)
postageContractAddress, startBlock, found := listener.DiscoverAddresses(chainID) postageContractAddress, startBlock := chainCfg.PostageStamp, chainCfg.StartBlock
if o.PostageContractAddress != "" { if o.PostageContractAddress != "" {
if !common.IsHexAddress(o.PostageContractAddress) { if !common.IsHexAddress(o.PostageContractAddress) {
return nil, errors.New("malformed postage stamp address") return nil, errors.New("malformed postage stamp address")
}
postageContractAddress = common.HexToAddress(o.PostageContractAddress)
} else if !found {
return nil, errors.New("no known postage stamp addresses for this network")
}
if found {
postageSyncStart = startBlock
} }
postageContractAddress = common.HexToAddress(o.PostageContractAddress)
} else if !found {
return nil, errors.New("no known postage stamp addresses for this network")
}
if found {
postageSyncStart = startBlock
}
eventListener = listener.New(logger, swapBackend, postageContractAddress, o.BlockTime, &pidKiller{node: b}) eventListener = listener.New(logger, swapBackend, postageContractAddress, o.BlockTime, &pidKiller{node: b})
b.listenerCloser = eventListener b.listenerCloser = eventListener
batchSvc = batchservice.New(stateStore, batchStore, logger, eventListener, overlayEthAddress.Bytes(), post)
erc20Address, err := postagecontract.LookupERC20Address(p2pCtx, transactionService, postageContractAddress) batchSvc, err = batchservice.New(stateStore, batchStore, logger, eventListener, overlayEthAddress.Bytes(), post, sha3.New256)
if err != nil { if err != nil {
return nil, err return nil, err
} }
postageContractService = postagecontract.New( erc20Address, err := postagecontract.LookupERC20Address(p2pCtx, transactionService, postageContractAddress)
overlayEthAddress, if err != nil {
postageContractAddress, return nil, err
erc20Address,
transactionService,
post,
)
} }
if !o.Standalone { postageContractService = postagecontract.New(
if natManager := p2ps.NATManager(); natManager != nil { overlayEthAddress,
// wait for nat manager to init postageContractAddress,
logger.Debug("initializing NAT manager") erc20Address,
select { transactionService,
case <-natManager.Ready(): post,
// this is a magic sleep to give NAT time to sync the mappings )
// this is a hack, kind of alchemy and should be improved
time.Sleep(3 * time.Second) if natManager := p2ps.NATManager(); natManager != nil {
logger.Debug("NAT manager initialized") // wait for nat manager to init
case <-time.After(10 * time.Second): logger.Debug("initializing NAT manager")
logger.Warning("NAT manager init timeout") select {
} case <-natManager.Ready():
// this is a magic sleep to give NAT time to sync the mappings
// this is a hack, kind of alchemy and should be improved
time.Sleep(3 * time.Second)
logger.Debug("NAT manager initialized")
case <-time.After(10 * time.Second):
logger.Warning("NAT manager init timeout")
} }
} }
...@@ -485,21 +488,19 @@ func NewBee(addr string, publicKey *ecdsa.PublicKey, signer crypto.Signer, netwo ...@@ -485,21 +488,19 @@ func NewBee(addr string, publicKey *ecdsa.PublicKey, signer crypto.Signer, netwo
if err = p2ps.AddProtocol(hive.Protocol()); err != nil { if err = p2ps.AddProtocol(hive.Protocol()); err != nil {
return nil, fmt.Errorf("hive service: %w", err) return nil, fmt.Errorf("hive service: %w", err)
} }
b.hiveCloser = hive
var bootnodes []ma.Multiaddr var bootnodes []ma.Multiaddr
if o.Standalone {
logger.Info("Starting node in standalone mode, no p2p connections will be made or accepted")
} else {
for _, a := range o.Bootnodes {
addr, err := ma.NewMultiaddr(a)
if err != nil {
logger.Debugf("multiaddress fail %s: %v", a, err)
logger.Warningf("invalid bootnode address %s", a)
continue
}
bootnodes = append(bootnodes, addr) for _, a := range o.Bootnodes {
addr, err := ma.NewMultiaddr(a)
if err != nil {
logger.Debugf("multiaddress fail %s: %v", a, err)
logger.Warningf("invalid bootnode address %s", a)
continue
} }
bootnodes = append(bootnodes, addr)
} }
var swapService *swap.Service var swapService *swap.Service
...@@ -509,7 +510,7 @@ func NewBee(addr string, publicKey *ecdsa.PublicKey, signer crypto.Signer, netwo ...@@ -509,7 +510,7 @@ func NewBee(addr string, publicKey *ecdsa.PublicKey, signer crypto.Signer, netwo
return nil, fmt.Errorf("unable to create metrics storage for kademlia: %w", err) return nil, fmt.Errorf("unable to create metrics storage for kademlia: %w", err)
} }
kad := kademlia.New(swarmAddress, addressbook, hive, p2ps, metricsDB, logger, kademlia.Options{Bootnodes: bootnodes, StandaloneMode: o.Standalone, BootnodeMode: o.BootnodeMode}) kad := kademlia.New(swarmAddress, addressbook, hive, p2ps, metricsDB, logger, kademlia.Options{Bootnodes: bootnodes, BootnodeMode: o.BootnodeMode})
b.topologyCloser = kad b.topologyCloser = kad
b.topologyHalter = kad b.topologyHalter = kad
hive.SetAddPeersHandler(kad.AddPeers) hive.SetAddPeersHandler(kad.AddPeers)
...@@ -621,7 +622,7 @@ func NewBee(addr string, publicKey *ecdsa.PublicKey, signer crypto.Signer, netwo ...@@ -621,7 +622,7 @@ func NewBee(addr string, publicKey *ecdsa.PublicKey, signer crypto.Signer, netwo
pricing.SetPaymentThresholdObserver(acc) pricing.SetPaymentThresholdObserver(acc)
retrieve := retrieval.New(swarmAddress, storer, p2ps, kad, logger, acc, pricer, tracer) retrieve := retrieval.New(swarmAddress, storer, p2ps, kad, logger, acc, pricer, tracer, o.RetrievalCaching, validStamp)
tagService := tags.NewTags(stateStore, logger) tagService := tags.NewTags(stateStore, logger)
b.tagsCloser = tagService b.tagsCloser = tagService
...@@ -748,6 +749,7 @@ func NewBee(addr string, publicKey *ecdsa.PublicKey, signer crypto.Signer, netwo ...@@ -748,6 +749,7 @@ func NewBee(addr string, publicKey *ecdsa.PublicKey, signer crypto.Signer, netwo
debugAPIService.MustRegisterMetrics(pullStorage.Metrics()...) debugAPIService.MustRegisterMetrics(pullStorage.Metrics()...)
debugAPIService.MustRegisterMetrics(retrieve.Metrics()...) debugAPIService.MustRegisterMetrics(retrieve.Metrics()...)
debugAPIService.MustRegisterMetrics(lightNodes.Metrics()...) debugAPIService.MustRegisterMetrics(lightNodes.Metrics()...)
debugAPIService.MustRegisterMetrics(hive.Metrics()...)
if bs, ok := batchStore.(metrics.Collector); ok { if bs, ok := batchStore.(metrics.Collector); ok {
debugAPIService.MustRegisterMetrics(bs.Metrics()...) debugAPIService.MustRegisterMetrics(bs.Metrics()...)
...@@ -846,7 +848,7 @@ func (b *Bee) Shutdown(ctx context.Context) error { ...@@ -846,7 +848,7 @@ func (b *Bee) Shutdown(ctx context.Context) error {
b.recoveryHandleCleanup() b.recoveryHandleCleanup()
} }
var wg sync.WaitGroup var wg sync.WaitGroup
wg.Add(5) wg.Add(6)
go func() { go func() {
defer wg.Done() defer wg.Done()
tryClose(b.pssCloser, "pss") tryClose(b.pssCloser, "pss")
...@@ -869,6 +871,10 @@ func (b *Bee) Shutdown(ctx context.Context) error { ...@@ -869,6 +871,10 @@ func (b *Bee) Shutdown(ctx context.Context) error {
defer wg.Done() defer wg.Done()
tryClose(b.pullSyncCloser, "pull sync") tryClose(b.pullSyncCloser, "pull sync")
}() }()
go func() {
defer wg.Done()
tryClose(b.hiveCloser, "hive")
}()
wg.Wait() wg.Wait()
......
...@@ -56,7 +56,7 @@ var ( ...@@ -56,7 +56,7 @@ var (
// AdvertisableAddressResolver can Resolve a Multiaddress. // AdvertisableAddressResolver can Resolve a Multiaddress.
type AdvertisableAddressResolver interface { type AdvertisableAddressResolver interface {
Resolve(observedAdddress ma.Multiaddr) (ma.Multiaddr, error) Resolve(observedAddress ma.Multiaddr) (ma.Multiaddr, error)
} }
type SenderMatcher interface { type SenderMatcher interface {
......
...@@ -729,7 +729,7 @@ type AdvertisableAddresserMock struct { ...@@ -729,7 +729,7 @@ type AdvertisableAddresserMock struct {
err error err error
} }
func (a *AdvertisableAddresserMock) Resolve(observedAdddress ma.Multiaddr) (ma.Multiaddr, error) { func (a *AdvertisableAddresserMock) Resolve(observedAddress ma.Multiaddr) (ma.Multiaddr, error) {
if a.err != nil { if a.err != nil {
return nil, a.err return nil, a.err
} }
...@@ -738,7 +738,7 @@ func (a *AdvertisableAddresserMock) Resolve(observedAdddress ma.Multiaddr) (ma.M ...@@ -738,7 +738,7 @@ func (a *AdvertisableAddresserMock) Resolve(observedAdddress ma.Multiaddr) (ma.M
return a.advertisableAddress, nil return a.advertisableAddress, nil
} }
return observedAdddress, nil return observedAddress, nil
} }
type MockSenderMatcher struct { type MockSenderMatcher struct {
......
...@@ -37,6 +37,7 @@ import ( ...@@ -37,6 +37,7 @@ import (
libp2pquic "github.com/libp2p/go-libp2p-quic-transport" libp2pquic "github.com/libp2p/go-libp2p-quic-transport"
tptu "github.com/libp2p/go-libp2p-transport-upgrader" tptu "github.com/libp2p/go-libp2p-transport-upgrader"
basichost "github.com/libp2p/go-libp2p/p2p/host/basic" basichost "github.com/libp2p/go-libp2p/p2p/host/basic"
libp2pping "github.com/libp2p/go-libp2p/p2p/protocol/ping"
"github.com/libp2p/go-tcp-transport" "github.com/libp2p/go-tcp-transport"
ws "github.com/libp2p/go-ws-transport" ws "github.com/libp2p/go-ws-transport"
ma "github.com/multiformats/go-multiaddr" ma "github.com/multiformats/go-multiaddr"
...@@ -56,6 +57,7 @@ type Service struct { ...@@ -56,6 +57,7 @@ type Service struct {
natManager basichost.NATManager natManager basichost.NATManager
natAddrResolver *staticAddressResolver natAddrResolver *staticAddressResolver
autonatDialer host.Host autonatDialer host.Host
pingDialer host.Host
libp2pPeerstore peerstore.Peerstore libp2pPeerstore peerstore.Peerstore
metrics metrics metrics metrics
networkID uint64 networkID uint64
...@@ -87,7 +89,6 @@ type Options struct { ...@@ -87,7 +89,6 @@ type Options struct {
NATAddr string NATAddr string
EnableWS bool EnableWS bool
EnableQUIC bool EnableQUIC bool
Standalone bool
FullNode bool FullNode bool
LightNodeLimit int LightNodeLimit int
WelcomeMessage string WelcomeMessage string
...@@ -178,10 +179,6 @@ func New(ctx context.Context, signer beecrypto.Signer, networkID uint64, overlay ...@@ -178,10 +179,6 @@ func New(ctx context.Context, signer beecrypto.Signer, networkID uint64, overlay
transports = append(transports, libp2p.Transport(libp2pquic.NewTransport)) transports = append(transports, libp2p.Transport(libp2pquic.NewTransport))
} }
if o.Standalone {
opts = append(opts, libp2p.NoListenAddrs)
}
opts = append(opts, transports...) opts = append(opts, transports...)
h, err := libp2p.New(ctx, opts...) h, err := libp2p.New(ctx, opts...)
...@@ -222,6 +219,16 @@ func New(ctx context.Context, signer beecrypto.Signer, networkID uint64, overlay ...@@ -222,6 +219,16 @@ func New(ctx context.Context, signer beecrypto.Signer, networkID uint64, overlay
return nil, fmt.Errorf("handshake service: %w", err) return nil, fmt.Errorf("handshake service: %w", err)
} }
// Create a new dialer for the libp2p ping protocol. This ensures that the protocol
// uses a different set of keys for pinging. It prevents inconsistencies in the peerstore, as
// the addresses used are not dialable and hence should be cleaned up. We create
// this host with the same transports and security options to be able to dial other
// peers.
pingDialer, err := libp2p.New(ctx, append(transports, security, libp2p.NoListenAddrs)...)
if err != nil {
return nil, err
}
peerRegistry := newPeerRegistry() peerRegistry := newPeerRegistry()
s := &Service{ s := &Service{
ctx: ctx, ctx: ctx,
...@@ -229,6 +236,7 @@ func New(ctx context.Context, signer beecrypto.Signer, networkID uint64, overlay ...@@ -229,6 +236,7 @@ func New(ctx context.Context, signer beecrypto.Signer, networkID uint64, overlay
natManager: natManager, natManager: natManager,
natAddrResolver: natAddrResolver, natAddrResolver: natAddrResolver,
autonatDialer: dialer, autonatDialer: dialer,
pingDialer: pingDialer,
handshakeService: handshakeService, handshakeService: handshakeService,
libp2pPeerstore: libp2pPeerstore, libp2pPeerstore: libp2pPeerstore,
metrics: newMetrics(), metrics: newMetrics(),
...@@ -795,6 +803,9 @@ func (s *Service) Close() error { ...@@ -795,6 +803,9 @@ func (s *Service) Close() error {
if err := s.autonatDialer.Close(); err != nil { if err := s.autonatDialer.Close(); err != nil {
return err return err
} }
if err := s.pingDialer.Close(); err != nil {
return err
}
return s.host.Close() return s.host.Close()
} }
...@@ -816,3 +827,20 @@ func (s *Service) Ready() { ...@@ -816,3 +827,20 @@ func (s *Service) Ready() {
func (s *Service) Halt() { func (s *Service) Halt() {
close(s.halt) close(s.halt)
} }
func (s *Service) Ping(ctx context.Context, addr ma.Multiaddr) (rtt time.Duration, err error) {
info, err := libp2ppeer.AddrInfoFromP2pAddr(addr)
if err != nil {
return rtt, fmt.Errorf("unable to parse underlay address: %w", err)
}
// Add the address to the libp2p peerstore so that it is dialable
s.pingDialer.Peerstore().AddAddrs(info.ID, info.Addrs, peerstore.TempAddrTTL)
select {
case <-ctx.Done():
return rtt, ctx.Err()
case res := <-libp2pping.Ping(ctx, s.pingDialer, info.ID):
return res.RTT, res.Error
}
}
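// Illustrative usage sketch (not part of this change): how a caller might drive the new
// Ping method through the p2p.Pinger interface before deciding whether an underlay address
// is worth connecting to. "probe" and its parameters are names invented for this example.
func probe(ctx context.Context, pinger p2p.Pinger, addr ma.Multiaddr, logger logging.Logger) error {
	// Bound the ping so an unreachable underlay does not block the caller indefinitely.
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	rtt, err := pinger.Ping(ctx, addr)
	if err != nil {
		return fmt.Errorf("ping %s: %w", addr, err)
	}
	logger.Debugf("underlay %s reachable, rtt %v", addr, rtt)
	return nil
}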
...@@ -378,6 +378,30 @@ func TestConnectDisconnectEvents(t *testing.T) { ...@@ -378,6 +378,30 @@ func TestConnectDisconnectEvents(t *testing.T) {
} }
func TestPing(t *testing.T) {
t.Skip("test flaking")
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
s1, _ := newService(t, 1, libp2pServiceOpts{})
s2, _ := newService(t, 1, libp2pServiceOpts{})
// Wait for listeners to start. There are times when the test fails unexpectedly
// during CI and we suspect it is due to the listeners not starting in time. The
// sleep here ensures CPU is given up for any goroutines which are not getting
// scheduled. Ideally we should explicitly check the TCP status on the port
// where the libp2p.Host is started before assuming the host is up. This seems like
// a bit of overkill here unless the test starts flaking.
time.Sleep(time.Second)
addr := serviceUnderlayAddress(t, s1)
if _, err := s2.Ping(ctx, addr); err != nil {
t.Fatal(err)
}
}
const ( const (
testProtocolName = "testing" testProtocolName = "testing"
testProtocolVersion = "2.3.4" testProtocolVersion = "2.3.4"
......
...@@ -70,6 +70,18 @@ type StreamerDisconnecter interface { ...@@ -70,6 +70,18 @@ type StreamerDisconnecter interface {
Disconnecter Disconnecter
} }
// Pinger is used to ping an underlay address which is not yet known to the bee node.
// It uses libp2p's default ping protocol. This is different from the PingPong protocol, as it
// is meant to be used before we know a particular underlay address and can consider it useful.
type Pinger interface {
Ping(ctx context.Context, addr ma.Multiaddr) (rtt time.Duration, err error)
}
type StreamerPinger interface {
Streamer
Pinger
}
// Stream represent a bidirectional data Stream. // Stream represent a bidirectional data Stream.
type Stream interface { type Stream interface {
io.ReadWriter io.ReadWriter
......
...@@ -14,6 +14,7 @@ import ( ...@@ -14,6 +14,7 @@ import (
"github.com/ethersphere/bee/pkg/p2p" "github.com/ethersphere/bee/pkg/p2p"
"github.com/ethersphere/bee/pkg/swarm" "github.com/ethersphere/bee/pkg/swarm"
ma "github.com/multiformats/go-multiaddr"
) )
var ( var (
...@@ -37,6 +38,7 @@ type Recorder struct { ...@@ -37,6 +38,7 @@ type Recorder struct {
protocols []p2p.ProtocolSpec protocols []p2p.ProtocolSpec
middlewares []p2p.HandlerMiddleware middlewares []p2p.HandlerMiddleware
streamErr func(swarm.Address, string, string, string) error streamErr func(swarm.Address, string, string, string) error
pingErr func(ma.Multiaddr) (time.Duration, error)
protocolsWithPeers map[string]p2p.ProtocolSpec protocolsWithPeers map[string]p2p.ProtocolSpec
} }
...@@ -76,6 +78,12 @@ func WithStreamError(streamErr func(swarm.Address, string, string, string) error ...@@ -76,6 +78,12 @@ func WithStreamError(streamErr func(swarm.Address, string, string, string) error
}) })
} }
func WithPingErr(pingErr func(ma.Multiaddr) (time.Duration, error)) Option {
return optionFunc(func(r *Recorder) {
r.pingErr = pingErr
})
}
func New(opts ...Option) *Recorder { func New(opts ...Option) *Recorder {
r := &Recorder{ r := &Recorder{
records: make(map[string][]*Record), records: make(map[string][]*Record),
...@@ -153,6 +161,13 @@ func (r *Recorder) NewStream(ctx context.Context, addr swarm.Address, h p2p.Head ...@@ -153,6 +161,13 @@ func (r *Recorder) NewStream(ctx context.Context, addr swarm.Address, h p2p.Head
return streamOut, nil return streamOut, nil
} }
func (r *Recorder) Ping(ctx context.Context, addr ma.Multiaddr) (rtt time.Duration, err error) {
if r.pingErr != nil {
return r.pingErr(addr)
}
return rtt, err
}
func (r *Recorder) Records(addr swarm.Address, protocolName, protocolVersio, streamName string) ([]*Record, error) { func (r *Recorder) Records(addr swarm.Address, protocolName, protocolVersio, streamName string) ([]*Record, error) {
id := addr.String() + p2p.NewSwarmStreamName(protocolName, protocolVersio, streamName) id := addr.String() + p2p.NewSwarmStreamName(protocolName, protocolVersio, streamName)
......
...@@ -18,6 +18,7 @@ import ( ...@@ -18,6 +18,7 @@ import (
"github.com/ethersphere/bee/pkg/p2p" "github.com/ethersphere/bee/pkg/p2p"
"github.com/ethersphere/bee/pkg/p2p/streamtest" "github.com/ethersphere/bee/pkg/p2p/streamtest"
"github.com/ethersphere/bee/pkg/swarm" "github.com/ethersphere/bee/pkg/swarm"
ma "github.com/multiformats/go-multiaddr"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
) )
...@@ -758,6 +759,28 @@ func TestRecorder_withStreamError(t *testing.T) { ...@@ -758,6 +759,28 @@ func TestRecorder_withStreamError(t *testing.T) {
}, nil) }, nil)
} }
func TestRecorder_ping(t *testing.T) {
testAddr, _ := ma.NewMultiaddr("/ip4/0.0.0.0/tcp/0")
rec := streamtest.New()
_, err := rec.Ping(context.Background(), testAddr)
if err != nil {
t.Fatalf("unable to ping err: %s", err.Error())
}
rec2 := streamtest.New(
streamtest.WithPingErr(func(_ ma.Multiaddr) (rtt time.Duration, err error) {
return rtt, errors.New("fail")
}),
)
_, err = rec2.Ping(context.Background(), testAddr)
if err == nil {
t.Fatal("expected ping err")
}
}
const ( const (
testProtocolName = "testing" testProtocolName = "testing"
testProtocolVersion = "1.0.1" testProtocolVersion = "1.0.1"
......
...@@ -9,14 +9,19 @@ import ( ...@@ -9,14 +9,19 @@ import (
"encoding/hex" "encoding/hex"
"errors" "errors"
"fmt" "fmt"
"hash"
"math/big" "math/big"
"github.com/ethersphere/bee/pkg/logging" "github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/postage" "github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/storage" "github.com/ethersphere/bee/pkg/storage"
"golang.org/x/crypto/sha3"
) )
const dirtyDBKey = "batchservice_dirty_db" const (
dirtyDBKey = "batchservice_dirty_db"
checksumDBKey = "batchservice_checksum"
)
type batchService struct { type batchService struct {
stateStore storage.StateStorer stateStore storage.StateStorer
...@@ -25,6 +30,8 @@ type batchService struct { ...@@ -25,6 +30,8 @@ type batchService struct {
listener postage.Listener listener postage.Listener
owner []byte owner []byte
batchListener postage.BatchCreationListener batchListener postage.BatchCreationListener
checksum hash.Hash // checksum hasher
} }
type Interface interface { type Interface interface {
...@@ -39,13 +46,40 @@ func New( ...@@ -39,13 +46,40 @@ func New(
listener postage.Listener, listener postage.Listener,
owner []byte, owner []byte,
batchListener postage.BatchCreationListener, batchListener postage.BatchCreationListener,
) Interface { checksumFunc func() hash.Hash,
return &batchService{stateStore, storer, logger, listener, owner, batchListener} ) (Interface, error) {
if checksumFunc == nil {
checksumFunc = sha3.New256
}
var (
b string
sum = checksumFunc()
)
if err := stateStore.Get(checksumDBKey, &b); err != nil {
if !errors.Is(err, storage.ErrNotFound) {
return nil, err
}
} else {
s, err := hex.DecodeString(b)
if err != nil {
return nil, err
}
n, err := sum.Write(s)
if err != nil {
return nil, err
}
if n != len(s) {
return nil, errors.New("batchstore checksum init")
}
}
return &batchService{stateStore, storer, logger, listener, owner, batchListener, sum}, nil
} }
// Create will create a new batch with the given ID, owner value and depth and // Create will create a new batch with the given ID, owner value and depth and
// stores it in the BatchStore. // stores it in the BatchStore.
func (svc *batchService) Create(id, owner []byte, normalisedBalance *big.Int, depth, bucketDepth uint8, immutable bool) error { func (svc *batchService) Create(id, owner []byte, normalisedBalance *big.Int, depth, bucketDepth uint8, immutable bool, txHash []byte) error {
b := &postage.Batch{ b := &postage.Batch{
ID: id, ID: id,
Owner: owner, Owner: owner,
...@@ -64,14 +98,18 @@ func (svc *batchService) Create(id, owner []byte, normalisedBalance *big.Int, de ...@@ -64,14 +98,18 @@ func (svc *batchService) Create(id, owner []byte, normalisedBalance *big.Int, de
if bytes.Equal(svc.owner, owner) && svc.batchListener != nil { if bytes.Equal(svc.owner, owner) && svc.batchListener != nil {
svc.batchListener.Handle(b) svc.batchListener.Handle(b)
} }
cs, err := svc.updateChecksum(txHash)
if err != nil {
return fmt.Errorf("update checksum: %w", err)
}
svc.logger.Debugf("batch service: created batch id %s", hex.EncodeToString(b.ID)) svc.logger.Debugf("batch service: created batch id %s, tx %x, checksum %x", hex.EncodeToString(b.ID), txHash, cs)
return nil return nil
} }
// TopUp implements the EventUpdater interface. It tops up a batch with the // TopUp implements the EventUpdater interface. It tops up a batch with the
// given ID with the given amount. // given ID with the given amount.
func (svc *batchService) TopUp(id []byte, normalisedBalance *big.Int) error { func (svc *batchService) TopUp(id []byte, normalisedBalance *big.Int, txHash []byte) error {
b, err := svc.storer.Get(id) b, err := svc.storer.Get(id)
if err != nil { if err != nil {
return fmt.Errorf("get: %w", err) return fmt.Errorf("get: %w", err)
...@@ -81,14 +119,18 @@ func (svc *batchService) TopUp(id []byte, normalisedBalance *big.Int) error { ...@@ -81,14 +119,18 @@ func (svc *batchService) TopUp(id []byte, normalisedBalance *big.Int) error {
if err != nil { if err != nil {
return fmt.Errorf("put: %w", err) return fmt.Errorf("put: %w", err)
} }
cs, err := svc.updateChecksum(txHash)
if err != nil {
return fmt.Errorf("update checksum: %w", err)
}
svc.logger.Debugf("batch service: topped up batch id %s from %v to %v", hex.EncodeToString(b.ID), b.Value, normalisedBalance) svc.logger.Debugf("batch service: topped up batch id %s from %v to %v, tx %x, checksum %x", hex.EncodeToString(b.ID), b.Value, normalisedBalance, txHash, cs)
return nil return nil
} }
// UpdateDepth implements the EventUpdater interface. It sets the new depth of a // UpdateDepth implements the EventUpdater interface. It sets the new depth of a
// batch with the given ID. // batch with the given ID.
func (svc *batchService) UpdateDepth(id []byte, depth uint8, normalisedBalance *big.Int) error { func (svc *batchService) UpdateDepth(id []byte, depth uint8, normalisedBalance *big.Int, txHash []byte) error {
b, err := svc.storer.Get(id) b, err := svc.storer.Get(id)
if err != nil { if err != nil {
return fmt.Errorf("get: %w", err) return fmt.Errorf("get: %w", err)
...@@ -97,21 +139,30 @@ func (svc *batchService) UpdateDepth(id []byte, depth uint8, normalisedBalance * ...@@ -97,21 +139,30 @@ func (svc *batchService) UpdateDepth(id []byte, depth uint8, normalisedBalance *
if err != nil { if err != nil {
return fmt.Errorf("put: %w", err) return fmt.Errorf("put: %w", err)
} }
cs, err := svc.updateChecksum(txHash)
if err != nil {
return fmt.Errorf("update checksum: %w", err)
}
svc.logger.Debugf("batch service: updated depth of batch id %s from %d to %d", hex.EncodeToString(b.ID), b.Depth, depth) svc.logger.Debugf("batch service: updated depth of batch id %s from %d to %d, tx %x, checksum %x", hex.EncodeToString(b.ID), b.Depth, depth, txHash, cs)
return nil return nil
} }
// UpdatePrice implements the EventUpdater interface. It sets the current // UpdatePrice implements the EventUpdater interface. It sets the current
// price from the chain in the service chain state. // price from the chain in the service chain state.
func (svc *batchService) UpdatePrice(price *big.Int) error { func (svc *batchService) UpdatePrice(price *big.Int, txHash []byte) error {
cs := svc.storer.GetChainState() cs := svc.storer.GetChainState()
cs.CurrentPrice = price cs.CurrentPrice = price
if err := svc.storer.PutChainState(cs); err != nil { if err := svc.storer.PutChainState(cs); err != nil {
return fmt.Errorf("put chain state: %w", err) return fmt.Errorf("put chain state: %w", err)
} }
svc.logger.Debugf("batch service: updated chain price to %s", price) sum, err := svc.updateChecksum(txHash)
if err != nil {
return fmt.Errorf("update checksum: %w", err)
}
svc.logger.Debugf("batch service: updated chain price to %s, tx %x, checksum %x", price, txHash, sum)
return nil return nil
} }
...@@ -161,3 +212,29 @@ func (svc *batchService) Start(startBlock uint64) (<-chan struct{}, error) { ...@@ -161,3 +212,29 @@ func (svc *batchService) Start(startBlock uint64) (<-chan struct{}, error) {
} }
return svc.listener.Listen(startBlock+1, svc), nil return svc.listener.Listen(startBlock+1, svc), nil
} }
// updateChecksum updates the batchservice checksum once an event gets
// processed. It swaps the existing checksum which is in the hasher
// with the new checksum and persists it in the statestore.
func (svc *batchService) updateChecksum(txHash []byte) ([]byte, error) {
n, err := svc.checksum.Write(txHash)
if err != nil {
return nil, err
}
if l := len(txHash); l != n {
return nil, fmt.Errorf("update checksum wrote %d bytes but want %d bytes", n, l)
}
s := svc.checksum.Sum(nil)
svc.checksum.Reset()
n, err = svc.checksum.Write(s)
if err != nil {
return nil, err
}
if l := len(s); l != n {
return nil, fmt.Errorf("swap checksum wrote %d bytes but want %d bytes", n, l)
}
b := hex.EncodeToString(s)
return s, svc.stateStore.Put(checksumDBKey, b)
}
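// Illustrative sketch (not part of this change): the rolling checksum maintained by
// updateChecksum is equivalent to folding the processed transaction hashes as
// c_0 = empty (or the digest restored from the statestore), c_i = H(c_{i-1} || txHash_i).
// The helper below reproduces that fold with a fresh hasher per step; "rollingChecksum"
// and "newHash" are names invented for this example.
func rollingChecksum(newHash func() hash.Hash, txHashes [][]byte) []byte {
	var prev []byte
	for _, tx := range txHashes {
		h := newHash()
		// hash.Hash.Write never returns an error, so the return values are ignored.
		_, _ = h.Write(prev) // previous digest (empty before the first event)
		_, _ = h.Write(tx)   // transaction hash of the current event
		prev = h.Sum(nil)
	}
	return prev
}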
...@@ -7,6 +7,7 @@ package batchservice_test ...@@ -7,6 +7,7 @@ package batchservice_test
import ( import (
"bytes" "bytes"
"errors" "errors"
"hash"
"io/ioutil" "io/ioutil"
"math/big" "math/big"
"math/rand" "math/rand"
...@@ -22,8 +23,9 @@ import ( ...@@ -22,8 +23,9 @@ import (
) )
var ( var (
testLog = logging.New(ioutil.Discard, 0) testLog = logging.New(ioutil.Discard, 0)
errTest = errors.New("fails") errTest = errors.New("fails")
testTxHash = make([]byte, 32)
) )
type mockListener struct { type mockListener struct {
...@@ -51,6 +53,7 @@ func TestBatchServiceCreate(t *testing.T) { ...@@ -51,6 +53,7 @@ func TestBatchServiceCreate(t *testing.T) {
testBatch := postagetesting.MustNewBatch() testBatch := postagetesting.MustNewBatch()
testBatchListener := &mockBatchCreationHandler{} testBatchListener := &mockBatchCreationHandler{}
svc, _, _ := newTestStoreAndServiceWithListener( svc, _, _ := newTestStoreAndServiceWithListener(
t,
testBatch.Owner, testBatch.Owner,
testBatchListener, testBatchListener,
mock.WithChainState(testChainState), mock.WithChainState(testChainState),
...@@ -64,6 +67,7 @@ func TestBatchServiceCreate(t *testing.T) { ...@@ -64,6 +67,7 @@ func TestBatchServiceCreate(t *testing.T) {
testBatch.Depth, testBatch.Depth,
testBatch.BucketDepth, testBatch.BucketDepth,
testBatch.Immutable, testBatch.Immutable,
testTxHash,
); err == nil { ); err == nil {
t.Fatalf("expected error") t.Fatalf("expected error")
} }
...@@ -105,6 +109,7 @@ func TestBatchServiceCreate(t *testing.T) { ...@@ -105,6 +109,7 @@ func TestBatchServiceCreate(t *testing.T) {
testBatch := postagetesting.MustNewBatch() testBatch := postagetesting.MustNewBatch()
testBatchListener := &mockBatchCreationHandler{} testBatchListener := &mockBatchCreationHandler{}
svc, batchStore, _ := newTestStoreAndServiceWithListener( svc, batchStore, _ := newTestStoreAndServiceWithListener(
t,
testBatch.Owner, testBatch.Owner,
testBatchListener, testBatchListener,
mock.WithChainState(testChainState), mock.WithChainState(testChainState),
...@@ -117,6 +122,7 @@ func TestBatchServiceCreate(t *testing.T) { ...@@ -117,6 +122,7 @@ func TestBatchServiceCreate(t *testing.T) {
testBatch.Depth, testBatch.Depth,
testBatch.BucketDepth, testBatch.BucketDepth,
testBatch.Immutable, testBatch.Immutable,
testTxHash,
); err != nil { ); err != nil {
t.Fatalf("got error %v", err) t.Fatalf("got error %v", err)
} }
...@@ -135,6 +141,7 @@ func TestBatchServiceCreate(t *testing.T) { ...@@ -135,6 +141,7 @@ func TestBatchServiceCreate(t *testing.T) {
rand.Read(owner) rand.Read(owner)
svc, batchStore, _ := newTestStoreAndServiceWithListener( svc, batchStore, _ := newTestStoreAndServiceWithListener(
t,
owner, owner,
testBatchListener, testBatchListener,
mock.WithChainState(testChainState), mock.WithChainState(testChainState),
...@@ -147,6 +154,7 @@ func TestBatchServiceCreate(t *testing.T) { ...@@ -147,6 +154,7 @@ func TestBatchServiceCreate(t *testing.T) {
testBatch.Depth, testBatch.Depth,
testBatch.BucketDepth, testBatch.BucketDepth,
testBatch.Immutable, testBatch.Immutable,
testTxHash,
); err != nil { ); err != nil {
t.Fatalf("got error %v", err) t.Fatalf("got error %v", err)
} }
...@@ -164,32 +172,34 @@ func TestBatchServiceTopUp(t *testing.T) { ...@@ -164,32 +172,34 @@ func TestBatchServiceTopUp(t *testing.T) {
t.Run("expect get error", func(t *testing.T) { t.Run("expect get error", func(t *testing.T) {
svc, _, _ := newTestStoreAndService( svc, _, _ := newTestStoreAndService(
t,
mock.WithGetErr(errTest, 0), mock.WithGetErr(errTest, 0),
) )
if err := svc.TopUp(testBatch.ID, testNormalisedBalance); err == nil { if err := svc.TopUp(testBatch.ID, testNormalisedBalance, testTxHash); err == nil {
t.Fatal("expected error") t.Fatal("expected error")
} }
}) })
t.Run("expect put error", func(t *testing.T) { t.Run("expect put error", func(t *testing.T) {
svc, batchStore, _ := newTestStoreAndService( svc, batchStore, _ := newTestStoreAndService(
t,
mock.WithPutErr(errTest, 1), mock.WithPutErr(errTest, 1),
) )
putBatch(t, batchStore, testBatch) putBatch(t, batchStore, testBatch)
if err := svc.TopUp(testBatch.ID, testNormalisedBalance); err == nil { if err := svc.TopUp(testBatch.ID, testNormalisedBalance, testTxHash); err == nil {
t.Fatal("expected error") t.Fatal("expected error")
} }
}) })
t.Run("passes", func(t *testing.T) { t.Run("passes", func(t *testing.T) {
svc, batchStore, _ := newTestStoreAndService() svc, batchStore, _ := newTestStoreAndService(t)
putBatch(t, batchStore, testBatch) putBatch(t, batchStore, testBatch)
want := testNormalisedBalance want := testNormalisedBalance
if err := svc.TopUp(testBatch.ID, testNormalisedBalance); err != nil { if err := svc.TopUp(testBatch.ID, testNormalisedBalance, testTxHash); err != nil {
t.Fatalf("top up: %v", err) t.Fatalf("top up: %v", err)
} }
...@@ -211,30 +221,32 @@ func TestBatchServiceUpdateDepth(t *testing.T) { ...@@ -211,30 +221,32 @@ func TestBatchServiceUpdateDepth(t *testing.T) {
t.Run("expect get error", func(t *testing.T) { t.Run("expect get error", func(t *testing.T) {
svc, _, _ := newTestStoreAndService( svc, _, _ := newTestStoreAndService(
t,
mock.WithGetErr(errTest, 0), mock.WithGetErr(errTest, 0),
) )
if err := svc.UpdateDepth(testBatch.ID, testNewDepth, testNormalisedBalance); err == nil { if err := svc.UpdateDepth(testBatch.ID, testNewDepth, testNormalisedBalance, testTxHash); err == nil {
t.Fatal("expected get error") t.Fatal("expected get error")
} }
}) })
t.Run("expect put error", func(t *testing.T) { t.Run("expect put error", func(t *testing.T) {
svc, batchStore, _ := newTestStoreAndService( svc, batchStore, _ := newTestStoreAndService(
t,
mock.WithPutErr(errTest, 1), mock.WithPutErr(errTest, 1),
) )
putBatch(t, batchStore, testBatch) putBatch(t, batchStore, testBatch)
if err := svc.UpdateDepth(testBatch.ID, testNewDepth, testNormalisedBalance); err == nil { if err := svc.UpdateDepth(testBatch.ID, testNewDepth, testNormalisedBalance, testTxHash); err == nil {
t.Fatal("expected put error") t.Fatal("expected put error")
} }
}) })
t.Run("passes", func(t *testing.T) { t.Run("passes", func(t *testing.T) {
svc, batchStore, _ := newTestStoreAndService() svc, batchStore, _ := newTestStoreAndService(t)
putBatch(t, batchStore, testBatch) putBatch(t, batchStore, testBatch)
if err := svc.UpdateDepth(testBatch.ID, testNewDepth, testNormalisedBalance); err != nil { if err := svc.UpdateDepth(testBatch.ID, testNewDepth, testNormalisedBalance, testTxHash); err != nil {
t.Fatalf("update depth: %v", err) t.Fatalf("update depth: %v", err)
} }
...@@ -256,22 +268,24 @@ func TestBatchServiceUpdatePrice(t *testing.T) { ...@@ -256,22 +268,24 @@ func TestBatchServiceUpdatePrice(t *testing.T) {
t.Run("expect put error", func(t *testing.T) { t.Run("expect put error", func(t *testing.T) {
svc, batchStore, _ := newTestStoreAndService( svc, batchStore, _ := newTestStoreAndService(
t,
mock.WithChainState(testChainState), mock.WithChainState(testChainState),
mock.WithPutErr(errTest, 1), mock.WithPutErr(errTest, 1),
) )
putChainState(t, batchStore, testChainState) putChainState(t, batchStore, testChainState)
if err := svc.UpdatePrice(testNewPrice); err == nil { if err := svc.UpdatePrice(testNewPrice, testTxHash); err == nil {
t.Fatal("expected error") t.Fatal("expected error")
} }
}) })
t.Run("passes", func(t *testing.T) { t.Run("passes", func(t *testing.T) {
svc, batchStore, _ := newTestStoreAndService( svc, batchStore, _ := newTestStoreAndService(
t,
mock.WithChainState(testChainState), mock.WithChainState(testChainState),
) )
if err := svc.UpdatePrice(testNewPrice); err != nil { if err := svc.UpdatePrice(testNewPrice, testTxHash); err != nil {
t.Fatalf("update price: %v", err) t.Fatalf("update price: %v", err)
} }
...@@ -288,6 +302,7 @@ func TestBatchServiceUpdateBlockNumber(t *testing.T) { ...@@ -288,6 +302,7 @@ func TestBatchServiceUpdateBlockNumber(t *testing.T) {
TotalAmount: big.NewInt(100), TotalAmount: big.NewInt(100),
} }
svc, batchStore, _ := newTestStoreAndService( svc, batchStore, _ := newTestStoreAndService(
t,
mock.WithChainState(testChainState), mock.WithChainState(testChainState),
) )
...@@ -305,7 +320,7 @@ func TestBatchServiceUpdateBlockNumber(t *testing.T) { ...@@ -305,7 +320,7 @@ func TestBatchServiceUpdateBlockNumber(t *testing.T) {
} }
func TestTransactionOk(t *testing.T) { func TestTransactionOk(t *testing.T) {
svc, store, s := newTestStoreAndService() svc, store, s := newTestStoreAndService(t)
if _, err := svc.Start(10); err != nil { if _, err := svc.Start(10); err != nil {
t.Fatal(err) t.Fatal(err)
} }
...@@ -318,7 +333,10 @@ func TestTransactionOk(t *testing.T) { ...@@ -318,7 +333,10 @@ func TestTransactionOk(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
svc2 := batchservice.New(s, store, testLog, newMockListener(), nil, nil) svc2, err := batchservice.New(s, store, testLog, newMockListener(), nil, nil, nil)
if err != nil {
t.Fatal(err)
}
if _, err := svc2.Start(10); err != nil { if _, err := svc2.Start(10); err != nil {
t.Fatal(err) t.Fatal(err)
} }
...@@ -329,7 +347,7 @@ func TestTransactionOk(t *testing.T) { ...@@ -329,7 +347,7 @@ func TestTransactionOk(t *testing.T) {
} }
func TestTransactionFail(t *testing.T) { func TestTransactionFail(t *testing.T) {
svc, store, s := newTestStoreAndService() svc, store, s := newTestStoreAndService(t)
if _, err := svc.Start(10); err != nil { if _, err := svc.Start(10); err != nil {
t.Fatal(err) t.Fatal(err)
} }
...@@ -338,7 +356,10 @@ func TestTransactionFail(t *testing.T) { ...@@ -338,7 +356,10 @@ func TestTransactionFail(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
svc2 := batchservice.New(s, store, testLog, newMockListener(), nil, nil) svc2, err := batchservice.New(s, store, testLog, newMockListener(), nil, nil, nil)
if err != nil {
t.Fatal(err)
}
if _, err := svc2.Start(10); err != nil { if _, err := svc2.Start(10); err != nil {
t.Fatal(err) t.Fatal(err)
} }
...@@ -347,19 +368,47 @@ func TestTransactionFail(t *testing.T) { ...@@ -347,19 +368,47 @@ func TestTransactionFail(t *testing.T) {
t.Fatalf("expect %d reset calls got %d", 1, c) t.Fatalf("expect %d reset calls got %d", 1, c)
} }
} }
func TestChecksum(t *testing.T) {
s := mocks.NewStateStore()
store := mock.New()
mockHash := &hs{}
svc, err := batchservice.New(s, store, testLog, newMockListener(), nil, nil, func() hash.Hash { return mockHash })
if err != nil {
t.Fatal(err)
}
testNormalisedBalance := big.NewInt(2000000000000)
testBatch := postagetesting.MustNewBatch()
putBatch(t, store, testBatch)
if err := svc.TopUp(testBatch.ID, testNormalisedBalance, testTxHash); err != nil {
t.Fatalf("top up: %v", err)
}
if m := mockHash.ctr; m != 2 {
t.Fatalf("expected %d calls got %d", 2, m)
}
}
func newTestStoreAndServiceWithListener( func newTestStoreAndServiceWithListener(
t *testing.T,
owner []byte, owner []byte,
batchListener postage.BatchCreationListener, batchListener postage.BatchCreationListener,
opts ...mock.Option, opts ...mock.Option,
) (postage.EventUpdater, *mock.BatchStore, storage.StateStorer) { ) (postage.EventUpdater, *mock.BatchStore, storage.StateStorer) {
t.Helper()
s := mocks.NewStateStore() s := mocks.NewStateStore()
store := mock.New(opts...) store := mock.New(opts...)
svc := batchservice.New(s, store, testLog, newMockListener(), owner, batchListener) svc, err := batchservice.New(s, store, testLog, newMockListener(), owner, batchListener, nil)
if err != nil {
t.Fatal(err)
}
return svc, store, s return svc, store, s
} }
func newTestStoreAndService(opts ...mock.Option) (postage.EventUpdater, *mock.BatchStore, storage.StateStorer) { func newTestStoreAndService(t *testing.T, opts ...mock.Option) (postage.EventUpdater, *mock.BatchStore, storage.StateStorer) {
return newTestStoreAndServiceWithListener(nil, nil, opts...) t.Helper()
return newTestStoreAndServiceWithListener(t, nil, nil, opts...)
} }
func putBatch(t *testing.T, store postage.Storer, b *postage.Batch) { func putBatch(t *testing.T, store postage.Storer, b *postage.Batch) {
...@@ -377,3 +426,11 @@ func putChainState(t *testing.T, store postage.Storer, cs *postage.ChainState) { ...@@ -377,3 +426,11 @@ func putChainState(t *testing.T, store postage.Storer, cs *postage.ChainState) {
t.Fatalf("store put chain state: %v", err) t.Fatalf("store put chain state: %v", err)
} }
} }
type hs struct{ ctr uint8 }
func (h *hs) Write(p []byte) (n int, err error) { h.ctr++; return len(p), nil }
func (h *hs) Sum(b []byte) []byte { return []byte{h.ctr} }
func (h *hs) Reset() {}
func (h *hs) Size() int { panic("not implemented") }
func (h *hs) BlockSize() int { panic("not implemented") }
...@@ -92,7 +92,11 @@ func (bs *BatchStore) Get(id []byte) (*postage.Batch, error) { ...@@ -92,7 +92,11 @@ func (bs *BatchStore) Get(id []byte) (*postage.Batch, error) {
} }
bs.getErrDelayCnt-- bs.getErrDelayCnt--
} }
if !bytes.Equal(bs.id, id) { exists, err := bs.Exists(id)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.New("no such id") return nil, errors.New("no such id")
} }
return bs.batch, nil return bs.batch, nil
...@@ -147,6 +151,11 @@ func (bs *BatchStore) SetRadiusSetter(r postage.RadiusSetter) { ...@@ -147,6 +151,11 @@ func (bs *BatchStore) SetRadiusSetter(r postage.RadiusSetter) {
panic("not implemented") panic("not implemented")
} }
// Exists reports whether the batch referenced by the given id exists.
func (bs *BatchStore) Exists(id []byte) (bool, error) {
return bytes.Equal(bs.id, id), nil
}
func (bs *BatchStore) Reset() error { func (bs *BatchStore) Reset() error {
bs.resetCallCount++ bs.resetCallCount++
return nil return nil
......
...@@ -198,6 +198,18 @@ func (s *store) SetRadiusSetter(r postage.RadiusSetter) { ...@@ -198,6 +198,18 @@ func (s *store) SetRadiusSetter(r postage.RadiusSetter) {
s.radiusSetter = r s.radiusSetter = r
} }
// Exists reports whether the batch referenced by the given id exists.
func (s *store) Exists(id []byte) (bool, error) {
switch err := s.store.Get(batchKey(id), new(postage.Batch)); {
case err == nil:
return true, nil
case errors.Is(err, storage.ErrNotFound):
return false, nil
default:
return false, err
}
}
func (s *store) Reset() error { func (s *store) Reset() error {
prefix := "batchstore_" prefix := "batchstore_"
if err := s.store.Iterate(prefix, func(k, _ []byte) (bool, error) { if err := s.store.Iterate(prefix, func(k, _ []byte) (bool, error) {
......
...@@ -12,10 +12,10 @@ import ( ...@@ -12,10 +12,10 @@ import (
// EventUpdater interface definitions reflect the updates triggered by events // EventUpdater interface definitions reflect the updates triggered by events
// emitted by the postage contract on the blockchain. // emitted by the postage contract on the blockchain.
type EventUpdater interface { type EventUpdater interface {
Create(id []byte, owner []byte, normalisedBalance *big.Int, depth, bucketDepth uint8, immutable bool) error Create(id []byte, owner []byte, normalisedBalance *big.Int, depth, bucketDepth uint8, immutable bool, txHash []byte) error
TopUp(id []byte, normalisedBalance *big.Int) error TopUp(id []byte, normalisedBalance *big.Int, txHash []byte) error
UpdateDepth(id []byte, depth uint8, normalisedBalance *big.Int) error UpdateDepth(id []byte, depth uint8, normalisedBalance *big.Int, txHash []byte) error
UpdatePrice(price *big.Int) error UpdatePrice(price *big.Int, txHash []byte) error
UpdateBlockNumber(blockNumber uint64) error UpdateBlockNumber(blockNumber uint64) error
Start(startBlock uint64) (<-chan struct{}, error) Start(startBlock uint64) (<-chan struct{}, error)
...@@ -30,11 +30,12 @@ type UnreserveIteratorFn func(id []byte, radius uint8) (bool, error) ...@@ -30,11 +30,12 @@ type UnreserveIteratorFn func(id []byte, radius uint8) (bool, error)
type Storer interface { type Storer interface {
Get(id []byte) (*Batch, error) Get(id []byte) (*Batch, error)
Put(*Batch, *big.Int, uint8) error Put(*Batch, *big.Int, uint8) error
PutChainState(*ChainState) error
GetChainState() *ChainState GetChainState() *ChainState
PutChainState(*ChainState) error
GetReserveState() *ReserveState GetReserveState() *ReserveState
SetRadiusSetter(RadiusSetter) SetRadiusSetter(RadiusSetter)
Unreserve(UnreserveIteratorFn) error Unreserve(UnreserveIteratorFn) error
Exists(id []byte) (bool, error)
Reset() error Reset() error
} }
......
...@@ -118,6 +118,7 @@ func (l *listener) processEvent(e types.Log, updater postage.EventUpdater) error ...@@ -118,6 +118,7 @@ func (l *listener) processEvent(e types.Log, updater postage.EventUpdater) error
c.Depth, c.Depth,
c.BucketDepth, c.BucketDepth,
c.ImmutableFlag, c.ImmutableFlag,
e.TxHash.Bytes(),
) )
case batchTopupTopic: case batchTopupTopic:
c := &batchTopUpEvent{} c := &batchTopUpEvent{}
...@@ -129,6 +130,7 @@ func (l *listener) processEvent(e types.Log, updater postage.EventUpdater) error ...@@ -129,6 +130,7 @@ func (l *listener) processEvent(e types.Log, updater postage.EventUpdater) error
return updater.TopUp( return updater.TopUp(
c.BatchId[:], c.BatchId[:],
c.NormalisedBalance, c.NormalisedBalance,
e.TxHash.Bytes(),
) )
case batchDepthIncreaseTopic: case batchDepthIncreaseTopic:
c := &batchDepthIncreaseEvent{} c := &batchDepthIncreaseEvent{}
...@@ -141,6 +143,7 @@ func (l *listener) processEvent(e types.Log, updater postage.EventUpdater) error ...@@ -141,6 +143,7 @@ func (l *listener) processEvent(e types.Log, updater postage.EventUpdater) error
c.BatchId[:], c.BatchId[:],
c.NewDepth, c.NewDepth,
c.NormalisedBalance, c.NormalisedBalance,
e.TxHash.Bytes(),
) )
case priceUpdateTopic: case priceUpdateTopic:
c := &priceUpdateEvent{} c := &priceUpdateEvent{}
...@@ -151,6 +154,7 @@ func (l *listener) processEvent(e types.Log, updater postage.EventUpdater) error ...@@ -151,6 +154,7 @@ func (l *listener) processEvent(e types.Log, updater postage.EventUpdater) error
l.metrics.PriceCounter.Inc() l.metrics.PriceCounter.Inc()
return updater.UpdatePrice( return updater.UpdatePrice(
c.Price, c.Price,
e.TxHash.Bytes(),
) )
default: default:
l.metrics.EventErrors.Inc() l.metrics.EventErrors.Inc()
...@@ -323,27 +327,6 @@ type priceUpdateEvent struct { ...@@ -323,27 +327,6 @@ type priceUpdateEvent struct {
Price *big.Int Price *big.Int
} }
var (
GoerliChainID = int64(5)
GoerliPostageStampContractAddress = common.HexToAddress("0x621e455C4a139f5C4e4A8122Ce55Dc21630769E4")
GoerliStartBlock = uint64(4933174)
XDaiChainID = int64(100)
XDaiPostageStampContractAddress = common.HexToAddress("0x6a1a21eca3ab28be85c7ba22b2d6eae5907c900e")
XDaiStartBlock = uint64(16515648)
)
// DiscoverAddresses returns the canonical contracts for this chainID
func DiscoverAddresses(chainID int64) (postageStamp common.Address, startBlock uint64, found bool) {
switch chainID {
case GoerliChainID:
return GoerliPostageStampContractAddress, GoerliStartBlock, true
case XDaiChainID:
return XDaiPostageStampContractAddress, XDaiStartBlock, true
default:
return common.Address{}, 0, false
}
}
func totalTimeMetric(metric prometheus.Counter, start time.Time) { func totalTimeMetric(metric prometheus.Counter, start time.Time) {
totalTime := time.Since(start) totalTime := time.Since(start)
metric.Add(float64(totalTime)) metric.Add(float64(totalTime))
......
...@@ -307,7 +307,7 @@ type updater struct { ...@@ -307,7 +307,7 @@ type updater struct {
eventC chan interface{} eventC chan interface{}
} }
func (u *updater) Create(id, owner []byte, normalisedAmount *big.Int, depth, bucketDepth uint8, immutable bool) error { func (u *updater) Create(id, owner []byte, normalisedAmount *big.Int, depth, bucketDepth uint8, immutable bool, _ []byte) error {
u.eventC <- createArgs{ u.eventC <- createArgs{
id: id, id: id,
owner: owner, owner: owner,
...@@ -319,7 +319,7 @@ func (u *updater) Create(id, owner []byte, normalisedAmount *big.Int, depth, buc ...@@ -319,7 +319,7 @@ func (u *updater) Create(id, owner []byte, normalisedAmount *big.Int, depth, buc
return nil return nil
} }
func (u *updater) TopUp(id []byte, normalisedBalance *big.Int) error { func (u *updater) TopUp(id []byte, normalisedBalance *big.Int, _ []byte) error {
u.eventC <- topupArgs{ u.eventC <- topupArgs{
id: id, id: id,
normalisedBalance: normalisedBalance, normalisedBalance: normalisedBalance,
...@@ -327,7 +327,7 @@ func (u *updater) TopUp(id []byte, normalisedBalance *big.Int) error { ...@@ -327,7 +327,7 @@ func (u *updater) TopUp(id []byte, normalisedBalance *big.Int) error {
return nil return nil
} }
func (u *updater) UpdateDepth(id []byte, depth uint8, normalisedBalance *big.Int) error { func (u *updater) UpdateDepth(id []byte, depth uint8, normalisedBalance *big.Int, _ []byte) error {
u.eventC <- depthArgs{ u.eventC <- depthArgs{
id: id, id: id,
depth: depth, depth: depth,
...@@ -336,7 +336,7 @@ func (u *updater) UpdateDepth(id []byte, depth uint8, normalisedBalance *big.Int ...@@ -336,7 +336,7 @@ func (u *updater) UpdateDepth(id []byte, depth uint8, normalisedBalance *big.Int
return nil return nil
} }
func (u *updater) UpdatePrice(price *big.Int) error { func (u *updater) UpdatePrice(price *big.Int, _ []byte) error {
u.eventC <- priceArgs{price} u.eventC <- priceArgs{price}
return nil return nil
} }
......
...@@ -65,21 +65,15 @@ func (m *mockPostage) GetStampIssuer(id []byte) (*postage.StampIssuer, error) { ...@@ -65,21 +65,15 @@ func (m *mockPostage) GetStampIssuer(id []byte) (*postage.StampIssuer, error) {
return nil, errors.New("stampissuer not found") return nil, errors.New("stampissuer not found")
} }
// SetDefaultIssuer sets the default stamps issuer.
func (m *mockPostage) SetDefaultIssuer([]byte) error {
// Noop, the default is m.i.
return nil
}
// DefaultIssuer returns the default stamps issuer.
func (m *mockPostage) DefaultIssuer() *postage.StampIssuer {
return m.i
}
func (m *mockPostage) IssuerUsable(_ *postage.StampIssuer) bool { func (m *mockPostage) IssuerUsable(_ *postage.StampIssuer) bool {
return true return true
} }
// BatchExists returns always true.
func (m *mockPostage) BatchExists(_ []byte) (bool, error) {
return true, nil
}
func (m *mockPostage) Handle(_ *postage.Batch) {} func (m *mockPostage) Handle(_ *postage.Batch) {}
func (m *mockPostage) Close() error { func (m *mockPostage) Close() error {
......
...@@ -34,8 +34,7 @@ type Service interface { ...@@ -34,8 +34,7 @@ type Service interface {
StampIssuers() []*StampIssuer StampIssuers() []*StampIssuer
GetStampIssuer([]byte) (*StampIssuer, error) GetStampIssuer([]byte) (*StampIssuer, error)
IssuerUsable(*StampIssuer) bool IssuerUsable(*StampIssuer) bool
SetDefaultIssuer([]byte) error BatchExists([]byte) (bool, error)
DefaultIssuer() *StampIssuer
BatchCreationListener BatchCreationListener
io.Closer io.Closer
} }
...@@ -43,12 +42,11 @@ type Service interface { ...@@ -43,12 +42,11 @@ type Service interface {
// service handles postage batches // service handles postage batches
// stores the active batches. // stores the active batches.
type service struct { type service struct {
lock sync.Mutex lock sync.Mutex
store storage.StateStorer store storage.StateStorer
postageStore Storer postageStore Storer
chainID int64 chainID int64
issuers []*StampIssuer issuers []*StampIssuer
defaultStampIssuer *StampIssuer
} }
// NewService constructs a new Service. // NewService constructs a new Service.
...@@ -90,24 +88,6 @@ func (ps *service) Add(st *StampIssuer) { ...@@ -90,24 +88,6 @@ func (ps *service) Add(st *StampIssuer) {
ps.issuers = append(ps.issuers, st) ps.issuers = append(ps.issuers, st)
} }
// SetDefaultIssuer sets the default stamps issuer.
func (ps *service) SetDefaultIssuer(id []byte) error {
si, err := ps.GetStampIssuer(id)
if err != nil {
return err
}
ps.lock.Lock()
ps.defaultStampIssuer = si
ps.lock.Unlock()
return nil
}
// DefaultIssuer returns the default stamps issuer.
func (ps *service) DefaultIssuer() *StampIssuer {
return ps.defaultStampIssuer
}
// Handle implements the BatchCreationListener interface. This is fired on receiving // Handle implements the BatchCreationListener interface. This is fired on receiving
// a batch creation event from the blockchain listener to ensure that if a stamp // a batch creation event from the blockchain listener to ensure that if a stamp
// issuer was not created initially, we will create it here. // issuer was not created initially, we will create it here.
...@@ -144,6 +124,11 @@ func (ps *service) IssuerUsable(st *StampIssuer) bool { ...@@ -144,6 +124,11 @@ func (ps *service) IssuerUsable(st *StampIssuer) bool {
return true return true
} }
// BatchExists returns true if the batch referenced by the given id exists.
func (ps *service) BatchExists(id []byte) (bool, error) {
return ps.postageStore.Exists(id)
}
// GetStampIssuer finds a stamp issuer by batch ID. // GetStampIssuer finds a stamp issuer by batch ID.
func (ps *service) GetStampIssuer(batchID []byte) (*StampIssuer, error) { func (ps *service) GetStampIssuer(batchID []byte) (*StampIssuer, error) {
ps.lock.Lock() ps.lock.Lock()
......
...@@ -113,8 +113,10 @@ func toSignDigest(addr, batchId, index, timestamp []byte) ([]byte, error) { ...@@ -113,8 +113,10 @@ func toSignDigest(addr, batchId, index, timestamp []byte) ([]byte, error) {
return h.Sum(nil), nil return h.Sum(nil), nil
} }
type ValidStampFn func(chunk swarm.Chunk, stampBytes []byte) (swarm.Chunk, error)
// ValidStamp returns a stampvalidator function passed to protocols with chunk entrypoints. // ValidStamp returns a stampvalidator function passed to protocols with chunk entrypoints.
func ValidStamp(batchStore Storer) func(chunk swarm.Chunk, stampBytes []byte) (swarm.Chunk, error) { func ValidStamp(batchStore Storer) ValidStampFn {
return func(chunk swarm.Chunk, stampBytes []byte) (swarm.Chunk, error) { return func(chunk swarm.Chunk, stampBytes []byte) (swarm.Chunk, error) {
stamp := new(Stamp) stamp := new(Stamp)
err := stamp.UnmarshalBinary(stampBytes) err := stamp.UnmarshalBinary(stampBytes)
......
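// Illustrative sketch (not part of this change): with ValidStampFn exported as a named type,
// callers that do not need real validation (for example certain tests) could inject a
// permissive stand-in instead of postage.ValidStamp(batchStore). "noopValidStamp" is a name
// invented for this example.
var noopValidStamp postage.ValidStampFn = func(ch swarm.Chunk, _ []byte) (swarm.Chunk, error) {
	return ch, nil
}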
...@@ -145,6 +145,13 @@ func (si *StampIssuer) BucketDepth() uint8 { ...@@ -145,6 +145,13 @@ func (si *StampIssuer) BucketDepth() uint8 {
return si.data.BucketDepth return si.data.BucketDepth
} }
// BucketUpperBound returns the maximum number of collisions
// possible in a bucket given the batch's depth and bucket
// depth.
func (si *StampIssuer) BucketUpperBound() uint32 {
return 1 << (si.Depth() - si.BucketDepth())
}
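// Worked example (illustrative, not part of this change): for a batch with Depth() == 20 and
// BucketDepth() == 16 there are 1<<16 = 65536 collision buckets, and BucketUpperBound returns
// 1 << (20-16) = 16, i.e. at most 16 stamps can be issued for chunks falling into any single
// bucket before that bucket is full.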
// BlockNumber when this batch was created. // BlockNumber when this batch was created.
func (si *StampIssuer) BlockNumber() uint64 { func (si *StampIssuer) BlockNumber() uint64 {
return si.data.BlockNumber return si.data.BlockNumber
...@@ -154,3 +161,11 @@ func (si *StampIssuer) BlockNumber() uint64 { ...@@ -154,3 +161,11 @@ func (si *StampIssuer) BlockNumber() uint64 {
func (si *StampIssuer) ImmutableFlag() bool { func (si *StampIssuer) ImmutableFlag() bool {
return si.data.ImmutableFlag return si.data.ImmutableFlag
} }
func (si *StampIssuer) Buckets() []uint32 {
si.bucketMu.Lock()
b := make([]uint32, len(si.data.Buckets))
copy(b, si.data.Buckets)
si.bucketMu.Unlock()
return b
}
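// Illustrative sketch (not part of this change) combining the two accessors added above:
// checking whether a given collision bucket of an issuer still has room for another stamp.
// "bucketHasCapacity" is a name invented for this example.
func bucketHasCapacity(si *StampIssuer, bucket uint32) bool {
	counts := si.Buckets() // snapshot of the per-bucket fill counts
	if int(bucket) >= len(counts) {
		return false
	}
	return counts[bucket] < si.BucketUpperBound()
}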
...@@ -49,7 +49,7 @@ type Service struct { ...@@ -49,7 +49,7 @@ type Service struct {
paymentThresholdObserver PaymentThresholdObserver paymentThresholdObserver PaymentThresholdObserver
} }
func New(streamer p2p.Streamer, logger logging.Logger, paymentThreshold *big.Int, minThreshold *big.Int) *Service { func New(streamer p2p.Streamer, logger logging.Logger, paymentThreshold, minThreshold *big.Int) *Service {
return &Service{ return &Service{
streamer: streamer, streamer: streamer,
logger: logger, logger: logger,
......
...@@ -30,11 +30,11 @@ func BenchmarkWrap(b *testing.B) { ...@@ -30,11 +30,11 @@ func BenchmarkWrap(b *testing.B) {
depth int depth int
}{ }{
{1, 1}, {1, 1},
{4, 1}, {256, 2},
{16, 1}, {8, 1},
{256, 1},
{16, 2}, {16, 2},
{64, 2}, {64, 2},
{256, 2},
{256, 3}, {256, 3},
{4096, 3}, {4096, 3},
{16384, 3}, {16384, 3},
...@@ -46,12 +46,13 @@ func BenchmarkWrap(b *testing.B) { ...@@ -46,12 +46,13 @@ func BenchmarkWrap(b *testing.B) {
b.Fatal(err) b.Fatal(err)
} }
pubkey := &key.PublicKey pubkey := &key.PublicKey
ctx := context.Background()
for _, c := range cases { for _, c := range cases {
name := fmt.Sprintf("length:%d,depth:%d", c.length, c.depth) name := fmt.Sprintf("length:%d,depth:%d", c.length, c.depth)
b.Run(name, func(b *testing.B) { b.Run(name, func(b *testing.B) {
targets := newTargets(c.length, c.depth) targets := newTargets(c.length, c.depth)
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
if _, err := pss.Wrap(context.Background(), topic, msg, pubkey, targets); err != nil { if _, err := pss.Wrap(ctx, topic, msg, pubkey, targets); err != nil {
b.Fatal(err) b.Fatal(err)
} }
} }
......
...@@ -13,8 +13,7 @@ import ( ...@@ -13,8 +13,7 @@ import (
"encoding/hex" "encoding/hex"
"errors" "errors"
"fmt" "fmt"
"math" "io"
"math/big"
"github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/btcec"
"github.com/ethersphere/bee/pkg/bmtpool" "github.com/ethersphere/bee/pkg/bmtpool"
...@@ -22,6 +21,7 @@ import ( ...@@ -22,6 +21,7 @@ import (
"github.com/ethersphere/bee/pkg/encryption" "github.com/ethersphere/bee/pkg/encryption"
"github.com/ethersphere/bee/pkg/encryption/elgamal" "github.com/ethersphere/bee/pkg/encryption/elgamal"
"github.com/ethersphere/bee/pkg/swarm" "github.com/ethersphere/bee/pkg/swarm"
"golang.org/x/sync/errgroup"
) )
var ( var (
...@@ -33,8 +33,6 @@ var ( ...@@ -33,8 +33,6 @@ var (
// ErrVarLenTargets is returned when the given target list for a trojan chunk has addresses of different lengths // ErrVarLenTargets is returned when the given target list for a trojan chunk has addresses of different lengths
ErrVarLenTargets = errors.New("target list cannot have targets of different length") ErrVarLenTargets = errors.New("target list cannot have targets of different length")
maxUint32 = big.NewInt(math.MaxUint32)
) )
// Topic is the type that classifies messages, allows client applications to subscribe to // Topic is the type that classifies messages, allows client applications to subscribe to
...@@ -204,61 +202,53 @@ func contains(col Targets, elem []byte) bool { ...@@ -204,61 +202,53 @@ func contains(col Targets, elem []byte) bool {
// mine iteratively enumerates different nonces until the address (BMT hash) of the chunk has one of the targets as its prefix // mine iteratively enumerates different nonces until the address (BMT hash) of the chunk has one of the targets as its prefix
func mine(ctx context.Context, odd bool, f func(nonce []byte) (swarm.Chunk, error)) (swarm.Chunk, error) { func mine(ctx context.Context, odd bool, f func(nonce []byte) (swarm.Chunk, error)) (swarm.Chunk, error) {
seeds := make([]uint32, 8)
for i := range seeds {
b, err := random.Int(random.Reader, maxUint32)
if err != nil {
return nil, err
}
seeds[i] = uint32(b.Int64())
}
initnonce := make([]byte, 32) initnonce := make([]byte, 32)
for i := 0; i < 8; i++ { if _, err := io.ReadFull(random.Reader, initnonce); err != nil {
binary.LittleEndian.PutUint32(initnonce[i*4:i*4+4], seeds[i]) return nil, err
} }
if odd { if odd {
initnonce[28] |= 0x01 initnonce[28] |= 0x01
} else { } else {
initnonce[28] &= 0xfe initnonce[28] &= 0xfe
} }
seeds[7] = binary.LittleEndian.Uint32(initnonce[28:32]) ctx, cancel := context.WithCancel(ctx)
defer cancel()
quit := make(chan struct{}) eg, ctx := errgroup.WithContext(ctx)
// make both errs and result channels buffered so they never block
result := make(chan swarm.Chunk, 8) result := make(chan swarm.Chunk, 8)
errs := make(chan error, 8)
for i := 0; i < 8; i++ { for i := 0; i < 8; i++ {
go func(j int) { eg.Go(func() error {
nonce := make([]byte, 32) nonce := make([]byte, 32)
copy(nonce, initnonce) copy(nonce, initnonce)
for seed := seeds[j]; ; seed++ { for {
binary.LittleEndian.PutUint32(nonce[j*4:j*4+4], seed) select {
case <-ctx.Done():
return ctx.Err()
default:
}
if _, err := io.ReadFull(random.Reader, nonce[:4]); err != nil {
return err
}
res, err := f(nonce) res, err := f(nonce)
if err != nil { if err != nil {
errs <- err return err
return
} }
if res != nil { if res != nil {
result <- res result <- res
return return nil
}
select {
case <-quit:
return
default:
} }
} }
}(i) })
} }
defer close(quit) var err error
select { go func() {
case <-ctx.Done(): err = eg.Wait()
return nil, ctx.Err() result <- nil
case err := <-errs: }()
r := <-result
if r == nil {
return nil, err return nil, err
case res := <-result:
return res, nil
} }
return r, nil
} }
// extracts the ephemeral public key from the chunk data to use with ElGamal // extracts the ephemeral public key from the chunk data to use with ElGamal
......
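The rewritten mine above is easier to follow outside the side-by-side diff. Below is a minimal sketch of the pattern it adopts — eight workers under an errgroup, a buffered result channel, context cancellation, and fresh random nonce bytes on every attempt. Here solve is a made-up stand-in for the BMT-hashing callback f, and the odd/even parity handling is dropped for brevity.

package main

import (
	"context"
	"crypto/rand"
	"fmt"
	"io"

	"golang.org/x/sync/errgroup"
)

// solve stands in for the f callback handed to mine: it returns a non-nil
// result when the nonce produces an address with the wanted prefix.
func solve(nonce []byte) ([]byte, error) {
	if nonce[0] == 0x00 { // placeholder success condition
		return append([]byte(nil), nonce...), nil
	}
	return nil, nil
}

func mine(ctx context.Context) ([]byte, error) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	eg, ctx := errgroup.WithContext(ctx)
	// buffered so a worker that finds a result never blocks on send
	result := make(chan []byte, 8)

	for i := 0; i < 8; i++ {
		eg.Go(func() error {
			nonce := make([]byte, 32)
			for {
				select {
				case <-ctx.Done():
					return ctx.Err()
				default:
				}
				// fresh randomness every attempt instead of per-worker counters
				if _, err := io.ReadFull(rand.Reader, nonce[:4]); err != nil {
					return err
				}
				res, err := solve(nonce)
				if err != nil {
					return err
				}
				if res != nil {
					result <- res
					return nil
				}
			}
		})
	}

	// if every worker errors out, unblock the receive below with a nil marker
	var err error
	go func() {
		err = eg.Wait()
		result <- nil
	}()

	if r := <-result; r != nil {
		return r, nil
	}
	return nil, err
}

func main() {
	r, err := mine(context.Background())
	fmt.Printf("nonce=%x err=%v\n", r, err)
}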
...@@ -166,13 +166,14 @@ func (p *PullSyncMock) SyncInterval(ctx context.Context, peer swarm.Address, bin ...@@ -166,13 +166,14 @@ func (p *PullSyncMock) SyncInterval(ctx context.Context, peer swarm.Address, bin
return 0, 1, context.Canceled return 0, 1, context.Canceled
} }
if isLive && len(p.liveSyncReplies) > 0 { if isLive && len(p.liveSyncReplies) > 0 {
p.mtx.Lock()
if p.liveSyncCalls >= len(p.liveSyncReplies) { if p.liveSyncCalls >= len(p.liveSyncReplies) {
p.mtx.Unlock()
<-p.quit <-p.quit
// when shutting down, on the puller side we cancel the context going into the pullsync protocol request // when shutting down, on the puller side we cancel the context going into the pullsync protocol request
// this results in SyncInterval returning with a context cancelled error // this results in SyncInterval returning with a context cancelled error
return 0, 0, context.Canceled return 0, 0, context.Canceled
} }
p.mtx.Lock()
v := p.liveSyncReplies[p.liveSyncCalls] v := p.liveSyncReplies[p.liveSyncCalls]
p.liveSyncCalls++ p.liveSyncCalls++
p.mtx.Unlock() p.mtx.Unlock()
......
...@@ -21,6 +21,7 @@ import ( ...@@ -21,6 +21,7 @@ import (
"github.com/ethersphere/bee/pkg/logging" "github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/p2p" "github.com/ethersphere/bee/pkg/p2p"
"github.com/ethersphere/bee/pkg/p2p/protobuf" "github.com/ethersphere/bee/pkg/p2p/protobuf"
"github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/pullsync/pb" "github.com/ethersphere/bee/pkg/pullsync/pb"
"github.com/ethersphere/bee/pkg/pullsync/pullstorage" "github.com/ethersphere/bee/pkg/pullsync/pullstorage"
"github.com/ethersphere/bee/pkg/soc" "github.com/ethersphere/bee/pkg/soc"
...@@ -67,7 +68,7 @@ type Syncer struct { ...@@ -67,7 +68,7 @@ type Syncer struct {
quit chan struct{} quit chan struct{}
wg sync.WaitGroup wg sync.WaitGroup
unwrap func(swarm.Chunk) unwrap func(swarm.Chunk)
validStamp func(swarm.Chunk, []byte) (swarm.Chunk, error) validStamp postage.ValidStampFn
ruidMtx sync.Mutex ruidMtx sync.Mutex
ruidCtx map[uint32]func() ruidCtx map[uint32]func()
...@@ -76,7 +77,7 @@ type Syncer struct { ...@@ -76,7 +77,7 @@ type Syncer struct {
io.Closer io.Closer
} }
func New(streamer p2p.Streamer, storage pullstorage.Storer, unwrap func(swarm.Chunk), validStamp func(swarm.Chunk, []byte) (swarm.Chunk, error), logger logging.Logger) *Syncer { func New(streamer p2p.Streamer, storage pullstorage.Storer, unwrap func(swarm.Chunk), validStamp postage.ValidStampFn, logger logging.Logger) *Syncer {
return &Syncer{ return &Syncer{
streamer: streamer, streamer: streamer,
storage: storage, storage: storage,
...@@ -228,7 +229,8 @@ func (s *Syncer) SyncInterval(ctx context.Context, peer swarm.Address, bin uint8 ...@@ -228,7 +229,8 @@ func (s *Syncer) SyncInterval(ctx context.Context, peer swarm.Address, bin uint8
chunk := swarm.NewChunk(addr, delivery.Data) chunk := swarm.NewChunk(addr, delivery.Data)
if chunk, err = s.validStamp(chunk, delivery.Stamp); err != nil { if chunk, err = s.validStamp(chunk, delivery.Stamp); err != nil {
return 0, ru.Ruid, err s.logger.Debugf("unverified chunk: %v", err)
continue
} }
if cac.Valid(chunk) { if cac.Valid(chunk) {
......
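The SyncInterval hunk changes the failure mode for stamp validation: a chunk with an invalid stamp is now logged and skipped rather than failing the whole sync interval. A condensed sketch of that loop shape, assuming a simplified delivery struct in place of the protobuf message and a validator with the postage.ValidStampFn shape (func(swarm.Chunk, []byte) (swarm.Chunk, error)):

package main

import (
	"context"
	"log"

	"github.com/ethersphere/bee/pkg/swarm"
)

// validStampFn mirrors the postage.ValidStampFn shape the Syncer now accepts.
type validStampFn func(ch swarm.Chunk, stamp []byte) (swarm.Chunk, error)

// delivery is a simplified stand-in for the pullsync protobuf delivery message.
type delivery struct {
	Address, Data, Stamp []byte
}

// storeDeliveries skips chunks whose stamps fail validation instead of
// aborting the whole interval, mirroring the new continue branch above.
func storeDeliveries(ctx context.Context, deliveries []delivery, validStamp validStampFn, put func(context.Context, swarm.Chunk) error) error {
	for _, d := range deliveries {
		chunk := swarm.NewChunk(swarm.NewAddress(d.Address), d.Data)
		chunk, err := validStamp(chunk, d.Stamp)
		if err != nil {
			log.Printf("unverified chunk %s: %v", swarm.NewAddress(d.Address), err)
			continue // tolerate the bad stamp, keep syncing
		}
		if err := put(ctx, chunk); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	noop := func(ch swarm.Chunk, _ []byte) (swarm.Chunk, error) { return ch, nil }
	put := func(context.Context, swarm.Chunk) error { return nil }
	_ = storeDeliveries(context.Background(), nil, noop, put)
}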
...@@ -19,6 +19,7 @@ import ( ...@@ -19,6 +19,7 @@ import (
"github.com/ethersphere/bee/pkg/logging" "github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/p2p" "github.com/ethersphere/bee/pkg/p2p"
"github.com/ethersphere/bee/pkg/p2p/protobuf" "github.com/ethersphere/bee/pkg/p2p/protobuf"
"github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/pricer" "github.com/ethersphere/bee/pkg/pricer"
"github.com/ethersphere/bee/pkg/pushsync/pb" "github.com/ethersphere/bee/pkg/pushsync/pb"
"github.com/ethersphere/bee/pkg/soc" "github.com/ethersphere/bee/pkg/soc"
...@@ -71,7 +72,7 @@ type PushSync struct { ...@@ -71,7 +72,7 @@ type PushSync struct {
pricer pricer.Interface pricer pricer.Interface
metrics metrics metrics metrics
tracer *tracing.Tracer tracer *tracing.Tracer
validStamp func(swarm.Chunk, []byte) (swarm.Chunk, error) validStamp postage.ValidStampFn
signer crypto.Signer signer crypto.Signer
isFullNode bool isFullNode bool
warmupPeriod time.Time warmupPeriod time.Time
...@@ -82,7 +83,7 @@ var defaultTTL = 20 * time.Second // request time to live ...@@ -82,7 +83,7 @@ var defaultTTL = 20 * time.Second // request time to live
var timeToWaitForPushsyncToNeighbor = 3 * time.Second // time to wait to get a receipt for a chunk var timeToWaitForPushsyncToNeighbor = 3 * time.Second // time to wait to get a receipt for a chunk
var nPeersToPushsync = 3 // number of peers to replicate to as receipt is sent upstream var nPeersToPushsync = 3 // number of peers to replicate to as receipt is sent upstream
func New(address swarm.Address, blockHash []byte, streamer p2p.StreamerDisconnecter, storer storage.Putter, topology topology.Driver, tagger *tags.Tags, isFullNode bool, unwrap func(swarm.Chunk), validStamp func(swarm.Chunk, []byte) (swarm.Chunk, error), logger logging.Logger, accounting accounting.Interface, pricer pricer.Interface, signer crypto.Signer, tracer *tracing.Tracer, warmupTime time.Duration) *PushSync { func New(address swarm.Address, blockHash []byte, streamer p2p.StreamerDisconnecter, storer storage.Putter, topology topology.Driver, tagger *tags.Tags, isFullNode bool, unwrap func(swarm.Chunk), validStamp postage.ValidStampFn, logger logging.Logger, accounting accounting.Interface, pricer pricer.Interface, signer crypto.Signer, tracer *tracing.Tracer, warmupTime time.Duration) *PushSync {
ps := &PushSync{ ps := &PushSync{
address: address, address: address,
blockHash: blockHash, blockHash: blockHash,
...@@ -139,9 +140,14 @@ func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) ...@@ -139,9 +140,14 @@ func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream)
ps.metrics.TotalReceived.Inc() ps.metrics.TotalReceived.Inc()
chunk := swarm.NewChunk(swarm.NewAddress(ch.Address), ch.Data) chunk := swarm.NewChunk(swarm.NewAddress(ch.Address), ch.Data)
if chunk, err = ps.validStamp(chunk, ch.Stamp); err != nil { chunkAddress := chunk.Address()
return fmt.Errorf("pushsync valid stamp: %w", err) stamp := new(postage.Stamp)
// attaching the stamp is required because pushToClosest expects a chunk with a stamp
err = stamp.UnmarshalBinary(ch.Stamp)
if err != nil {
return fmt.Errorf("pushsync stamp unmarshall: %w", err)
} }
chunk.WithStamp(stamp)
if cac.Valid(chunk) { if cac.Valid(chunk) {
if ps.unwrap != nil { if ps.unwrap != nil {
...@@ -151,16 +157,21 @@ func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) ...@@ -151,16 +157,21 @@ func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream)
return swarm.ErrInvalidChunk return swarm.ErrInvalidChunk
} }
price := ps.pricer.Price(chunk.Address()) price := ps.pricer.Price(chunkAddress)
// if the peer is closer to the chunk, AND it's a full node, we were selected for replication. Return early. // if the peer is closer to the chunk, AND it's a full node, we were selected for replication. Return early.
if p.FullNode { if p.FullNode {
bytes := chunk.Address().Bytes() bytes := chunkAddress.Bytes()
if dcmp, _ := swarm.DistanceCmp(bytes, p.Address.Bytes(), ps.address.Bytes()); dcmp == 1 { if dcmp, _ := swarm.DistanceCmp(bytes, p.Address.Bytes(), ps.address.Bytes()); dcmp == 1 {
if ps.topologyDriver.IsWithinDepth(chunk.Address()) { if ps.topologyDriver.IsWithinDepth(chunkAddress) {
ctxd, canceld := context.WithTimeout(context.Background(), timeToWaitForPushsyncToNeighbor) ctxd, canceld := context.WithTimeout(context.Background(), timeToWaitForPushsyncToNeighbor)
defer canceld() defer canceld()
chunk, err = ps.validStamp(chunk, ch.Stamp)
if err != nil {
return fmt.Errorf("pushsync valid stamp: %w", err)
}
_, err = ps.storer.Put(ctxd, storage.ModePutSync, chunk) _, err = ps.storer.Put(ctxd, storage.ModePutSync, chunk)
if err != nil { if err != nil {
return fmt.Errorf("chunk store: %w", err) return fmt.Errorf("chunk store: %w", err)
...@@ -191,7 +202,13 @@ func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) ...@@ -191,7 +202,13 @@ func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream)
// forwarding replication // forwarding replication
storedChunk := false storedChunk := false
if ps.topologyDriver.IsWithinDepth(chunk.Address()) { if ps.topologyDriver.IsWithinDepth(chunkAddress) {
chunk, err = ps.validStamp(chunk, ch.Stamp)
if err != nil {
return fmt.Errorf("pushsync valid stamp: %w", err)
}
_, err = ps.storer.Put(ctx, storage.ModePutSync, chunk) _, err = ps.storer.Put(ctx, storage.ModePutSync, chunk)
if err != nil { if err != nil {
ps.logger.Warningf("pushsync: within depth peer's attempt to store chunk failed: %v", err) ps.logger.Warningf("pushsync: within depth peer's attempt to store chunk failed: %v", err)
...@@ -200,13 +217,19 @@ func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) ...@@ -200,13 +217,19 @@ func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream)
} }
} }
span, _, ctx := ps.tracer.StartSpanFromContext(ctx, "pushsync-handler", ps.logger, opentracing.Tag{Key: "address", Value: chunk.Address().String()}) span, _, ctx := ps.tracer.StartSpanFromContext(ctx, "pushsync-handler", ps.logger, opentracing.Tag{Key: "address", Value: chunkAddress.String()})
defer span.Finish() defer span.Finish()
receipt, err := ps.pushToClosest(ctx, chunk, false, p.Address) receipt, err := ps.pushToClosest(ctx, chunk, false, p.Address)
if err != nil { if err != nil {
if errors.Is(err, topology.ErrWantSelf) { if errors.Is(err, topology.ErrWantSelf) {
if !storedChunk { if !storedChunk {
chunk, err = ps.validStamp(chunk, ch.Stamp)
if err != nil {
return fmt.Errorf("pushsync valid stamp: %w", err)
}
_, err = ps.storer.Put(ctx, storage.ModePutSync, chunk) _, err = ps.storer.Put(ctx, storage.ModePutSync, chunk)
if err != nil { if err != nil {
return fmt.Errorf("chunk store: %w", err) return fmt.Errorf("chunk store: %w", err)
...@@ -225,7 +248,7 @@ func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) ...@@ -225,7 +248,7 @@ func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream)
} }
defer debit.Cleanup() defer debit.Cleanup()
receipt := pb.Receipt{Address: chunk.Address().Bytes(), Signature: signature, BlockHash: ps.blockHash} receipt := pb.Receipt{Address: chunkAddress.Bytes(), Signature: signature, BlockHash: ps.blockHash}
if err := w.WriteMsgWithContext(ctx, &receipt); err != nil { if err := w.WriteMsgWithContext(ctx, &receipt); err != nil {
return fmt.Errorf("send receipt to peer %s: %w", p.Address.String(), err) return fmt.Errorf("send receipt to peer %s: %w", p.Address.String(), err)
} }
...@@ -295,10 +318,14 @@ func (ps *PushSync) pushToClosest(ctx context.Context, ch swarm.Chunk, retryAllo ...@@ -295,10 +318,14 @@ func (ps *PushSync) pushToClosest(ctx context.Context, ch swarm.Chunk, retryAllo
// in which case we should return immediately. // in which case we should return immediately.
// if ErrWantSelf is returned, it means we are the closest peer. // if ErrWantSelf is returned, it means we are the closest peer.
if errors.Is(err, topology.ErrWantSelf) { if errors.Is(err, topology.ErrWantSelf) {
if time.Now().Before(ps.warmupPeriod) { if !ps.warmedUp() {
return nil, ErrWarmup return nil, ErrWarmup
} }
if !ps.topologyDriver.IsWithinDepth(ch.Address()) {
return nil, ErrNoPush
}
count := 0 count := 0
// Push the chunk to some peers in the neighborhood in parallel for replication. // Push the chunk to some peers in the neighborhood in parallel for replication.
// Any errors here should NOT impact the rest of the handler. // Any errors here should NOT impact the rest of the handler.
...@@ -308,6 +335,12 @@ func (ps *PushSync) pushToClosest(ctx context.Context, ch swarm.Chunk, retryAllo ...@@ -308,6 +335,12 @@ func (ps *PushSync) pushToClosest(ctx context.Context, ch swarm.Chunk, retryAllo
return false, false, nil return false, false, nil
} }
// here we skip the peer if the peer is closer to the chunk than us
// we replicate with peers that are further away than us because we are the storer
if dcmp, _ := swarm.DistanceCmp(ch.Address().Bytes(), peer.Bytes(), ps.address.Bytes()); dcmp == 1 {
return false, false, nil
}
if count == nPeersToPushsync { if count == nPeersToPushsync {
return true, false, nil return true, false, nil
} }
...@@ -342,7 +375,16 @@ func (ps *PushSync) pushToClosest(ctx context.Context, ch swarm.Chunk, retryAllo ...@@ -342,7 +375,16 @@ func (ps *PushSync) pushToClosest(ctx context.Context, ch swarm.Chunk, retryAllo
} }
if err != nil { if err != nil {
logger.Debugf("could not push to peer %s: %v", peer, err) logger.Debugf("could not push to peer %s: %v", peer, err)
resultC <- &pushResult{err: err, attempted: attempted}
// if the node has warmed up AND no other closer peer has been tried
if ps.warmedUp() && !ps.skipList.HasChunk(ch.Address()) {
ps.skipList.Add(peer, ch.Address(), skipPeerExpiration)
}
select {
case resultC <- &pushResult{err: err, attempted: attempted}:
case <-ctx.Done():
}
return return
} }
select { select {
...@@ -360,11 +402,6 @@ func (ps *PushSync) pushToClosest(ctx context.Context, ch swarm.Chunk, retryAllo ...@@ -360,11 +402,6 @@ func (ps *PushSync) pushToClosest(ctx context.Context, ch swarm.Chunk, retryAllo
} }
if r.err != nil && r.attempted { if r.err != nil && r.attempted {
ps.metrics.TotalFailedSendAttempts.Inc() ps.metrics.TotalFailedSendAttempts.Inc()
// if the node has warmed up AND no other closer peer has been tried
if time.Now().After(ps.warmupPeriod) && !ps.skipList.HasChunk(ch.Address()) {
ps.skipList.Add(peer, ch.Address(), skipPeerExpiration)
}
} }
case <-ctx.Done(): case <-ctx.Done():
return nil, ctx.Err() return nil, ctx.Err()
...@@ -503,6 +540,10 @@ func (ps *PushSync) pushToNeighbour(peer swarm.Address, ch swarm.Chunk, origin b ...@@ -503,6 +540,10 @@ func (ps *PushSync) pushToNeighbour(peer swarm.Address, ch swarm.Chunk, origin b
err = ps.accounting.Credit(peer, receiptPrice, origin) err = ps.accounting.Credit(peer, receiptPrice, origin)
} }
func (ps *PushSync) warmedUp() bool {
return time.Now().After(ps.warmupPeriod)
}
type peerSkipList struct { type peerSkipList struct {
sync.Mutex sync.Mutex
chunks map[string]struct{} chunks map[string]struct{}
...@@ -516,7 +557,7 @@ func newPeerSkipList() *peerSkipList { ...@@ -516,7 +557,7 @@ func newPeerSkipList() *peerSkipList {
} }
} }
func (l *peerSkipList) Add(peer swarm.Address, chunk swarm.Address, expire time.Duration) { func (l *peerSkipList) Add(peer, chunk swarm.Address, expire time.Duration) {
l.Lock() l.Lock()
defer l.Unlock() defer l.Unlock()
......
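The new skip in pushToClosest ("we replicate with peers that are further away than us because we are the storer") relies on the Kademlia notion of closeness: the overlay whose XOR against the chunk address is smaller is the closer one. The following is a self-contained illustration of that comparison, using the same convention of returning 1 when the first candidate is closer; it is an illustration only, not the swarm package's DistanceCmp.

package main

import "fmt"

// distanceCmp compares how close x and y are to addr under XOR distance:
// it returns 1 when x is closer, -1 when y is closer, 0 when equidistant.
func distanceCmp(addr, x, y []byte) int {
	for i := range addr {
		dx, dy := addr[i]^x[i], addr[i]^y[i]
		switch {
		case dx < dy:
			return 1
		case dx > dy:
			return -1
		}
	}
	return 0
}

func main() {
	chunk := []byte{0xb0, 0x00}
	peer := []byte{0xa0, 0x00} // XOR distance to chunk: 0x10 00
	self := []byte{0x60, 0x00} // XOR distance to chunk: 0xd0 00
	// peer is closer to the chunk than self, so a forwarder at self skips
	// peer when choosing neighbourhood peers to replicate to.
	fmt.Println(distanceCmp(chunk, peer, self)) // 1
}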
...@@ -22,7 +22,6 @@ import ( ...@@ -22,7 +22,6 @@ import (
"github.com/ethersphere/bee/pkg/p2p" "github.com/ethersphere/bee/pkg/p2p"
"github.com/ethersphere/bee/pkg/p2p/protobuf" "github.com/ethersphere/bee/pkg/p2p/protobuf"
"github.com/ethersphere/bee/pkg/p2p/streamtest" "github.com/ethersphere/bee/pkg/p2p/streamtest"
"github.com/ethersphere/bee/pkg/postage"
pricermock "github.com/ethersphere/bee/pkg/pricer/mock" pricermock "github.com/ethersphere/bee/pkg/pricer/mock"
"github.com/ethersphere/bee/pkg/pushsync" "github.com/ethersphere/bee/pkg/pushsync"
"github.com/ethersphere/bee/pkg/pushsync/pb" "github.com/ethersphere/bee/pkg/pushsync/pb"
...@@ -52,6 +51,9 @@ var ( ...@@ -52,6 +51,9 @@ var (
defaultSigner = cryptomock.New(cryptomock.WithSignFunc(func([]byte) ([]byte, error) { defaultSigner = cryptomock.New(cryptomock.WithSignFunc(func([]byte) ([]byte, error) {
return nil, nil return nil, nil
})) }))
WithinDepthMock = mock.WithIsWithinFunc(func(addr swarm.Address) bool {
return true
})
) )
// TestPushClosest inserts a chunk as uploaded chunk in db. This triggers sending a chunk to the closest node // TestPushClosest inserts a chunk as uploaded chunk in db. This triggers sending a chunk to the closest node
...@@ -67,7 +69,7 @@ func TestPushClosest(t *testing.T) { ...@@ -67,7 +69,7 @@ func TestPushClosest(t *testing.T) {
// peer is the node responding to the chunk receipt message // peer is the node responding to the chunk receipt message
// mock should return ErrWantSelf since there's no one to forward to // mock should return ErrWantSelf since there's no one to forward to
psPeer, storerPeer, _, peerAccounting := createPushSyncNode(t, closestPeer, defaultPrices, nil, nil, defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf)) psPeer, storerPeer, _, peerAccounting := createPushSyncNode(t, closestPeer, defaultPrices, nil, nil, defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf), WithinDepthMock)
defer storerPeer.Close() defer storerPeer.Close()
recorder := streamtest.New(streamtest.WithProtocols(psPeer.Protocol()), streamtest.WithBaseAddr(pivotNode)) recorder := streamtest.New(streamtest.WithProtocols(psPeer.Protocol()), streamtest.WithBaseAddr(pivotNode))
...@@ -128,25 +130,24 @@ func TestReplicateBeforeReceipt(t *testing.T) { ...@@ -128,25 +130,24 @@ func TestReplicateBeforeReceipt(t *testing.T) {
// its address is closer to the chunk than secondPeer but it will not receive the chunk // its address is closer to the chunk than secondPeer but it will not receive the chunk
psEmpty, storerEmpty, _, _ := createPushSyncNode(t, emptyPeer, defaultPrices, nil, nil, defaultSigner) psEmpty, storerEmpty, _, _ := createPushSyncNode(t, emptyPeer, defaultPrices, nil, nil, defaultSigner)
defer storerEmpty.Close() defer storerEmpty.Close()
emptyRecorder := streamtest.New(streamtest.WithProtocols(psEmpty.Protocol()), streamtest.WithBaseAddr(secondPeer))
wFunc := func(addr swarm.Address) bool { emptyRecorder := streamtest.New(streamtest.WithProtocols(psEmpty.Protocol()), streamtest.WithBaseAddr(secondPeer))
return true
}
// node that is connected to closestPeer // node that is connected to closestPeer
// will receive chunk from closestPeer // will receive chunk from closestPeer
psSecond, storerSecond, _, secondAccounting := createPushSyncNode(t, secondPeer, defaultPrices, emptyRecorder, nil, defaultSigner, mock.WithPeers(emptyPeer), mock.WithIsWithinFunc(wFunc)) psSecond, storerSecond, _, secondAccounting := createPushSyncNode(t, secondPeer, defaultPrices, emptyRecorder, nil, defaultSigner, mock.WithPeers(emptyPeer), WithinDepthMock)
defer storerSecond.Close() defer storerSecond.Close()
secondRecorder := streamtest.New(streamtest.WithProtocols(psSecond.Protocol()), streamtest.WithBaseAddr(closestPeer)) secondRecorder := streamtest.New(streamtest.WithProtocols(psSecond.Protocol()), streamtest.WithBaseAddr(closestPeer))
psStorer, storerPeer, _, storerAccounting := createPushSyncNode(t, closestPeer, defaultPrices, secondRecorder, nil, defaultSigner, mock.WithPeers(secondPeer), mock.WithClosestPeerErr(topology.ErrWantSelf)) psStorer, storerPeer, _, storerAccounting := createPushSyncNode(t, closestPeer, defaultPrices, secondRecorder, nil, defaultSigner, mock.WithPeers(secondPeer), mock.WithClosestPeerErr(topology.ErrWantSelf), WithinDepthMock)
defer storerPeer.Close() defer storerPeer.Close()
recorder := streamtest.New(streamtest.WithProtocols(psStorer.Protocol()), streamtest.WithBaseAddr(pivotNode)) recorder := streamtest.New(streamtest.WithProtocols(psStorer.Protocol()), streamtest.WithBaseAddr(pivotNode))
// pivot node needs the streamer since the chunk is intercepted by // pivot node needs the streamer since the chunk is intercepted by
// the chunk worker, then gets sent by opening a new stream // the chunk worker, then gets sent by opening a new stream
psPivot, storerPivot, _, pivotAccounting := createPushSyncNode(t, pivotNode, defaultPrices, recorder, nil, defaultSigner, mock.WithClosestPeer(closestPeer)) psPivot, storerPivot, _, pivotAccounting := createPushSyncNode(t, pivotNode, defaultPrices, recorder, nil, defaultSigner, mock.WithPeers(closestPeer))
defer storerPivot.Close() defer storerPivot.Close()
// Trigger the sending of chunk to the closest node // Trigger the sending of chunk to the closest node
...@@ -165,9 +166,6 @@ func TestReplicateBeforeReceipt(t *testing.T) { ...@@ -165,9 +166,6 @@ func TestReplicateBeforeReceipt(t *testing.T) {
// this intercepts the incoming receipt message // this intercepts the incoming receipt message
waitOnRecordAndTest(t, closestPeer, recorder, chunk.Address(), nil) waitOnRecordAndTest(t, closestPeer, recorder, chunk.Address(), nil)
// sleep for a bit to allow the second peer to the store replicated chunk
time.Sleep(time.Millisecond * 500)
// this intercepts the outgoing delivery message from storer node to second storer node // this intercepts the outgoing delivery message from storer node to second storer node
waitOnRecordAndTest(t, secondPeer, secondRecorder, chunk.Address(), chunk.Data()) waitOnRecordAndTest(t, secondPeer, secondRecorder, chunk.Address(), chunk.Data())
...@@ -239,13 +237,13 @@ func TestFailToReplicateBeforeReceipt(t *testing.T) { ...@@ -239,13 +237,13 @@ func TestFailToReplicateBeforeReceipt(t *testing.T) {
defer storerSecond.Close() defer storerSecond.Close()
secondRecorder := streamtest.New(streamtest.WithProtocols(psSecond.Protocol()), streamtest.WithBaseAddr(closestPeer)) secondRecorder := streamtest.New(streamtest.WithProtocols(psSecond.Protocol()), streamtest.WithBaseAddr(closestPeer))
psStorer, storerPeer, _, storerAccounting := createPushSyncNode(t, closestPeer, defaultPrices, secondRecorder, nil, defaultSigner, mock.WithPeers(secondPeer), mock.WithClosestPeerErr(topology.ErrWantSelf)) psStorer, storerPeer, _, storerAccounting := createPushSyncNode(t, closestPeer, defaultPrices, secondRecorder, nil, defaultSigner, mock.WithPeers(secondPeer), mock.WithClosestPeerErr(topology.ErrWantSelf), WithinDepthMock)
defer storerPeer.Close() defer storerPeer.Close()
recorder := streamtest.New(streamtest.WithProtocols(psStorer.Protocol()), streamtest.WithBaseAddr(pivotNode)) recorder := streamtest.New(streamtest.WithProtocols(psStorer.Protocol()), streamtest.WithBaseAddr(pivotNode))
// pivot node needs the streamer since the chunk is intercepted by // pivot node needs the streamer since the chunk is intercepted by
// the chunk worker, then gets sent by opening a new stream // the chunk worker, then gets sent by opening a new stream
psPivot, storerPivot, _, pivotAccounting := createPushSyncNode(t, pivotNode, defaultPrices, recorder, nil, defaultSigner, mock.WithClosestPeer(closestPeer)) psPivot, storerPivot, _, pivotAccounting := createPushSyncNode(t, pivotNode, defaultPrices, recorder, nil, defaultSigner, mock.WithPeers(closestPeer))
defer storerPivot.Close() defer storerPivot.Close()
// Trigger the sending of chunk to the closest node // Trigger the sending of chunk to the closest node
...@@ -264,9 +262,6 @@ func TestFailToReplicateBeforeReceipt(t *testing.T) { ...@@ -264,9 +262,6 @@ func TestFailToReplicateBeforeReceipt(t *testing.T) {
// this intercepts the incoming receipt message // this intercepts the incoming receipt message
waitOnRecordAndTest(t, closestPeer, recorder, chunk.Address(), nil) waitOnRecordAndTest(t, closestPeer, recorder, chunk.Address(), nil)
// sleep for a bit to allow the second peer to the store replicated chunk
time.Sleep(time.Millisecond * 500)
// this intercepts the outgoing delivery message from storer node to second storer node // this intercepts the outgoing delivery message from storer node to second storer node
waitOnRecordAndTest(t, secondPeer, secondRecorder, chunk.Address(), chunk.Data()) waitOnRecordAndTest(t, secondPeer, secondRecorder, chunk.Address(), chunk.Data())
...@@ -325,7 +320,7 @@ func TestPushChunkToClosest(t *testing.T) { ...@@ -325,7 +320,7 @@ func TestPushChunkToClosest(t *testing.T) {
// peer is the node responding to the chunk receipt message // peer is the node responding to the chunk receipt message
// mock should return ErrWantSelf since there's no one to forward to // mock should return ErrWantSelf since there's no one to forward to
psPeer, storerPeer, _, peerAccounting := createPushSyncNode(t, closestPeer, defaultPrices, nil, chanFunc(callbackC), defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf)) psPeer, storerPeer, _, peerAccounting := createPushSyncNode(t, closestPeer, defaultPrices, nil, chanFunc(callbackC), defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf), WithinDepthMock)
defer storerPeer.Close() defer storerPeer.Close()
recorder := streamtest.New(streamtest.WithProtocols(psPeer.Protocol()), streamtest.WithBaseAddr(pivotNode)) recorder := streamtest.New(streamtest.WithProtocols(psPeer.Protocol()), streamtest.WithBaseAddr(pivotNode))
...@@ -416,7 +411,7 @@ func TestPushChunkToNextClosest(t *testing.T) { ...@@ -416,7 +411,7 @@ func TestPushChunkToNextClosest(t *testing.T) {
psPeer1, storerPeer1, _, peerAccounting1 := createPushSyncNode(t, peer1, defaultPrices, nil, nil, defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf)) psPeer1, storerPeer1, _, peerAccounting1 := createPushSyncNode(t, peer1, defaultPrices, nil, nil, defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf))
defer storerPeer1.Close() defer storerPeer1.Close()
psPeer2, storerPeer2, _, peerAccounting2 := createPushSyncNode(t, peer2, defaultPrices, nil, nil, defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf)) psPeer2, storerPeer2, _, peerAccounting2 := createPushSyncNode(t, peer2, defaultPrices, nil, nil, defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf), WithinDepthMock)
defer storerPeer2.Close() defer storerPeer2.Close()
var fail = true var fail = true
...@@ -547,7 +542,7 @@ func TestPushChunkToClosestFailedAttemptRetry(t *testing.T) { ...@@ -547,7 +542,7 @@ func TestPushChunkToClosestFailedAttemptRetry(t *testing.T) {
psPeer3, storerPeer3, _, peerAccounting3 := createPushSyncNode(t, peer3, defaultPrices, nil, nil, defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf)) psPeer3, storerPeer3, _, peerAccounting3 := createPushSyncNode(t, peer3, defaultPrices, nil, nil, defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf))
defer storerPeer3.Close() defer storerPeer3.Close()
psPeer4, storerPeer4, _, peerAccounting4 := createPushSyncNode(t, peer4, defaultPrices, nil, nil, defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf)) psPeer4, storerPeer4, _, peerAccounting4 := createPushSyncNode(t, peer4, defaultPrices, nil, nil, defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf), WithinDepthMock)
defer storerPeer4.Close() defer storerPeer4.Close()
recorder := streamtest.New( recorder := streamtest.New(
...@@ -666,19 +661,19 @@ func TestHandler(t *testing.T) { ...@@ -666,19 +661,19 @@ func TestHandler(t *testing.T) {
closestPeer := swarm.MustParseHexAddress("6000000000000000000000000000000000000000000000000000000000000000") closestPeer := swarm.MustParseHexAddress("6000000000000000000000000000000000000000000000000000000000000000")
// Create the closest peer // Create the closest peer
psClosestPeer, closestStorerPeerDB, _, closestAccounting := createPushSyncNode(t, closestPeer, defaultPrices, nil, nil, defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf)) psClosestPeer, closestStorerPeerDB, _, closestAccounting := createPushSyncNode(t, closestPeer, defaultPrices, nil, nil, defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf), WithinDepthMock)
defer closestStorerPeerDB.Close() defer closestStorerPeerDB.Close()
closestRecorder := streamtest.New(streamtest.WithProtocols(psClosestPeer.Protocol()), streamtest.WithBaseAddr(pivotPeer)) closestRecorder := streamtest.New(streamtest.WithProtocols(psClosestPeer.Protocol()), streamtest.WithBaseAddr(pivotPeer))
// creating the pivot peer // creating the pivot peer
psPivot, storerPivotDB, _, pivotAccounting := createPushSyncNode(t, pivotPeer, defaultPrices, closestRecorder, nil, defaultSigner, mock.WithClosestPeer(closestPeer)) psPivot, storerPivotDB, _, pivotAccounting := createPushSyncNode(t, pivotPeer, defaultPrices, closestRecorder, nil, defaultSigner, mock.WithPeers(closestPeer))
defer storerPivotDB.Close() defer storerPivotDB.Close()
pivotRecorder := streamtest.New(streamtest.WithProtocols(psPivot.Protocol()), streamtest.WithBaseAddr(triggerPeer)) pivotRecorder := streamtest.New(streamtest.WithProtocols(psPivot.Protocol()), streamtest.WithBaseAddr(triggerPeer))
// Creating the trigger peer // Creating the trigger peer
psTriggerPeer, triggerStorerDB, _, triggerAccounting := createPushSyncNode(t, triggerPeer, defaultPrices, pivotRecorder, nil, defaultSigner, mock.WithClosestPeer(pivotPeer)) psTriggerPeer, triggerStorerDB, _, triggerAccounting := createPushSyncNode(t, triggerPeer, defaultPrices, pivotRecorder, nil, defaultSigner, mock.WithPeers(pivotPeer))
defer triggerStorerDB.Close() defer triggerStorerDB.Close()
receipt, err := psTriggerPeer.PushChunkToClosest(context.Background(), chunk) receipt, err := psTriggerPeer.PushChunkToClosest(context.Background(), chunk)
...@@ -754,13 +749,13 @@ func TestSignsReceipt(t *testing.T) { ...@@ -754,13 +749,13 @@ func TestSignsReceipt(t *testing.T) {
closestPeer := swarm.MustParseHexAddress("6000000000000000000000000000000000000000000000000000000000000000") closestPeer := swarm.MustParseHexAddress("6000000000000000000000000000000000000000000000000000000000000000")
// Create the closest peer // Create the closest peer
psClosestPeer, closestStorerPeerDB, _, _ := createPushSyncNode(t, closestPeer, defaultPrices, nil, nil, signer, mock.WithClosestPeerErr(topology.ErrWantSelf)) psClosestPeer, closestStorerPeerDB, _, _ := createPushSyncNode(t, closestPeer, defaultPrices, nil, nil, signer, mock.WithClosestPeerErr(topology.ErrWantSelf), WithinDepthMock)
defer closestStorerPeerDB.Close() defer closestStorerPeerDB.Close()
closestRecorder := streamtest.New(streamtest.WithProtocols(psClosestPeer.Protocol()), streamtest.WithBaseAddr(pivotPeer)) closestRecorder := streamtest.New(streamtest.WithProtocols(psClosestPeer.Protocol()), streamtest.WithBaseAddr(pivotPeer))
// creating the pivot peer who will act as a forwarder node with a higher price (17) // creating the pivot peer who will act as a forwarder node with a higher price (17)
psPivot, storerPivotDB, _, _ := createPushSyncNode(t, pivotPeer, defaultPrices, closestRecorder, nil, signer, mock.WithClosestPeer(closestPeer)) psPivot, storerPivotDB, _, _ := createPushSyncNode(t, pivotPeer, defaultPrices, closestRecorder, nil, signer, mock.WithPeers(closestPeer))
defer storerPivotDB.Close() defer storerPivotDB.Close()
receipt, err := psPivot.PushChunkToClosest(context.Background(), chunk) receipt, err := psPivot.PushChunkToClosest(context.Background(), chunk)
...@@ -784,86 +779,6 @@ func TestSignsReceipt(t *testing.T) { ...@@ -784,86 +779,6 @@ func TestSignsReceipt(t *testing.T) {
t.Fatal("receipt block hash do not match") t.Fatal("receipt block hash do not match")
} }
} }
func createPushSyncNode(t *testing.T, addr swarm.Address, prices pricerParameters, recorder *streamtest.Recorder, unwrap func(swarm.Chunk), signer crypto.Signer, mockOpts ...mock.Option) (*pushsync.PushSync, *mocks.MockStorer, *tags.Tags, accounting.Interface) {
t.Helper()
mockAccounting := accountingmock.NewAccounting()
ps, mstorer, ts := createPushSyncNodeWithAccounting(t, addr, prices, recorder, unwrap, signer, mockAccounting, mockOpts...)
return ps, mstorer, ts, mockAccounting
}
func createPushSyncNodeWithAccounting(t *testing.T, addr swarm.Address, prices pricerParameters, recorder *streamtest.Recorder, unwrap func(swarm.Chunk), signer crypto.Signer, acct accounting.Interface, mockOpts ...mock.Option) (*pushsync.PushSync, *mocks.MockStorer, *tags.Tags) {
t.Helper()
logger := logging.New(ioutil.Discard, 0)
storer := mocks.NewStorer()
mockTopology := mock.NewTopologyDriver(mockOpts...)
mockStatestore := statestore.NewStateStore()
mtag := tags.NewTags(mockStatestore, logger)
mockPricer := pricermock.NewMockService(prices.price, prices.peerPrice)
recorderDisconnecter := streamtest.NewRecorderDisconnecter(recorder)
if unwrap == nil {
unwrap = func(swarm.Chunk) {}
}
validStamp := func(ch swarm.Chunk, stamp []byte) (swarm.Chunk, error) {
return ch.WithStamp(postage.NewStamp(nil, nil, nil, nil)), nil
}
return pushsync.New(addr, blockHash.Bytes(), recorderDisconnecter, storer, mockTopology, mtag, true, unwrap, validStamp, logger, acct, mockPricer, signer, nil, 0), storer, mtag
}
func waitOnRecordAndTest(t *testing.T, peer swarm.Address, recorder *streamtest.Recorder, add swarm.Address, data []byte) {
t.Helper()
records := recorder.WaitRecords(t, peer, pushsync.ProtocolName, pushsync.ProtocolVersion, pushsync.StreamName, 1, 5)
if data != nil {
messages, err := protobuf.ReadMessages(
bytes.NewReader(records[0].In()),
func() protobuf.Message { return new(pb.Delivery) },
)
if err != nil {
t.Fatal(err)
}
if messages == nil {
t.Fatal("nil rcvd. for message")
}
if len(messages) > 1 {
t.Fatal("too many messages")
}
delivery := messages[0].(*pb.Delivery)
if !bytes.Equal(delivery.Address, add.Bytes()) {
t.Fatalf("chunk address mismatch")
}
if !bytes.Equal(delivery.Data, data) {
t.Fatalf("chunk data mismatch")
}
} else {
messages, err := protobuf.ReadMessages(
bytes.NewReader(records[0].In()),
func() protobuf.Message { return new(pb.Receipt) },
)
if err != nil {
t.Fatal(err)
}
if messages == nil {
t.Fatal("nil rcvd. for message")
}
if len(messages) > 1 {
t.Fatal("too many messages")
}
receipt := messages[0].(*pb.Receipt)
receiptAddress := swarm.NewAddress(receipt.Address)
if !receiptAddress.Equal(add) {
t.Fatalf("receipt address mismatch")
}
}
}
func TestPeerSkipList(t *testing.T) { func TestPeerSkipList(t *testing.T) {
skipList := pushsync.NewPeerSkipList() skipList := pushsync.NewPeerSkipList()
...@@ -915,7 +830,7 @@ func TestPushChunkToClosestSkipFailed(t *testing.T) { ...@@ -915,7 +830,7 @@ func TestPushChunkToClosestSkipFailed(t *testing.T) {
psPeer2, storerPeer2, _, _ := createPushSyncNode(t, peer2, defaultPrices, nil, nil, defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf)) psPeer2, storerPeer2, _, _ := createPushSyncNode(t, peer2, defaultPrices, nil, nil, defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf))
defer storerPeer2.Close() defer storerPeer2.Close()
psPeer3, storerPeer3, _, _ := createPushSyncNode(t, peer3, defaultPrices, nil, nil, defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf)) psPeer3, storerPeer3, _, _ := createPushSyncNode(t, peer3, defaultPrices, nil, nil, defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf), WithinDepthMock)
defer storerPeer3.Close() defer storerPeer3.Close()
var ( var (
...@@ -984,6 +899,86 @@ func TestPushChunkToClosestSkipFailed(t *testing.T) { ...@@ -984,6 +899,86 @@ func TestPushChunkToClosestSkipFailed(t *testing.T) {
} }
} }
func createPushSyncNode(t *testing.T, addr swarm.Address, prices pricerParameters, recorder *streamtest.Recorder, unwrap func(swarm.Chunk), signer crypto.Signer, mockOpts ...mock.Option) (*pushsync.PushSync, *mocks.MockStorer, *tags.Tags, accounting.Interface) {
t.Helper()
mockAccounting := accountingmock.NewAccounting()
ps, mstorer, ts := createPushSyncNodeWithAccounting(t, addr, prices, recorder, unwrap, signer, mockAccounting, mockOpts...)
return ps, mstorer, ts, mockAccounting
}
func createPushSyncNodeWithAccounting(t *testing.T, addr swarm.Address, prices pricerParameters, recorder *streamtest.Recorder, unwrap func(swarm.Chunk), signer crypto.Signer, acct accounting.Interface, mockOpts ...mock.Option) (*pushsync.PushSync, *mocks.MockStorer, *tags.Tags) {
t.Helper()
logger := logging.New(ioutil.Discard, 0)
storer := mocks.NewStorer()
mockTopology := mock.NewTopologyDriver(mockOpts...)
mockStatestore := statestore.NewStateStore()
mtag := tags.NewTags(mockStatestore, logger)
mockPricer := pricermock.NewMockService(prices.price, prices.peerPrice)
recorderDisconnecter := streamtest.NewRecorderDisconnecter(recorder)
if unwrap == nil {
unwrap = func(swarm.Chunk) {}
}
validStamp := func(ch swarm.Chunk, stamp []byte) (swarm.Chunk, error) {
return ch, nil
}
return pushsync.New(addr, blockHash.Bytes(), recorderDisconnecter, storer, mockTopology, mtag, true, unwrap, validStamp, logger, acct, mockPricer, signer, nil, -1), storer, mtag
}
func waitOnRecordAndTest(t *testing.T, peer swarm.Address, recorder *streamtest.Recorder, add swarm.Address, data []byte) {
t.Helper()
records := recorder.WaitRecords(t, peer, pushsync.ProtocolName, pushsync.ProtocolVersion, pushsync.StreamName, 1, 5)
if data != nil {
messages, err := protobuf.ReadMessages(
bytes.NewReader(records[0].In()),
func() protobuf.Message { return new(pb.Delivery) },
)
if err != nil {
t.Fatal(err)
}
if messages == nil {
t.Fatal("nil rcvd. for message")
}
if len(messages) > 1 {
t.Fatal("too many messages")
}
delivery := messages[0].(*pb.Delivery)
if !bytes.Equal(delivery.Address, add.Bytes()) {
t.Fatalf("chunk address mismatch")
}
if !bytes.Equal(delivery.Data, data) {
t.Fatalf("chunk data mismatch")
}
} else {
messages, err := protobuf.ReadMessages(
bytes.NewReader(records[0].In()),
func() protobuf.Message { return new(pb.Receipt) },
)
if err != nil {
t.Fatal(err)
}
if messages == nil {
t.Fatal("nil rcvd. for message")
}
if len(messages) > 1 {
t.Fatal("too many messages")
}
receipt := messages[0].(*pb.Receipt)
receiptAddress := swarm.NewAddress(receipt.Address)
if !receiptAddress.Equal(add) {
t.Fatalf("receipt address mismatch")
}
}
}
func chanFunc(c chan<- struct{}) func(swarm.Chunk) { func chanFunc(c chan<- struct{}) func(swarm.Chunk) {
return func(_ swarm.Chunk) { return func(_ swarm.Chunk) {
c <- struct{}{} c <- struct{}{}
......
// Copyright 2021 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ratelimit provides a mechanism to rate limit requests based on a string key,
// refill rate and burst amount. Under the hood, it's a token bucket of size burst amount,
// that refills at the refill rate.
package ratelimit
import (
"sync"
"time"
"golang.org/x/time/rate"
)
type Limiter struct {
mtx sync.Mutex
limiter map[string]*rate.Limiter
rate rate.Limit
burst int
}
// New returns a new Limiter object with the given refill rate and burst amount
func New(r time.Duration, burst int) *Limiter {
return &Limiter{
limiter: make(map[string]*rate.Limiter),
rate: rate.Every(r),
burst: burst,
}
}
// Allow checks if the limiter that belongs to 'key' has not exceeded the limit.
func (l *Limiter) Allow(key string, count int) bool {
l.mtx.Lock()
defer l.mtx.Unlock()
limiter, ok := l.limiter[key]
if !ok {
limiter = rate.NewLimiter(l.rate, l.burst)
l.limiter[key] = limiter
}
return limiter.AllowN(time.Now(), count)
}
// Clear deletes the limiter that belongs to 'key'
func (l *Limiter) Clear(key string) {
l.mtx.Lock()
defer l.mtx.Unlock()
delete(l.limiter, key)
}
// Copyright 2021 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ratelimit_test
import (
"testing"
"time"
"github.com/ethersphere/bee/pkg/ratelimit"
)
func TestRateLimit(t *testing.T) {
var (
key1 = "test1"
key2 = "test2"
rate = time.Second
burst = 10
)
limiter := ratelimit.New(rate, burst)
if !limiter.Allow(key1, burst) {
t.Fatal("want allowed")
}
if limiter.Allow(key1, burst) {
t.Fatalf("want not allowed")
}
limiter.Clear(key1)
if !limiter.Allow(key1, burst) {
t.Fatal("want allowed")
}
if !limiter.Allow(key2, burst) {
t.Fatal("want allowed")
}
}
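To round out the new ratelimit package, a short usage sketch: limiters are keyed by an arbitrary string (for example a peer overlay address), Allow is asked about a batch of events, and Clear forgets the bucket for a key. The key and the numbers below are illustrative.

package main

import (
	"fmt"
	"time"

	"github.com/ethersphere/bee/pkg/ratelimit"
)

func main() {
	// bursts of up to 100 events, refilling one token every 50ms
	limiter := ratelimit.New(50*time.Millisecond, 100)

	peer := "example-peer-overlay" // any string key works

	if limiter.Allow(peer, 30) {
		fmt.Println("30 events accepted")
	}
	if !limiter.Allow(peer, 100) {
		fmt.Println("burst exceeded, batch rejected")
	}

	// drop the peer's bucket entirely, e.g. when it disconnects
	limiter.Clear(peer)
}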
...@@ -233,12 +233,12 @@ func newTestNetStore(t *testing.T, recoveryFunc recovery.Callback) storage.Store ...@@ -233,12 +233,12 @@ func newTestNetStore(t *testing.T, recoveryFunc recovery.Callback) storage.Store
return nil return nil
}} }}
server := retrieval.New(swarm.ZeroAddress, mockStorer, nil, ps0, logger, serverMockAccounting, pricerMock, nil) server := retrieval.New(swarm.ZeroAddress, mockStorer, nil, ps0, logger, serverMockAccounting, pricerMock, nil, false, noopStampValidator)
recorder := streamtest.New( recorder := streamtest.New(
streamtest.WithProtocols(server.Protocol()), streamtest.WithProtocols(server.Protocol()),
streamtest.WithBaseAddr(peerID), streamtest.WithBaseAddr(peerID),
) )
retrieve := retrieval.New(swarm.ZeroAddress, mockStorer, recorder, ps, logger, serverMockAccounting, pricerMock, nil) retrieve := retrieval.New(swarm.ZeroAddress, mockStorer, recorder, ps, logger, serverMockAccounting, pricerMock, nil, false, noopStampValidator)
validStamp := func(ch swarm.Chunk, stamp []byte) (swarm.Chunk, error) { validStamp := func(ch swarm.Chunk, stamp []byte) (swarm.Chunk, error) {
return ch.WithStamp(postage.NewStamp(nil, nil, nil, nil)), nil return ch.WithStamp(postage.NewStamp(nil, nil, nil, nil)), nil
} }
...@@ -267,3 +267,7 @@ func (mp *mockPssSender) Send(ctx context.Context, topic pss.Topic, payload []by ...@@ -267,3 +267,7 @@ func (mp *mockPssSender) Send(ctx context.Context, topic pss.Topic, payload []by
mp.callbackC <- true mp.callbackC <- true
return nil return nil
} }
var noopStampValidator = func(chunk swarm.Chunk, stampBytes []byte) (swarm.Chunk, error) {
return chunk, nil
}
...@@ -14,7 +14,7 @@ import ( ...@@ -14,7 +14,7 @@ import (
"github.com/ethersphere/bee/pkg/swarm" "github.com/ethersphere/bee/pkg/swarm"
) )
func TestENSntegration(t *testing.T) { func TestENSIntegration(t *testing.T) {
// TODO: consider using a stable gateway instead of INFURA. // TODO: consider using a stable gateway instead of INFURA.
defaultEndpoint := "https://goerli.infura.io/v3/59d83a5a4be74f86b9851190c802297b" defaultEndpoint := "https://goerli.infura.io/v3/59d83a5a4be74f86b9851190c802297b"
defaultAddr := swarm.MustParseHexAddress("00cb23598c2e520b6a6aae3ddc94fed4435a2909690bdd709bf9d9e7c2aadfad") defaultAddr := swarm.MustParseHexAddress("00cb23598c2e520b6a6aae3ddc94fed4435a2909690bdd709bf9d9e7c2aadfad")
......
...@@ -14,7 +14,7 @@ const SwarmContentHashPrefix = swarmContentHashPrefix ...@@ -14,7 +14,7 @@ const SwarmContentHashPrefix = swarmContentHashPrefix
var ErrNotImplemented = errNotImplemented var ErrNotImplemented = errNotImplemented
// WithConnectFunc will set the Dial function implementaton. // WithConnectFunc will set the Dial function implementation.
func WithConnectFunc(fn func(endpoint string, contractAddr string) (*ethclient.Client, *goens.Registry, error)) Option { func WithConnectFunc(fn func(endpoint string, contractAddr string) (*ethclient.Client, *goens.Registry, error)) Option {
return func(c *Client) { return func(c *Client) {
c.connectFn = fn c.connectFn = fn
......
...@@ -64,9 +64,11 @@ type Service struct { ...@@ -64,9 +64,11 @@ type Service struct {
metrics metrics metrics metrics
pricer pricer.Interface pricer pricer.Interface
tracer *tracing.Tracer tracer *tracing.Tracer
caching bool
validStamp postage.ValidStampFn
} }
func New(addr swarm.Address, storer storage.Storer, streamer p2p.Streamer, chunkPeerer topology.EachPeerer, logger logging.Logger, accounting accounting.Interface, pricer pricer.Interface, tracer *tracing.Tracer) *Service { func New(addr swarm.Address, storer storage.Storer, streamer p2p.Streamer, chunkPeerer topology.EachPeerer, logger logging.Logger, accounting accounting.Interface, pricer pricer.Interface, tracer *tracing.Tracer, forwarderCaching bool, validStamp postage.ValidStampFn) *Service {
return &Service{ return &Service{
addr: addr, addr: addr,
streamer: streamer, streamer: streamer,
...@@ -77,6 +79,8 @@ func New(addr swarm.Address, storer storage.Storer, streamer p2p.Streamer, chunk ...@@ -77,6 +79,8 @@ func New(addr swarm.Address, storer storage.Storer, streamer p2p.Streamer, chunk
pricer: pricer, pricer: pricer,
metrics: newMetrics(), metrics: newMetrics(),
tracer: tracer, tracer: tracer,
caching: forwarderCaching,
validStamp: validStamp,
} }
} }
...@@ -154,16 +158,22 @@ func (s *Service) RetrieveChunk(ctx context.Context, addr swarm.Address, origin ...@@ -154,16 +158,22 @@ func (s *Service) RetrieveChunk(ctx context.Context, addr swarm.Address, origin
defer cancel() defer cancel()
chunk, peer, requested, err := s.retrieveChunk(ctx, addr, sp, origin) chunk, peer, requested, err := s.retrieveChunk(ctx, addr, sp, origin)
resultC <- retrievalResult{ select {
case resultC <- retrievalResult{
chunk: chunk, chunk: chunk,
peer: peer, peer: peer,
err: err, err: err,
retrieved: requested, retrieved: requested,
}:
case <-ctx.Done():
} }
}() }()
} else { } else {
resultC <- retrievalResult{} select {
case resultC <- retrievalResult{}:
case <-ctx.Done():
}
} }
select { select {
...@@ -410,6 +420,8 @@ func (s *Service) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) (e ...@@ -410,6 +420,8 @@ func (s *Service) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) (e
ctx = context.WithValue(ctx, requestSourceContextKey{}, p.Address.String()) ctx = context.WithValue(ctx, requestSourceContextKey{}, p.Address.String())
addr := swarm.NewAddress(req.Addr) addr := swarm.NewAddress(req.Addr)
forwarded := false
chunk, err := s.storer.Get(ctx, storage.ModeGetRequest, addr) chunk, err := s.storer.Get(ctx, storage.ModeGetRequest, addr)
if err != nil { if err != nil {
if errors.Is(err, storage.ErrNotFound) { if errors.Is(err, storage.ErrNotFound) {
...@@ -418,11 +430,11 @@ func (s *Service) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) (e ...@@ -418,11 +430,11 @@ func (s *Service) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) (e
if err != nil { if err != nil {
return fmt.Errorf("retrieve chunk: %w", err) return fmt.Errorf("retrieve chunk: %w", err)
} }
forwarded = true
} else { } else {
return fmt.Errorf("get from store: %w", err) return fmt.Errorf("get from store: %w", err)
} }
} }
stamp, err := chunk.Stamp().MarshalBinary() stamp, err := chunk.Stamp().MarshalBinary()
if err != nil { if err != nil {
return fmt.Errorf("stamp marshal: %w", err) return fmt.Errorf("stamp marshal: %w", err)
...@@ -443,6 +455,28 @@ func (s *Service) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) (e ...@@ -443,6 +455,28 @@ func (s *Service) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) (e
} }
s.logger.Tracef("retrieval protocol debiting peer %s", p.Address.String()) s.logger.Tracef("retrieval protocol debiting peer %s", p.Address.String())
// debit price from p's balance // debit price from p's balance
return debit.Apply() if err := debit.Apply(); err != nil {
return fmt.Errorf("apply debit: %w", err)
}
// cache the request last, so that putting to the localstore does not slow down the request flow
if s.caching && forwarded {
putMode := storage.ModePutRequest
cch, err := s.validStamp(chunk, stamp)
if err != nil {
// if a chunk with an invalid postage stamp was received
// we force it into the cache.
putMode = storage.ModePutRequestCache
cch = chunk
}
_, err = s.storer.Put(ctx, putMode, cch)
if err != nil {
return fmt.Errorf("retrieve cache put: %w", err)
}
}
return nil
} }
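The caching branch appended to the retrieval handler is the core of this hunk: a forwarded chunk is stored only after the debit is applied, and a failed stamp validation demotes the put to the cache-only mode instead of discarding the data. Isolated into a helper for readability (the helper name is ours; the mode constants and the ValidStampFn type are the ones used above), the logic reads:

package sketch

import (
	"context"
	"fmt"

	"github.com/ethersphere/bee/pkg/postage"
	"github.com/ethersphere/bee/pkg/storage"
	"github.com/ethersphere/bee/pkg/swarm"
)

// cacheForwarded mirrors the caching branch added to the handler: a forwarded
// chunk is stored after the response is debited, and an invalid stamp demotes
// the put to the cache-only mode rather than dropping the data.
func cacheForwarded(ctx context.Context, storer storage.Storer, validStamp postage.ValidStampFn, chunk swarm.Chunk, stamp []byte) error {
	putMode := storage.ModePutRequest
	cch, err := validStamp(chunk, stamp)
	if err != nil {
		// invalid postage stamp: keep the data, but only as cache
		putMode = storage.ModePutRequestCache
		cch = chunk
	}
	if _, err := storer.Put(ctx, putMode, cch); err != nil {
		return fmt.Errorf("retrieve cache put: %w", err)
	}
	return nil
}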
...@@ -61,7 +61,7 @@ func TestDelivery(t *testing.T) { ...@@ -61,7 +61,7 @@ func TestDelivery(t *testing.T) {
} }
// create the server that will handle the request and will serve the response // create the server that will handle the request and will serve the response
server := retrieval.New(swarm.MustParseHexAddress("0034"), mockStorer, nil, nil, logger, serverMockAccounting, pricerMock, nil) server := retrieval.New(swarm.MustParseHexAddress("0034"), mockStorer, nil, nil, logger, serverMockAccounting, pricerMock, nil, false, noopStampValidator)
recorder := streamtest.New( recorder := streamtest.New(
streamtest.WithProtocols(server.Protocol()), streamtest.WithProtocols(server.Protocol()),
streamtest.WithBaseAddr(clientAddr), streamtest.WithBaseAddr(clientAddr),
...@@ -78,7 +78,7 @@ func TestDelivery(t *testing.T) { ...@@ -78,7 +78,7 @@ func TestDelivery(t *testing.T) {
return nil return nil
}} }}
client := retrieval.New(clientAddr, clientMockStorer, recorder, ps, logger, clientMockAccounting, pricerMock, nil) client := retrieval.New(clientAddr, clientMockStorer, recorder, ps, logger, clientMockAccounting, pricerMock, nil, false, noopStampValidator)
ctx, cancel := context.WithTimeout(context.Background(), testTimeout) ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
defer cancel() defer cancel()
v, err := client.RetrieveChunk(ctx, chunk.Address(), true) v, err := client.RetrieveChunk(ctx, chunk.Address(), true)
...@@ -167,14 +167,14 @@ func TestRetrieveChunk(t *testing.T) { ...@@ -167,14 +167,14 @@ func TestRetrieveChunk(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
server := retrieval.New(serverAddress, serverStorer, nil, nil, logger, accountingmock.NewAccounting(), pricer, nil) server := retrieval.New(serverAddress, serverStorer, nil, nil, logger, accountingmock.NewAccounting(), pricer, nil, false, noopStampValidator)
recorder := streamtest.New(streamtest.WithProtocols(server.Protocol())) recorder := streamtest.New(streamtest.WithProtocols(server.Protocol()))
clientSuggester := mockPeerSuggester{eachPeerRevFunc: func(f topology.EachPeerFunc) error { clientSuggester := mockPeerSuggester{eachPeerRevFunc: func(f topology.EachPeerFunc) error {
_, _, _ = f(serverAddress, 0) _, _, _ = f(serverAddress, 0)
return nil return nil
}} }}
client := retrieval.New(clientAddress, nil, recorder, clientSuggester, logger, accountingmock.NewAccounting(), pricer, nil) client := retrieval.New(clientAddress, nil, recorder, clientSuggester, logger, accountingmock.NewAccounting(), pricer, nil, false, noopStampValidator)
got, err := client.RetrieveChunk(context.Background(), chunk.Address(), true) got, err := client.RetrieveChunk(context.Background(), chunk.Address(), true)
if err != nil { if err != nil {
...@@ -207,11 +207,15 @@ func TestRetrieveChunk(t *testing.T) { ...@@ -207,11 +207,15 @@ func TestRetrieveChunk(t *testing.T) {
accountingmock.NewAccounting(), accountingmock.NewAccounting(),
pricer, pricer,
nil, nil,
false,
noopStampValidator,
) )
forwarderStore := storemock.NewStorer()
forwarder := retrieval.New( forwarder := retrieval.New(
forwarderAddress, forwarderAddress,
storemock.NewStorer(), // no chunk in forwarder's store forwarderStore, // no chunk in forwarder's store
streamtest.New(streamtest.WithProtocols(server.Protocol())), // connect to server streamtest.New(streamtest.WithProtocols(server.Protocol())), // connect to server
mockPeerSuggester{eachPeerRevFunc: func(f topology.EachPeerFunc) error { mockPeerSuggester{eachPeerRevFunc: func(f topology.EachPeerFunc) error {
_, _, _ = f(serverAddress, 0) // suggest server's address _, _, _ = f(serverAddress, 0) // suggest server's address
...@@ -221,6 +225,8 @@ func TestRetrieveChunk(t *testing.T) { ...@@ -221,6 +225,8 @@ func TestRetrieveChunk(t *testing.T) {
accountingmock.NewAccounting(), accountingmock.NewAccounting(),
pricer, pricer,
nil, nil,
true, // note explicit caching
noopStampValidator,
) )
client := retrieval.New( client := retrieval.New(
...@@ -235,8 +241,14 @@ func TestRetrieveChunk(t *testing.T) { ...@@ -235,8 +241,14 @@ func TestRetrieveChunk(t *testing.T) {
accountingmock.NewAccounting(), accountingmock.NewAccounting(),
pricer, pricer,
nil, nil,
false,
noopStampValidator,
) )
if got, _ := forwarderStore.Has(context.Background(), chunk.Address()); got {
t.Fatalf("forwarder node already has chunk")
}
got, err := client.RetrieveChunk(context.Background(), chunk.Address(), true) got, err := client.RetrieveChunk(context.Background(), chunk.Address(), true)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
...@@ -244,6 +256,11 @@ func TestRetrieveChunk(t *testing.T) { ...@@ -244,6 +256,11 @@ func TestRetrieveChunk(t *testing.T) {
if !bytes.Equal(got.Data(), chunk.Data()) { if !bytes.Equal(got.Data(), chunk.Data()) {
t.Fatalf("got data %x, want %x", got.Data(), chunk.Data()) t.Fatalf("got data %x, want %x", got.Data(), chunk.Data())
} }
if got, _ := forwarderStore.Has(context.Background(), chunk.Address()); !got {
t.Fatalf("forwarder did not cache chunk")
}
}) })
} }
...@@ -301,8 +318,8 @@ func TestRetrievePreemptiveRetry(t *testing.T) { ...@@ -301,8 +318,8 @@ func TestRetrievePreemptiveRetry(t *testing.T) {
return peerSuggester return peerSuggester
} }
server1 := retrieval.New(serverAddress1, serverStorer1, nil, noPeerSuggester, logger, accountingmock.NewAccounting(), pricerMock, nil) server1 := retrieval.New(serverAddress1, serverStorer1, nil, noPeerSuggester, logger, accountingmock.NewAccounting(), pricerMock, nil, false, noopStampValidator)
server2 := retrieval.New(serverAddress2, serverStorer2, nil, noPeerSuggester, logger, accountingmock.NewAccounting(), pricerMock, nil) server2 := retrieval.New(serverAddress2, serverStorer2, nil, noPeerSuggester, logger, accountingmock.NewAccounting(), pricerMock, nil, false, noopStampValidator)
t.Run("peer not reachable", func(t *testing.T) { t.Run("peer not reachable", func(t *testing.T) {
ranOnce := true ranOnce := true
...@@ -330,7 +347,7 @@ func TestRetrievePreemptiveRetry(t *testing.T) { ...@@ -330,7 +347,7 @@ func TestRetrievePreemptiveRetry(t *testing.T) {
streamtest.WithBaseAddr(clientAddress), streamtest.WithBaseAddr(clientAddress),
) )
client := retrieval.New(clientAddress, nil, recorder, peerSuggesterFn(peers...), logger, accountingmock.NewAccounting(), pricerMock, nil) client := retrieval.New(clientAddress, nil, recorder, peerSuggesterFn(peers...), logger, accountingmock.NewAccounting(), pricerMock, nil, false, noopStampValidator)
got, err := client.RetrieveChunk(context.Background(), chunk.Address(), true) got, err := client.RetrieveChunk(context.Background(), chunk.Address(), true)
if err != nil { if err != nil {
...@@ -366,7 +383,7 @@ func TestRetrievePreemptiveRetry(t *testing.T) { ...@@ -366,7 +383,7 @@ func TestRetrievePreemptiveRetry(t *testing.T) {
), ),
) )
client := retrieval.New(clientAddress, nil, recorder, peerSuggesterFn(peers...), logger, accountingmock.NewAccounting(), pricerMock, nil) client := retrieval.New(clientAddress, nil, recorder, peerSuggesterFn(peers...), logger, accountingmock.NewAccounting(), pricerMock, nil, false, noopStampValidator)
got, err := client.RetrieveChunk(context.Background(), chunk.Address(), true) got, err := client.RetrieveChunk(context.Background(), chunk.Address(), true)
if err != nil { if err != nil {
...@@ -395,8 +412,8 @@ func TestRetrievePreemptiveRetry(t *testing.T) { ...@@ -395,8 +412,8 @@ func TestRetrievePreemptiveRetry(t *testing.T) {
server1MockAccounting := accountingmock.NewAccounting() server1MockAccounting := accountingmock.NewAccounting()
server2MockAccounting := accountingmock.NewAccounting() server2MockAccounting := accountingmock.NewAccounting()
server1 := retrieval.New(serverAddress1, serverStorer1, nil, noPeerSuggester, logger, server1MockAccounting, pricerMock, nil) server1 := retrieval.New(serverAddress1, serverStorer1, nil, noPeerSuggester, logger, server1MockAccounting, pricerMock, nil, false, noopStampValidator)
server2 := retrieval.New(serverAddress2, serverStorer2, nil, noPeerSuggester, logger, server2MockAccounting, pricerMock, nil) server2 := retrieval.New(serverAddress2, serverStorer2, nil, noPeerSuggester, logger, server2MockAccounting, pricerMock, nil, false, noopStampValidator)
// NOTE: must be more than retry duration // NOTE: must be more than retry duration
// (here one second more) // (here one second more)
...@@ -430,7 +447,7 @@ func TestRetrievePreemptiveRetry(t *testing.T) { ...@@ -430,7 +447,7 @@ func TestRetrievePreemptiveRetry(t *testing.T) {
clientMockAccounting := accountingmock.NewAccounting() clientMockAccounting := accountingmock.NewAccounting()
client := retrieval.New(clientAddress, nil, recorder, peerSuggesterFn(peers...), logger, clientMockAccounting, pricerMock, nil) client := retrieval.New(clientAddress, nil, recorder, peerSuggesterFn(peers...), logger, clientMockAccounting, pricerMock, nil, false, noopStampValidator)
got, err := client.RetrieveChunk(context.Background(), chunk.Address(), true) got, err := client.RetrieveChunk(context.Background(), chunk.Address(), true)
if err != nil { if err != nil {
...@@ -468,21 +485,25 @@ func TestRetrievePreemptiveRetry(t *testing.T) { ...@@ -468,21 +485,25 @@ func TestRetrievePreemptiveRetry(t *testing.T) {
t.Run("peer forwards request", func(t *testing.T) { t.Run("peer forwards request", func(t *testing.T) {
// server 2 has the chunk // server 2 has the chunk
server2 := retrieval.New(serverAddress2, serverStorer2, nil, noPeerSuggester, logger, accountingmock.NewAccounting(), pricerMock, nil) server2 := retrieval.New(serverAddress2, serverStorer2, nil, noPeerSuggester, logger, accountingmock.NewAccounting(), pricerMock, nil, false, noopStampValidator)
server1Recorder := streamtest.New( server1Recorder := streamtest.New(
streamtest.WithProtocols(server2.Protocol()), streamtest.WithProtocols(server2.Protocol()),
) )
// server 1 will forward request to server 2 // server 1 will forward request to server 2
server1 := retrieval.New(serverAddress1, serverStorer1, server1Recorder, peerSuggesterFn(serverAddress2), logger, accountingmock.NewAccounting(), pricerMock, nil) server1 := retrieval.New(serverAddress1, serverStorer1, server1Recorder, peerSuggesterFn(serverAddress2), logger, accountingmock.NewAccounting(), pricerMock, nil, true, noopStampValidator)
clientRecorder := streamtest.New( clientRecorder := streamtest.New(
streamtest.WithProtocols(server1.Protocol()), streamtest.WithProtocols(server1.Protocol()),
) )
// client only knows about server 1 // client only knows about server 1
client := retrieval.New(clientAddress, nil, clientRecorder, peerSuggesterFn(serverAddress1), logger, accountingmock.NewAccounting(), pricerMock, nil) client := retrieval.New(clientAddress, nil, clientRecorder, peerSuggesterFn(serverAddress1), logger, accountingmock.NewAccounting(), pricerMock, nil, false, noopStampValidator)
if got, _ := serverStorer1.Has(context.Background(), chunk.Address()); got {
t.Fatalf("forwarder node already has chunk")
}
got, err := client.RetrieveChunk(context.Background(), chunk.Address(), true) got, err := client.RetrieveChunk(context.Background(), chunk.Address(), true)
if err != nil { if err != nil {
...@@ -492,6 +513,18 @@ func TestRetrievePreemptiveRetry(t *testing.T) { ...@@ -492,6 +513,18 @@ func TestRetrievePreemptiveRetry(t *testing.T) {
if !bytes.Equal(got.Data(), chunk.Data()) { if !bytes.Equal(got.Data(), chunk.Data()) {
t.Fatalf("got data %x, want %x", got.Data(), chunk.Data()) t.Fatalf("got data %x, want %x", got.Data(), chunk.Data())
} }
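// the forwarder is expected to cache the chunk after serving it; the store
// write is assumed to be asynchronous, hence the short polling loop below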
has := false
for i := 0; i < 10; i++ {
has, _ = serverStorer1.Has(context.Background(), chunk.Address())
if has {
break
}
time.Sleep(100 * time.Millisecond)
}
if !has {
t.Fatalf("forwarder node does not have chunk")
}
}) })
} }
...@@ -505,3 +538,7 @@ func (s mockPeerSuggester) EachPeer(topology.EachPeerFunc) error { ...@@ -505,3 +538,7 @@ func (s mockPeerSuggester) EachPeer(topology.EachPeerFunc) error {
func (s mockPeerSuggester) EachPeerRev(f topology.EachPeerFunc) error { func (s mockPeerSuggester) EachPeerRev(f topology.EachPeerFunc) error {
return s.eachPeerRevFunc(f) return s.eachPeerRevFunc(f)
} }
var noopStampValidator = func(chunk swarm.Chunk, stampBytes []byte) (swarm.Chunk, error) {
return chunk, nil
}
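For reference, a validator with the opposite behaviour can be built against the same signature to exercise the rejection path; this is only a sketch, not part of the change (the name, error text, and any test wiring are assumptions, and it presumes the errors package is imported):

// failingStampValidator is a hypothetical counterpart to noopStampValidator:
// it rejects every chunk, letting a test assert that retrieval surfaces
// postage-stamp validation failures.
var failingStampValidator = func(chunk swarm.Chunk, stampBytes []byte) (swarm.Chunk, error) {
	return nil, errors.New("postage stamp invalid")
}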
...@@ -239,7 +239,7 @@ func (s *Service) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) (e ...@@ -239,7 +239,7 @@ func (s *Service) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) (e
} }
// Pay initiates a payment to the given peer // Pay initiates a payment to the given peer
func (s *Service) Pay(ctx context.Context, peer swarm.Address, amount *big.Int, checkAllowance *big.Int) (*big.Int, int64, error) { func (s *Service) Pay(ctx context.Context, peer swarm.Address, amount, checkAllowance *big.Int) (*big.Int, int64, error) {
ctx, cancel := context.WithTimeout(ctx, 5*time.Second) ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel() defer cancel()
......
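This hunk, like several below, only merges consecutive parameters of the same type into one declaration; the two spellings are identical to the Go compiler and call sites are unaffected. A minimal illustration with placeholder names:

// Grouping parameters of the same type is purely cosmetic.
func payLong(amount *big.Int, checkAllowance *big.Int) {}
func payShort(amount, checkAllowance *big.Int)         {}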
...@@ -41,7 +41,7 @@ type notifyPaymentSentCall struct { ...@@ -41,7 +41,7 @@ type notifyPaymentSentCall struct {
err error err error
} }
func newTestObserver(debtAmounts map[string]*big.Int, shadowBalanceAmounts map[string]*big.Int) *testObserver { func newTestObserver(debtAmounts, shadowBalanceAmounts map[string]*big.Int) *testObserver {
return &testObserver{ return &testObserver{
receivedCalled: make(chan notifyPaymentReceivedCall, 1), receivedCalled: make(chan notifyPaymentReceivedCall, 1),
sentCalled: make(chan notifyPaymentSentCall, 1), sentCalled: make(chan notifyPaymentSentCall, 1),
......
...@@ -41,7 +41,7 @@ var ( ...@@ -41,7 +41,7 @@ var (
// ChequeStore handles the verification and storage of received cheques // ChequeStore handles the verification and storage of received cheques
type ChequeStore interface { type ChequeStore interface {
// ReceiveCheque verifies and stores a cheque. It returns the total amount earned. // ReceiveCheque verifies and stores a cheque. It returns the total amount earned.
ReceiveCheque(ctx context.Context, cheque *SignedCheque, exchangeRate *big.Int, deduction *big.Int) (*big.Int, error) ReceiveCheque(ctx context.Context, cheque *SignedCheque, exchangeRate, deduction *big.Int) (*big.Int, error)
// LastCheque returns the last cheque we received from a specific chequebook. // LastCheque returns the last cheque we received from a specific chequebook.
LastCheque(chequebook common.Address) (*SignedCheque, error) LastCheque(chequebook common.Address) (*SignedCheque, error)
// LastCheques returns the last received cheques from every known chequebook. // LastCheques returns the last received cheques from every known chequebook.
...@@ -98,7 +98,7 @@ func (s *chequeStore) LastCheque(chequebook common.Address) (*SignedCheque, erro ...@@ -98,7 +98,7 @@ func (s *chequeStore) LastCheque(chequebook common.Address) (*SignedCheque, erro
} }
// ReceiveCheque verifies and stores a cheque. It returns the total amount earned. // ReceiveCheque verifies and stores a cheque. It returns the total amount earned.
func (s *chequeStore) ReceiveCheque(ctx context.Context, cheque *SignedCheque, exchangeRate *big.Int, deduction *big.Int) (*big.Int, error) { func (s *chequeStore) ReceiveCheque(ctx context.Context, cheque *SignedCheque, exchangeRate, deduction *big.Int) (*big.Int, error) {
// verify we are the beneficiary // verify we are the beneficiary
if cheque.Beneficiary != s.beneficiary { if cheque.Beneficiary != s.beneficiary {
return nil, ErrWrongBeneficiary return nil, ErrWrongBeneficiary
......
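A hedged usage sketch of the ReceiveCheque contract documented above; the store, cheque, and rate values are placeholders rather than code from this diff:

// Verify and persist a received cheque; the returned amount is the total earned.
amount, err := chequeStore.ReceiveCheque(ctx, signedCheque, exchangeRate, deduction)
if err != nil {
	return fmt.Errorf("cheque rejected: %w", err)
}
_ = amount // total amount earned from this chequebook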
...@@ -141,7 +141,7 @@ LOOP: ...@@ -141,7 +141,7 @@ LOOP:
return nil return nil
} }
func (c *factory) verifyChequebookAgainstFactory(ctx context.Context, factory common.Address, chequebook common.Address) (bool, error) { func (c *factory) verifyChequebookAgainstFactory(ctx context.Context, factory, chequebook common.Address) (bool, error) {
callData, err := factoryABI.Pack("deployedContracts", chequebook) callData, err := factoryABI.Pack("deployedContracts", chequebook)
if err != nil { if err != nil {
return false, err return false, err
...@@ -227,27 +227,3 @@ func (c *factory) ERC20Address(ctx context.Context) (common.Address, error) { ...@@ -227,27 +227,3 @@ func (c *factory) ERC20Address(ctx context.Context) (common.Address, error) {
} }
return *erc20Address, nil return *erc20Address, nil
} }
var (
GoerliChainID = int64(5)
GoerliFactoryAddress = common.HexToAddress("0x73c412512E1cA0be3b89b77aB3466dA6A1B9d273")
GoerliLegacyFactoryAddress = common.HexToAddress("0xf0277caffea72734853b834afc9892461ea18474")
XDaiChainID = int64(100)
XDaiFactoryAddress = common.HexToAddress("0xc2d5a532cf69aa9a1378737d8ccdef884b6e7420")
)
// DiscoverFactoryAddress returns the canonical factory for this chainID
func DiscoverFactoryAddress(chainID int64) (currentFactory common.Address, legacyFactories []common.Address, found bool) {
switch chainID {
case GoerliChainID:
// goerli
return GoerliFactoryAddress, []common.Address{
GoerliLegacyFactoryAddress,
}, true
case XDaiChainID:
// xdai
return XDaiFactoryAddress, []common.Address{}, true
default:
return common.Address{}, nil, false
}
}
...@@ -131,11 +131,9 @@ func Init( ...@@ -131,11 +131,9 @@ func Init(
} }
if err == storage.ErrNotFound { if err == storage.ErrNotFound {
logger.Info("no chequebook found, deploying new one.") logger.Info("no chequebook found, deploying new one.")
if swapInitialDeposit.Cmp(big.NewInt(0)) != 0 { err = checkBalance(ctx, logger, swapInitialDeposit, swapBackend, chainId, overlayEthAddress, erc20Service)
err = checkBalance(ctx, logger, swapInitialDeposit, swapBackend, chainId, overlayEthAddress, erc20Service) if err != nil {
if err != nil { return nil, err
return nil, err
}
} }
nonce := make([]byte, 32) nonce := make([]byte, 32)
......
...@@ -46,7 +46,7 @@ func NewChequeStore(opts ...Option) chequebook.ChequeStore { ...@@ -46,7 +46,7 @@ func NewChequeStore(opts ...Option) chequebook.ChequeStore {
return mock return mock
} }
func (s *Service) ReceiveCheque(ctx context.Context, cheque *chequebook.SignedCheque, exchangeRate *big.Int, deduction *big.Int) (*big.Int, error) { func (s *Service) ReceiveCheque(ctx context.Context, cheque *chequebook.SignedCheque, exchangeRate, deduction *big.Int) (*big.Int, error) {
return s.receiveCheque(ctx, cheque, exchangeRate, deduction) return s.receiveCheque(ctx, cheque, exchangeRate, deduction)
} }
......
...@@ -33,7 +33,7 @@ func MakeSettlementHeaders(exchangeRate, deduction *big.Int) p2p.Headers { ...@@ -33,7 +33,7 @@ func MakeSettlementHeaders(exchangeRate, deduction *big.Int) p2p.Headers {
} }
} }
func ParseSettlementResponseHeaders(receivedHeaders p2p.Headers) (exchange *big.Int, deduction *big.Int, err error) { func ParseSettlementResponseHeaders(receivedHeaders p2p.Headers) (exchange, deduction *big.Int, err error) {
exchangeRate, err := ParseExchangeHeader(receivedHeaders) exchangeRate, err := ParseExchangeHeader(receivedHeaders)
if err != nil { if err != nil {
......
...@@ -42,7 +42,7 @@ type Service struct { ...@@ -42,7 +42,7 @@ type Service struct {
cashoutStatusFunc func(ctx context.Context, peer swarm.Address) (*chequebook.CashoutStatus, error) cashoutStatusFunc func(ctx context.Context, peer swarm.Address) (*chequebook.CashoutStatus, error)
} }
// WithsettlementFunc sets the mock settlement function // WithSettlementSentFunc sets the mock settlement function
func WithSettlementSentFunc(f func(swarm.Address) (*big.Int, error)) Option { func WithSettlementSentFunc(f func(swarm.Address) (*big.Int, error)) Option {
return optionFunc(func(s *Service) { return optionFunc(func(s *Service) {
s.settlementSentFunc = f s.settlementSentFunc = f
...@@ -55,7 +55,7 @@ func WithSettlementRecvFunc(f func(swarm.Address) (*big.Int, error)) Option { ...@@ -55,7 +55,7 @@ func WithSettlementRecvFunc(f func(swarm.Address) (*big.Int, error)) Option {
}) })
} }
// WithsettlementsFunc sets the mock settlements function // WithSettlementsSentFunc sets the mock settlements function
func WithSettlementsSentFunc(f func() (map[string]*big.Int, error)) Option { func WithSettlementsSentFunc(f func() (map[string]*big.Int, error)) Option {
return optionFunc(func(s *Service) { return optionFunc(func(s *Service) {
s.settlementsSentFunc = f s.settlementsSentFunc = f
...@@ -247,7 +247,7 @@ func (s *Service) CashoutStatus(ctx context.Context, peer swarm.Address) (*chequ ...@@ -247,7 +247,7 @@ func (s *Service) CashoutStatus(ctx context.Context, peer swarm.Address) (*chequ
return nil, nil return nil, nil
} }
func (s *Service) ReceiveCheque(ctx context.Context, peer swarm.Address, cheque *chequebook.SignedCheque, exchangeRate *big.Int, deduction *big.Int) (err error) { func (s *Service) ReceiveCheque(ctx context.Context, peer swarm.Address, cheque *chequebook.SignedCheque, exchangeRate, deduction *big.Int) (err error) {
defer func() { defer func() {
if err == nil { if err == nil {
s.deductionForPeers[peer.String()] = struct{}{} s.deductionForPeers[peer.String()] = struct{}{}
......
...@@ -16,7 +16,7 @@ type Service struct { ...@@ -16,7 +16,7 @@ type Service struct {
deduct *big.Int deduct *big.Int
} }
func New(rate *big.Int, deduct *big.Int) Service { func New(rate, deduct *big.Int) Service {
return Service{ return Service{
rate: rate, rate: rate,
deduct: deduct, deduct: deduct,
...@@ -30,7 +30,7 @@ func (s Service) GetPrice(ctx context.Context) (*big.Int, *big.Int, error) { ...@@ -30,7 +30,7 @@ func (s Service) GetPrice(ctx context.Context) (*big.Int, *big.Int, error) {
return s.rate, s.deduct, nil return s.rate, s.deduct, nil
} }
func (s Service) CurrentRates() (exchangeRate *big.Int, deduction *big.Int, err error) { func (s Service) CurrentRates() (exchangeRate, deduction *big.Int, err error) {
return s.rate, s.deduct, nil return s.rate, s.deduct, nil
} }
...@@ -42,7 +42,7 @@ func DiscoverPriceOracleAddress(chainID int64) (priceOracleAddress common.Addres ...@@ -42,7 +42,7 @@ func DiscoverPriceOracleAddress(chainID int64) (priceOracleAddress common.Addres
return common.Address{}, false return common.Address{}, false
} }
func (s Service) SetValues(rate *big.Int, deduct *big.Int) { func (s Service) SetValues(rate, deduct *big.Int) {
s.rate = rate s.rate = rate
s.deduct = deduct s.deduct = deduct
} }
...@@ -131,7 +131,7 @@ func (s *service) GetPrice(ctx context.Context) (*big.Int, *big.Int, error) { ...@@ -131,7 +131,7 @@ func (s *service) GetPrice(ctx context.Context) (*big.Int, *big.Int, error) {
return exchangeRate, deduction, nil return exchangeRate, deduction, nil
} }
func (s *service) CurrentRates() (exchangeRate *big.Int, deduction *big.Int, err error) { func (s *service) CurrentRates() (exchangeRate, deduction *big.Int, err error) {
if s.exchangeRate.Cmp(big.NewInt(0)) == 0 { if s.exchangeRate.Cmp(big.NewInt(0)) == 0 {
return nil, nil, errors.New("exchange rate not yet available") return nil, nil, errors.New("exchange rate not yet available")
} }
...@@ -145,21 +145,3 @@ func (s *service) Close() error { ...@@ -145,21 +145,3 @@ func (s *service) Close() error {
close(s.quitC) close(s.quitC)
return nil return nil
} }
var (
goerliChainID = int64(5)
goerliContractAddress = common.HexToAddress("0x0c9de531dcb38b758fe8a2c163444a5e54ee0db2")
xdaiChainID = int64(100)
xdaiContractAddress = common.HexToAddress("0x0FDc5429C50e2a39066D8A94F3e2D2476fcc3b85")
)
// DiscoverPriceOracleAddress returns the canonical price oracle for this chainID
func DiscoverPriceOracleAddress(chainID int64) (priceOracleAddress common.Address, found bool) {
switch chainID {
case goerliChainID:
return goerliContractAddress, true
case xdaiChainID:
return xdaiContractAddress, true
}
return common.Address{}, false
}
...@@ -75,7 +75,7 @@ func New(proto swapprotocol.Interface, logger logging.Logger, store storage.Stat ...@@ -75,7 +75,7 @@ func New(proto swapprotocol.Interface, logger logging.Logger, store storage.Stat
} }
// ReceiveCheque is called by the swap protocol if a cheque is received. // ReceiveCheque is called by the swap protocol if a cheque is received.
func (s *Service) ReceiveCheque(ctx context.Context, peer swarm.Address, cheque *chequebook.SignedCheque, exchangeRate *big.Int, deduction *big.Int) (err error) { func (s *Service) ReceiveCheque(ctx context.Context, peer swarm.Address, cheque *chequebook.SignedCheque, exchangeRate, deduction *big.Int) (err error) {
// check this is the same chequebook for this peer as previously // check this is the same chequebook for this peer as previously
expectedChequebook, known, err := s.addressbook.Chequebook(peer) expectedChequebook, known, err := s.addressbook.Chequebook(peer)
if err != nil { if err != nil {
......
...@@ -50,7 +50,7 @@ type Interface interface { ...@@ -50,7 +50,7 @@ type Interface interface {
// Swap is the interface the settlement layer should implement to receive cheques. // Swap is the interface the settlement layer should implement to receive cheques.
type Swap interface { type Swap interface {
// ReceiveCheque is called by the swap protocol if a cheque is received. // ReceiveCheque is called by the swap protocol if a cheque is received.
ReceiveCheque(ctx context.Context, peer swarm.Address, cheque *chequebook.SignedCheque, exchangeRate *big.Int, deduction *big.Int) error ReceiveCheque(ctx context.Context, peer swarm.Address, cheque *chequebook.SignedCheque, exchangeRate, deduction *big.Int) error
// Handshake is called by the swap protocol when a handshake is received. // Handshake is called by the swap protocol when a handshake is received.
Handshake(peer swarm.Address, beneficiary common.Address) error Handshake(peer swarm.Address, beneficiary common.Address) error
GetDeductionForPeer(peer swarm.Address) (bool, error) GetDeductionForPeer(peer swarm.Address) (bool, error)
......
...@@ -296,7 +296,7 @@ func encodeInt64Append(buffer *[]byte, val int64) { ...@@ -296,7 +296,7 @@ func encodeInt64Append(buffer *[]byte, val int64) {
} }
func decodeInt64Splice(buffer *[]byte) int64 { func decodeInt64Splice(buffer *[]byte) int64 {
val, n := binary.Varint((*buffer)) val, n := binary.Varint(*buffer)
*buffer = (*buffer)[n:] *buffer = (*buffer)[n:]
return val return val
} }
......
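A hedged roundtrip sketch for the pair of helpers named in this hunk (encodeInt64Append and decodeInt64Splice); it assumes they append varints to, and consume them from, the same byte slice:

// Encode a few values onto one buffer, then splice them back off the front.
var buf []byte
for _, v := range []int64{1, -7, 1 << 40} {
	encodeInt64Append(&buf, v)
}
for i := 0; i < 3; i++ {
	fmt.Println(decodeInt64Splice(&buf)) // 1, -7, 1099511627776
}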
...@@ -66,7 +66,6 @@ var noopSanctionedPeerFn = func(_ swarm.Address) bool { return false } ...@@ -66,7 +66,6 @@ var noopSanctionedPeerFn = func(_ swarm.Address) bool { return false }
type Options struct { type Options struct {
SaturationFunc binSaturationFunc SaturationFunc binSaturationFunc
Bootnodes []ma.Multiaddr Bootnodes []ma.Multiaddr
StandaloneMode bool
BootnodeMode bool BootnodeMode bool
BitSuffixLength int BitSuffixLength int
} }
...@@ -90,7 +89,6 @@ type Kad struct { ...@@ -90,7 +89,6 @@ type Kad struct {
peerSig []chan struct{} peerSig []chan struct{}
peerSigMtx sync.Mutex peerSigMtx sync.Mutex
logger logging.Logger // logger logger logging.Logger // logger
standalone bool // indicates whether the node is working in standalone mode
bootnode bool // indicates whether the node is working in bootnode mode bootnode bool // indicates whether the node is working in bootnode mode
collector *im.Collector collector *im.Collector
quit chan struct{} // quit channel quit chan struct{} // quit channel
...@@ -136,7 +134,6 @@ func New( ...@@ -136,7 +134,6 @@ func New(
manageC: make(chan struct{}, 1), manageC: make(chan struct{}, 1),
waitNext: waitnext.New(), waitNext: waitnext.New(),
logger: logger, logger: logger,
standalone: o.StandaloneMode,
bootnode: o.BootnodeMode, bootnode: o.BootnodeMode,
collector: im.NewCollector(metricsDB), collector: im.NewCollector(metricsDB),
quit: make(chan struct{}), quit: make(chan struct{}),
...@@ -155,10 +152,10 @@ func New( ...@@ -155,10 +152,10 @@ func New(
func (k *Kad) generateCommonBinPrefixes() { func (k *Kad) generateCommonBinPrefixes() {
bitCombinationsCount := int(math.Pow(2, float64(k.bitSuffixLength))) bitCombinationsCount := int(math.Pow(2, float64(k.bitSuffixLength)))
bitSufixes := make([]uint8, bitCombinationsCount) bitSuffixes := make([]uint8, bitCombinationsCount)
for i := 0; i < bitCombinationsCount; i++ { for i := 0; i < bitCombinationsCount; i++ {
bitSufixes[i] = uint8(i) bitSuffixes[i] = uint8(i)
} }
addr := swarm.MustParseHexAddress(k.base.String()) addr := swarm.MustParseHexAddress(k.base.String())
...@@ -197,7 +194,7 @@ func (k *Kad) generateCommonBinPrefixes() { ...@@ -197,7 +194,7 @@ func (k *Kad) generateCommonBinPrefixes() {
for l := i + 1; l < i+k.bitSuffixLength+1; l++ { for l := i + 1; l < i+k.bitSuffixLength+1; l++ {
index, pos := l/8, l%8 index, pos := l/8, l%8
if hasBit(bitSufixes[j], uint8(bitSuffixPos)) { if hasBit(bitSuffixes[j], uint8(bitSuffixPos)) {
pseudoAddrBytes[index] = bits.Reverse8(setBit(bits.Reverse8(pseudoAddrBytes[index]), uint8(pos))) pseudoAddrBytes[index] = bits.Reverse8(setBit(bits.Reverse8(pseudoAddrBytes[index]), uint8(pos)))
} else { } else {
pseudoAddrBytes[index] = bits.Reverse8(clearBit(bits.Reverse8(pseudoAddrBytes[index]), uint8(pos))) pseudoAddrBytes[index] = bits.Reverse8(clearBit(bits.Reverse8(pseudoAddrBytes[index]), uint8(pos)))
...@@ -468,7 +465,7 @@ func (k *Kad) connectionAttemptsHandler(ctx context.Context, wg *sync.WaitGroup, ...@@ -468,7 +465,7 @@ func (k *Kad) connectionAttemptsHandler(ctx context.Context, wg *sync.WaitGroup,
} }
} }
} }
for i := 0; i < 64; i++ { for i := 0; i < 32; i++ {
go connAttempt(peerConnChan) go connAttempt(peerConnChan)
} }
for i := 0; i < 8; i++ { for i := 0; i < 8; i++ {
...@@ -529,10 +526,6 @@ func (k *Kad) manage() { ...@@ -529,10 +526,6 @@ func (k *Kad) manage() {
default: default:
} }
if k.standalone {
continue
}
oldDepth := k.NeighborhoodDepth() oldDepth := k.NeighborhoodDepth()
k.connectNeighbours(&wg, peerConnChan, peerConnChan2) k.connectNeighbours(&wg, peerConnChan, peerConnChan2)
k.connectBalanced(&wg, peerConnChan2) k.connectBalanced(&wg, peerConnChan2)
......
...@@ -107,7 +107,7 @@ func (d *mock) Peers() []swarm.Address { ...@@ -107,7 +107,7 @@ func (d *mock) Peers() []swarm.Address {
return d.peers return d.peers
} }
func (d *mock) ClosestPeer(addr swarm.Address, _ bool, skipPeers ...swarm.Address) (peerAddr swarm.Address, err error) { func (d *mock) ClosestPeer(addr swarm.Address, wantSelf bool, skipPeers ...swarm.Address) (peerAddr swarm.Address, err error) {
if len(skipPeers) == 0 { if len(skipPeers) == 0 {
if d.closestPeerErr != nil { if d.closestPeerErr != nil {
return d.closestPeer, d.closestPeerErr return d.closestPeer, d.closestPeerErr
...@@ -147,8 +147,13 @@ func (d *mock) ClosestPeer(addr swarm.Address, _ bool, skipPeers ...swarm.Addres ...@@ -147,8 +147,13 @@ func (d *mock) ClosestPeer(addr swarm.Address, _ bool, skipPeers ...swarm.Addres
} }
if peerAddr.IsZero() { if peerAddr.IsZero() {
return peerAddr, topology.ErrNotFound if wantSelf {
return peerAddr, topology.ErrWantSelf
} else {
return peerAddr, topology.ErrNotFound
}
} }
return peerAddr, nil return peerAddr, nil
} }
......
...@@ -34,7 +34,7 @@ Once the operation is finished, the open span should be finished: ...@@ -34,7 +34,7 @@ Once the operation is finished, the open span should be finished:
span.Finish() span.Finish()
The tracing package also provides a function for creating a logger which will The tracing package also provides a function for creating a logger which will
inject a "traceid" field entry to the log line, which helps in finding out which inject a "traceID" field entry to the log line, which helps in finding out which
log lines belong to a specific trace. log lines belong to a specific trace.
To create a logger with trace just wrap an existing logger: To create a logger with trace just wrap an existing logger:
...@@ -46,6 +46,6 @@ To create a logger with trace just wrap an existing logger: ...@@ -46,6 +46,6 @@ To create a logger with trace just wrap an existing logger:
Which will result in the following log line (if the context contains tracing Which will result in the following log line (if the context contains tracing
information): information):
time="2015-09-07T08:48:33Z" level=info msg="some message" traceid=ed65818cc1d30c time="2015-09-07T08:48:33Z" level=info msg="some message" traceID=ed65818cc1d30c
*/ */
package tracing package tracing
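A minimal sketch of the wrapping described above, assuming logger is a logging.Logger and ctx already carries an open span (both names are placeholders):

// NewLoggerWithTraceID returns a logrus Entry carrying the traceID field,
// so related log lines can be grouped per trace.
entry := tracing.NewLoggerWithTraceID(ctx, logger)
entry.Info("some message") // ... msg="some message" traceID=ed65818cc1d30c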
...@@ -34,7 +34,7 @@ var ( ...@@ -34,7 +34,7 @@ var (
type contextKey struct{} type contextKey struct{}
// LogField is the key in log message field that holds tracing id value. // LogField is the key in log message field that holds tracing id value.
const LogField = "traceid" const LogField = "traceID"
const ( const (
// TraceContextHeaderName is the http header name used to propagate tracing context. // TraceContextHeaderName is the http header name used to propagate tracing context.
...@@ -91,7 +91,7 @@ func NewTracer(o *Options) (*Tracer, io.Closer, error) { ...@@ -91,7 +91,7 @@ func NewTracer(o *Options) (*Tracer, io.Closer, error) {
// StartSpanFromContext starts a new tracing span that is either a root one or a // StartSpanFromContext starts a new tracing span that is either a root one or a
// child of existing one from the provided Context. If logger is provided, a new // child of existing one from the provided Context. If logger is provided, a new
// log Entry will be returned with "traceid" log field. // log Entry will be returned with "traceID" log field.
func (t *Tracer) StartSpanFromContext(ctx context.Context, operationName string, l logging.Logger, opts ...opentracing.StartSpanOption) (opentracing.Span, *logrus.Entry, context.Context) { func (t *Tracer) StartSpanFromContext(ctx context.Context, operationName string, l logging.Logger, opts ...opentracing.StartSpanOption) (opentracing.Span, *logrus.Entry, context.Context) {
if t == nil { if t == nil {
t = noopTracer t = noopTracer
...@@ -239,7 +239,7 @@ func FromContext(ctx context.Context) opentracing.SpanContext { ...@@ -239,7 +239,7 @@ func FromContext(ctx context.Context) opentracing.SpanContext {
return c return c
} }
// NewLoggerWithTraceID creates a new log Entry with "traceid" field added if it // NewLoggerWithTraceID creates a new log Entry with "traceID" field added if it
// exists in tracing span context stored from go context. // exists in tracing span context stored from go context.
func NewLoggerWithTraceID(ctx context.Context, l logging.Logger) *logrus.Entry { func NewLoggerWithTraceID(ctx context.Context, l logging.Logger) *logrus.Entry {
return loggerWithTraceID(FromContext(ctx), l) return loggerWithTraceID(FromContext(ctx), l)
......
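And a hedged sketch of opening a span with the signature shown above; the tracer variable, operation name, and log call are illustrative only:

// Start (or continue) a trace, obtain a trace-aware logger, and close the
// span once the operation finishes.
span, traceLogger, ctx := tracer.StartSpanFromContext(ctx, "handle-request", s.logger)
defer span.Finish()
traceLogger.Debug("request received")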
...@@ -55,7 +55,7 @@ func IsSynced(ctx context.Context, backend Backend, maxDelay time.Duration) (boo ...@@ -55,7 +55,7 @@ func IsSynced(ctx context.Context, backend Backend, maxDelay time.Duration) (boo
// WaitSynced will wait until we are synced with the given blockchain backend, // WaitSynced will wait until we are synced with the given blockchain backend,
// with the given maxDelay duration as the maximum time we can be behind the // with the given maxDelay duration as the maximum time we can be behind the
// last block. // last block.
func WaitSynced(logger logging.Logger, ctx context.Context, backend Backend, maxDelay time.Duration) error { func WaitSynced(ctx context.Context, logger logging.Logger, backend Backend, maxDelay time.Duration) error {
for { for {
synced, blockTime, err := IsSynced(ctx, backend, maxDelay) synced, blockTime, err := IsSynced(ctx, backend, maxDelay)
if err != nil { if err != nil {
......
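A hedged call-site sketch for the reordered WaitSynced signature (context first, then logger); the package alias, backend variable, and timeout are assumptions:

// Block start-up until the backend's newest block is at most two minutes old.
if err := transaction.WaitSynced(ctx, logger, swapBackend, 2*time.Minute); err != nil {
	return fmt.Errorf("waiting for blockchain backend to sync: %w", err)
}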
...@@ -27,14 +27,19 @@ type Traverser interface { ...@@ -27,14 +27,19 @@ type Traverser interface {
Traverse(context.Context, swarm.Address, swarm.AddressIterFunc) error Traverse(context.Context, swarm.Address, swarm.AddressIterFunc) error
} }
type PutGetter interface {
storage.Putter
storage.Getter
}
// New constructs for a new Traverser. // New constructs for a new Traverser.
func New(store storage.Storer) Traverser { func New(store PutGetter) Traverser {
return &service{store: store} return &service{store: store}
} }
// service is implementation of Traverser using storage.Storer as its storage. // service is implementation of Traverser using storage.Storer as its storage.
type service struct { type service struct {
store storage.Storer store PutGetter
} }
// Traverse implements Traverser.Traverse method. // Traverse implements Traverser.Traverse method.
......
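A hedged construction sketch for the narrowed PutGetter dependency; the mock store and the assumed shape of swarm.AddressIterFunc (taken here to be func(swarm.Address) error) are not from this diff:

// Any value implementing both storage.Putter and storage.Getter now satisfies
// the constructor, so tests can supply a small in-memory mock instead of a full Storer.
var store traversal.PutGetter = inmemMock
trav := traversal.New(store)
err := trav.Traverse(ctx, rootAddr, func(addr swarm.Address) error {
	fmt.Println("visited", addr.String())
	return nil
})
if err != nil {
	return err
}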