Commit 24e7b114 authored by luxq's avatar luxq

Merge branch 'ori-master' into master

parents b2a8f3b1 c3f9dc19
......@@ -77,6 +77,9 @@ jobs:
- name: Test pushsync (chunks)
id: pushsync-chunks-1
run: beekeeper check --cluster-name local-dns --checks=ci-pushsync-chunks
- name: Test pushsync (light mode chunks)
id: pushsync-chunks-2
run: beekeeper check --cluster-name local-dns --checks=ci-pushsync-light-chunks
- name: Test retrieval
id: retrieval-1
run: beekeeper check --cluster-name local-dns --checks=ci-retrieval
......
name: OpenAPI
on:
push:
branches:
- 'master'
pull_request:
branches:
- '**'
jobs:
build:
name: Preview
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: '0'
- name: Check whether docs have changed
id: checkdocs
run: |
changed=false
git diff --name-only HEAD^ HEAD > files.txt
while IFS= read -r file
do
if [[ $file == openapi/* ]]; then
echo "detected openapi spec change"
changed=true
fi
done < files.txt
if [ $changed == true ]
then
echo "::set-output name=build_docs::true"
else
echo "::set-output name=build_docs::false"
fi
- name: Build the OpenAPI specs
if: steps.checkdocs.outputs.build_docs == 'true'
uses: acud/openapi-dockerized@v1
with:
build-roots: 'openapi/Swarm.yaml openapi/SwarmDebug.yaml'
env:
AWS_ACCESS_KEY_ID: ${{ secrets.DO_AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.DO_AWS_SECRET_ACCESS_KEY }}
AWS_EC2_METADATA_DISABLED: true #needed when pushing to DigitalOcean Spaces
AWS_ENDPOINT: fra1.digitaloceanspaces.com
BUCKET_NAME: openapi-specs
......@@ -11,7 +11,7 @@ builds:
- -v
- -trimpath
ldflags:
- -s -w -X github.com/ethersphere/bee.version={{.Version}} -X github.com/ethersphere/bee.commit={{.ShortCommit}} -X github.com/ethersphere/bee.commitTime={{.CommitTimestamp}}
- -s -w -X github.com/ethersphere/bee.version={{.Version}} -X github.com/ethersphere/bee.commitHash={{.ShortCommit}} -X github.com/ethersphere/bee.commitTime={{.CommitTimestamp}}
env:
- CGO_ENABLED=0
goos:
......
......@@ -8,7 +8,7 @@ COPY . ./
RUN make binary
FROM debian:10.9-slim
FROM debian:10.10-slim
ENV DEBIAN_FRONTEND noninteractive
......
FROM debian:10.9-slim
FROM debian:10.10-slim
ENV DEBIAN_FRONTEND noninteractive
......
FROM debian:10.9-slim
FROM debian:10.10-slim
ENV DEBIAN_FRONTEND noninteractive
......
......@@ -9,10 +9,10 @@ BEEKEEPER_CLUSTER ?= local
BEELOCAL_BRANCH ?= main
BEEKEEPER_BRANCH ?= master
COMMIT ?= "$(shell git describe --long --dirty --always --match "" || true)"
COMMIT_HASH ?= "$(shell git describe --long --dirty --always --match "" || true)"
CLEAN_COMMIT ?= "$(shell git describe --long --always --match "" || true)"
COMMIT_TIME ?= "$(shell git show -s --format=%ct $(CLEAN_COMMIT) || true)"
LDFLAGS ?= -s -w -X github.com/ethersphere/bee.commit="$(COMMIT)" -X github.com/ethersphere/bee.commitTime="$(COMMIT_TIME)"
LDFLAGS ?= -s -w -X github.com/ethersphere/bee.commitHash="$(COMMIT_HASH)" -X github.com/ethersphere/bee.commitTime="$(COMMIT_TIME)"
.PHONY: all
all: build lint vet test-race binary
......
......@@ -3,6 +3,8 @@
[![Go](https://github.com/ethersphere/bee/workflows/Go/badge.svg)](https://github.com/ethersphere/bee/actions)
[![Go Reference](https://pkg.go.dev/badge/github.com/ethersphere/bee.svg)](https://pkg.go.dev/github.com/ethersphere/bee)
[![Coverage Status](https://coveralls.io/repos/github/ethersphere/bee/badge.svg)](https://coveralls.io/github/ethersphere/bee)
[![API OpenAPI Specs](https://img.shields.io/badge/openapi-api-blue)](https://docs.ethswarm.org/api/)
[![Debug API OpenAPI Specs](https://img.shields.io/badge/openapi-debugapi-lightblue)](https://docs.ethswarm.org/debug-api/)
## DISCLAIMER
......
......@@ -40,9 +40,10 @@ const (
optionNameNetworkID = "network-id"
optionWelcomeMessage = "welcome-message"
optionCORSAllowedOrigins = "cors-allowed-origins"
optionNameStandalone = "standalone"
optionNameTracingEnabled = "tracing-enable"
optionNameTracingEndpoint = "tracing-endpoint"
optionNameTracingHost = "tracing-host"
optionNameTracingPort = "tracing-port"
optionNameTracingServiceName = "tracing-service-name"
optionNameVerbosity = "verbosity"
optionNameGlobalPinningEnabled = "global-pinning-enable"
......@@ -69,6 +70,7 @@ const (
optionNameBlockTime = "block-time"
optionWarmUpTime = "warmup-time"
optionNameMainNet = "mainnet"
optionNameRetrievalCaching = "cache-retrieval"
)
func init() {
......@@ -210,14 +212,15 @@ func (c *command) setAllFlags(cmd *cobra.Command) {
cmd.Flags().String(optionNameNATAddr, "", "NAT exposed address")
cmd.Flags().Bool(optionNameP2PWSEnable, false, "enable P2P WebSocket transport")
cmd.Flags().Bool(optionNameP2PQUICEnable, false, "enable P2P QUIC transport")
cmd.Flags().StringSlice(optionNameBootnodes, []string{}, "initial nodes to connect to")
cmd.Flags().StringSlice(optionNameBootnodes, []string{"/dnsaddr/testnet.ethswarm.org"}, "initial nodes to connect to")
cmd.Flags().Bool(optionNameDebugAPIEnable, false, "enable debug HTTP API")
cmd.Flags().String(optionNameDebugAPIAddr, ":1635", "debug HTTP API listen address")
cmd.Flags().Uint64(optionNameNetworkID, 10, "ID of the Swarm network")
cmd.Flags().StringSlice(optionCORSAllowedOrigins, []string{}, "origins with CORS headers enabled")
cmd.Flags().Bool(optionNameStandalone, false, "whether we want the node to start with no listen addresses for p2p")
cmd.Flags().Bool(optionNameTracingEnabled, false, "enable tracing")
cmd.Flags().String(optionNameTracingEndpoint, "127.0.0.1:6831", "endpoint to send tracing data")
cmd.Flags().String(optionNameTracingHost, "", "host to send tracing data")
cmd.Flags().String(optionNameTracingPort, "", "port to send tracing data")
cmd.Flags().String(optionNameTracingServiceName, "bee", "service name identifier for tracing")
cmd.Flags().String(optionNameVerbosity, "info", "log verbosity level 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=trace")
cmd.Flags().String(optionWelcomeMessage, "", "send a welcome message string during handshakes")
......@@ -245,6 +248,7 @@ func (c *command) setAllFlags(cmd *cobra.Command) {
cmd.Flags().String(optionNameSwapDeploymentGasPrice, "", "gas price in wei to use for deployment and funding")
cmd.Flags().Duration(optionWarmUpTime, time.Minute*20, "time to warmup the node before pull/push protocols can be kicked off.")
cmd.Flags().Bool(optionNameMainNet, false, "triggers connect to main net bootnodes.")
cmd.Flags().Bool(optionNameRetrievalCaching, true, "enable forwarded content caching")
}
func newLogger(cmd *cobra.Command, verbosity string) (logging.Logger, error) {
......
......@@ -106,7 +106,7 @@ damage to hardware or loss of funds associated with the Ethereum account connect
No developers or entity involved will be liable for any claims and damages associated with your use,
inability to use, or your interaction with other nodes or the software.`)
fmt.Printf("\n\nversion: %v - planned to be supported until %v, please follow http://ethswarm.org/\n\n", bee.Version, endSupportDate())
fmt.Printf("\n\nversion: %v - planned to be supported until %v, please follow https://ethswarm.org/\n\n", bee.Version, endSupportDate())
debugAPIAddr := c.config.GetString(optionNameDebugAPIAddr)
if !c.config.GetBool(optionNameDebugAPIEnable) {
......@@ -128,18 +128,34 @@ inability to use, or your interaction with other nodes or the software.`)
}
mainnet := c.config.GetBool(optionNameMainNet)
networkID := c.config.GetUint64(optionNameNetworkID)
networkID, err = parseNetworks(mainnet, networkID)
if err != nil {
return err
if mainnet {
userHasSetNetworkID := c.config.IsSet(optionNameNetworkID)
if userHasSetNetworkID && networkID != 1 {
return errors.New("provided network ID does not match mainnet")
}
networkID = 1
}
bootnodes := c.config.GetStringSlice(optionNameBootnodes)
bootnodes = parseBootnodes(logger, mainnet, networkID, bootnodes)
blockTime := c.config.GetUint64(optionNameBlockTime)
blockTime = parseBlockTime(mainnet, blockTime)
networkConfig := getConfigByNetworkID(networkID, blockTime)
if c.config.IsSet(optionNameBootnodes) {
networkConfig.bootNodes = bootnodes
}
if c.config.IsSet(optionNameBlockTime) && blockTime != 0 {
networkConfig.blockTime = blockTime
}
tracingEndpoint := c.config.GetString(optionNameTracingEndpoint)
if c.config.IsSet(optionNameTracingHost) && c.config.IsSet(optionNameTracingPort) {
tracingEndpoint = strings.Join([]string{c.config.GetString(optionNameTracingHost), c.config.GetString(optionNameTracingPort)}, ":")
}
b, err := node.NewBee(c.config.GetString(optionNameP2PAddr), signerConfig.publicKey, signerConfig.signer, networkID, logger, signerConfig.libp2pPrivateKey, signerConfig.pssPrivateKey, &node.Options{
DataDir: c.config.GetString(optionNameDataDir),
......@@ -155,11 +171,10 @@ inability to use, or your interaction with other nodes or the software.`)
EnableWS: c.config.GetBool(optionNameP2PWSEnable),
EnableQUIC: c.config.GetBool(optionNameP2PQUICEnable),
WelcomeMessage: c.config.GetString(optionWelcomeMessage),
Bootnodes: bootnodes,
Bootnodes: networkConfig.bootNodes,
CORSAllowedOrigins: c.config.GetStringSlice(optionCORSAllowedOrigins),
Standalone: c.config.GetBool(optionNameStandalone),
TracingEnabled: c.config.GetBool(optionNameTracingEnabled),
TracingEndpoint: c.config.GetString(optionNameTracingEndpoint),
TracingEndpoint: tracingEndpoint,
TracingServiceName: c.config.GetString(optionNameTracingServiceName),
Logger: logger,
GlobalPinningEnabled: c.config.GetBool(optionNameGlobalPinningEnabled),
......@@ -179,9 +194,11 @@ inability to use, or your interaction with other nodes or the software.`)
BlockHash: c.config.GetString(optionNameBlockHash),
PostageContractAddress: c.config.GetString(optionNamePostageContractAddress),
PriceOracleAddress: c.config.GetString(optionNamePriceOracleAddress),
BlockTime: blockTime,
BlockTime: networkConfig.blockTime,
DeployGasPrice: c.config.GetString(optionNameSwapDeploymentGasPrice),
WarmupTime: c.config.GetDuration(optionWarmUpTime),
ChainID: networkConfig.chainID,
RetrievalCaching: c.config.GetBool(optionNameRetrievalCaching),
})
if err != nil {
return err
......@@ -420,36 +437,28 @@ func (c *command) configureSigner(cmd *cobra.Command, logger logging.Logger) (co
}, nil
}
func parseNetworks(main bool, networkID uint64) (uint64, error) {
if main && networkID != 1 {
return 0, errors.New("provided network ID does not match mainnet")
}
return networkID, nil
type networkConfig struct {
bootNodes []string
blockTime uint64
chainID int64
}
func parseBootnodes(log logging.Logger, main bool, networkID uint64, bootnodes []string) []string {
if len(bootnodes) > 0 {
return bootnodes // use provided values
}
if main {
return []string{"/dnsaddr/mainnet.ethswarm.org"}
func getConfigByNetworkID(networkID uint64, defaultBlockTime uint64) *networkConfig {
var config = networkConfig{
blockTime: defaultBlockTime,
}
if networkID == 10 {
return []string{"/dnsaddr/testnet.ethswarm.org"}
}
log.Warning("no bootnodes defined for network ID", networkID)
return bootnodes
}
func parseBlockTime(main bool, blockTime uint64) uint64 {
if main {
return uint64(5 * time.Second)
switch networkID {
case 1:
config.bootNodes = []string{"/dnsaddr/mainnet.ethswarm.org"}
config.blockTime = 5
config.chainID = 100
case 5: //staging
config.chainID = 5
case 10: //test
config.chainID = 5
default: //will use the value provided by the chain
config.chainID = -1
}
return blockTime
return &config
}
......@@ -3,48 +3,49 @@ module github.com/ethersphere/bee
go 1.15
require (
github.com/btcsuite/btcd v0.21.0-beta
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect
github.com/btcsuite/btcd v0.22.0-beta
github.com/coreos/go-semver v0.3.0
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
github.com/ethereum/go-ethereum v1.9.23
github.com/ethersphere/go-price-oracle-abi v0.1.0
github.com/ethersphere/go-storage-incentives-abi v0.3.0
github.com/ethersphere/go-sw3-abi v0.4.0
github.com/ethersphere/langos v1.0.0
github.com/gogo/protobuf v1.3.1
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
github.com/google/go-cmp v0.5.0
github.com/google/gopacket v1.1.19 // indirect
github.com/gogo/protobuf v1.3.2
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/go-cmp v0.5.5
github.com/google/uuid v1.1.4 // indirect
github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 // indirect
github.com/gorilla/handlers v1.4.2
github.com/gorilla/mux v1.7.4
github.com/gorilla/websocket v1.4.2
github.com/hashicorp/go-multierror v1.1.1
github.com/huin/goupnp v1.0.1 // indirect
github.com/ipfs/go-log v1.0.5 // indirect
github.com/ipfs/go-log/v2 v2.3.0 // indirect
github.com/kardianos/service v1.2.0
github.com/klauspost/cpuid/v2 v2.0.8 // indirect
github.com/koron/go-ssdp v0.0.2 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/libp2p/go-libp2p v0.13.0
github.com/libp2p/go-libp2p-autonat v0.4.0
github.com/libp2p/go-libp2p-core v0.8.0
github.com/libp2p/go-libp2p-noise v0.1.2 // indirect
github.com/libp2p/go-libp2p-peerstore v0.2.6
github.com/libp2p/go-libp2p v0.14.3
github.com/libp2p/go-libp2p-autonat v0.4.2
github.com/libp2p/go-libp2p-core v0.8.5
github.com/libp2p/go-libp2p-discovery v0.5.1 // indirect
github.com/libp2p/go-libp2p-peerstore v0.2.7
github.com/libp2p/go-libp2p-quic-transport v0.10.0
github.com/libp2p/go-libp2p-transport-upgrader v0.4.0
github.com/libp2p/go-netroute v0.1.4 // indirect
github.com/libp2p/go-sockaddr v0.1.0 // indirect
github.com/libp2p/go-tcp-transport v0.2.1
github.com/libp2p/go-libp2p-transport-upgrader v0.4.2
github.com/libp2p/go-tcp-transport v0.2.3
github.com/libp2p/go-ws-transport v0.4.0
github.com/mattn/go-colorable v0.1.2 // indirect
github.com/miekg/dns v1.1.43 // indirect
github.com/mitchellh/mapstructure v1.3.2 // indirect
github.com/multiformats/go-multiaddr v0.3.1
github.com/multiformats/go-multiaddr-dns v0.2.0
github.com/multiformats/go-multistream v0.2.0
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
github.com/multiformats/go-multiaddr v0.3.3
github.com/multiformats/go-multiaddr-dns v0.3.1
github.com/multiformats/go-multistream v0.2.2
github.com/opentracing/opentracing-go v1.2.0
github.com/pelletier/go-toml v1.8.0 // indirect
github.com/prometheus/client_golang v1.7.1
github.com/prometheus/client_golang v1.11.0
github.com/prometheus/common v0.29.0 // indirect
github.com/prometheus/procfs v0.7.0 // indirect
github.com/sirupsen/logrus v1.6.0
github.com/smartystreets/assertions v1.1.1 // indirect
github.com/spf13/afero v1.3.1 // indirect
......@@ -59,22 +60,18 @@ require (
github.com/vmihailenco/msgpack/v5 v5.3.4
github.com/wealdtech/go-ens/v3 v3.4.4
gitlab.com/nolash/go-mockbytes v0.0.7
go.opencensus.io v0.22.5 // indirect
go.uber.org/atomic v1.7.0
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.16.0 // indirect
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad
golang.org/x/net v0.0.0-20201224014010-6772e930b67b
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9
golang.org/x/sys v0.0.0-20210108172913-0df2131ae363
go.uber.org/atomic v1.8.0
go.uber.org/multierr v1.7.0 // indirect
go.uber.org/zap v1.18.1 // indirect
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e
golang.org/x/net v0.0.0-20210614182718-04defd469f4e
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf
golang.org/x/text v0.3.4 // indirect
golang.org/x/time v0.0.0-20191024005414-555d28b269f0
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
google.golang.org/protobuf v1.27.1 // indirect
gopkg.in/ini.v1 v1.57.0 // indirect
gopkg.in/yaml.v2 v2.3.0
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 // indirect
honnef.co/go/tools v0.0.1-2020.1.4 // indirect
gopkg.in/yaml.v2 v2.4.0
resenje.org/singleflight v0.2.0
resenje.org/web v0.4.3
)
This source diff could not be displayed because it is too large. You can view the blob instead.
This diff is collapsed.
openapi: 3.0.3
info:
version: 0.6.0
version: 1.0.0
title: Common Data Types
description: |
\*****bzzz*****
......@@ -333,6 +333,31 @@ components:
type: integer
immutableFlag:
type: boolean
exists:
type: boolean
StampBucketData:
type: object
properties:
bucketID:
type: integer
collisions:
type: integer
PostageStampBuckets:
type: object
properties:
depth:
type: integer
bucketDepth:
type: integer
bucketUpperBound:
type: integer
buckets:
type: array
nullable: true
items:
$ref: "#/components/schemas/StampBucketData"
Settlement:
type: object
......@@ -449,17 +474,17 @@ components:
nonce:
type: integer
gasPrice:
type: "#/components/schemas/BigInt"
$ref: "#/components/schemas/BigInt"
gasLimit:
type: integer
data:
type: string
created:
type: "#/components/schemas/DateTime"
$ref: "#/components/schemas/DateTime"
description:
type: string
value:
type: "#/components/schemas/BigInt"
$ref: "#/components/schemas/BigInt"
PendingTransactionReponse:
type: object
......@@ -554,7 +579,10 @@ components:
schema:
type: boolean
required: false
description: Represents the pinning state of the chunk
description: >
Represents whether the uploaded data should also be locally pinned on the node.
Warning! Not available for nodes that run in Gateway mode!
SwarmEncryptParameter:
in: header
......@@ -562,7 +590,10 @@ components:
schema:
type: boolean
required: false
description: Represents the encrypting state of the file
description: >
Represents the encrypting state of the file
Warning! Not available for nodes that run in Gateway mode!
ContentTypePreserved:
in: header
......@@ -603,7 +634,7 @@ components:
description: "ID of Postage Batch that is used to upload data with"
required: true
schema:
$ref: "SwarmCommon.yaml#/components/schemas/SwarmAddress"
$ref: "#/components/schemas/SwarmAddress"
responses:
"204":
......@@ -626,12 +657,6 @@ components:
application/problem+json:
schema:
$ref: "#/components/schemas/ProblemDetails"
"403":
description: Forbidden
content:
application/problem+json:
schema:
$ref: "#/components/schemas/ProblemDetails"
"404":
description: Not Found
content:
......@@ -644,3 +669,10 @@ components:
application/problem+json:
schema:
$ref: "#/components/schemas/ProblemDetails"
"GatewayForbidden":
description: "Endpoint or header (pinning or encryption headers) forbidden in Gateway mode"
content:
application/problem+json:
schema:
$ref: "#/components/schemas/ProblemDetails"
\ No newline at end of file
openapi: 3.0.3
info:
version: 0.6.0
version: 1.0.0
title: Bee Debug API
description: "A list of the currently provided debug interfaces to interact with the bee node"
......@@ -9,7 +9,7 @@ security:
externalDocs:
description: Browse the documentation @ the Swarm Docs
url: "https://docs.swarm.eth"
url: "https://docs.ethswarm.org"
servers:
- url: "http://{apiRoot}:{port}"
......@@ -663,8 +663,6 @@ paths:
$ref: "SwarmCommon.yaml#/components/schemas/NewTagDebugResponse"
"400":
$ref: "SwarmCommon.yaml#/components/responses/400"
"403":
$ref: "SwarmCommon.yaml#/components/responses/403"
"500":
$ref: "SwarmCommon.yaml#/components/responses/500"
default:
......@@ -802,9 +800,34 @@ paths:
default:
description: Default response
"/stamps/{id}/buckets":
parameters:
- in: path
name: id
schema:
$ref: "SwarmCommon.yaml#/components/schemas/BatchID"
required: true
description: ID of the postage batch
get:
summary: Get extended bucket data of a batch
tags:
- Postage Stamps
responses:
"200":
description: Returns extended bucket data of the provided batch ID
content:
application/json:
schema:
$ref: "SwarmCommon.yaml#/components/schemas/PostageStampBuckets"
"400":
$ref: "SwarmCommon.yaml#/components/responses/400"
default:
description: Default response
"/stamps/{amount}/{depth}":
post:
summary: Buy a new postage batch. Be aware, this endpoint create an on-chain transactions and transfers BZZ from the node's Ethereum account and hence directly manipulates the wallet balance!
summary: Buy a new postage batch.
description: Be aware, this endpoint creates an on-chain transaction and transfers BZZ from the node's Ethereum account and hence directly manipulates the wallet balance!
tags:
- Postage Stamps
parameters:
......@@ -826,6 +849,11 @@ paths:
type: string
required: false
description: An optional label for this batch
- in: header
name: immutable
schema:
type: boolean
required: false
- $ref: "SwarmCommon.yaml#/components/parameters/GasPriceParameter"
responses:
"201":
......@@ -840,25 +868,3 @@ paths:
$ref: "SwarmCommon.yaml#/components/responses/500"
default:
description: Default response
"/stamps/default/{id}":
parameters:
- in: path
name: id
schema:
$ref: "SwarmCommon.yaml#/components/schemas/BatchID"
required: true
description: Swarm address of the stamp
put:
summary: Set the default postage stamp issuer
tags:
- Postage Stamps
responses:
"204":
description: The default stamp issuer was updated successfully
"400":
$ref: "SwarmCommon.yaml#/components/responses/400"
"500":
$ref: "SwarmCommon.yaml#/components/responses/500"
default:
description: Default response
\ No newline at end of file
......@@ -24,7 +24,7 @@ The node's output was:
*)
ETH_ADDRESS=$(echo "$RESP" | grep ethereum | cut -d' ' -f6 | tr -d '"')
echo "
Please make sure there is sufficient ETH and BZZ available on the node's Ethereum address: $ETH_ADDRESS.
Please make sure there is sufficient native tokens and BZZ available on the node's Ethereum address at the APPROPRIATE BLOCKCHAIN: 0x$ETH_ADDRESS.
Learn how to fund your node by visiting our docs at https://docs.ethswarm.org/docs/installation/fund-your-node
......
......@@ -62,8 +62,6 @@ password-file: /var/lib/bee/password
# postage-stamp-address: ""
## ENS compatible API endpoint for a TLD and with contract address, can be repeated, format [tld:][contract-addr@]url
# resolver-options: []
## whether we want the node to start with no listen addresses for p2p
# standalone: false
## enable swap (default true)
# swap-enable: true
## swap ethereum blockchain endpoint (default "ws://localhost:8546")
......
......@@ -45,7 +45,6 @@ services:
- BEE_PAYMENT_TOLERANCE
- BEE_POSTAGE_STAMP_ADDRESS
- BEE_RESOLVER_OPTIONS
- BEE_STANDALONE
- BEE_SWAP_ENABLE
- BEE_SWAP_ENDPOINT
- BEE_SWAP_FACTORY_ADDRESS
......
......@@ -69,8 +69,6 @@ BEE_CLEF_SIGNER_ENABLE=true
# BEE_POSTAGE_STAMP_ADDRESS=
## ENS compatible API endpoint for a TLD and with contract address, can be repeated, format [tld:][contract-addr@]url
# BEE_RESOLVER_OPTIONS=[]
## whether we want the node to start with no listen addresses for p2p
# BEE_STANDALONE=false
## enable swap (default true)
# BEE_SWAP_ENABLE=true
## swap ethereum blockchain endpoint (default ws://localhost:8546)
......
......@@ -62,8 +62,6 @@ password-file: /usr/local/var/lib/swarm-bee/password
# postage-stamp-address: ""
## ENS compatible API endpoint for a TLD and with contract address, can be repeated, format [tld:][contract-addr@]url
# resolver-options: []
## whether we want the node to start with no listen addresses for p2p
# standalone: false
## enable swap (default true)
# swap-enable: true
## swap ethereum blockchain endpoint (default "ws://localhost:8546")
......
......@@ -52,8 +52,6 @@ password-file: ./password
# postage-stamp-address: ""
## ENS compatible API endpoint for a TLD and with contract address, can be repeated, format [tld:][contract-addr@]url
# resolver-options: []
## whether we want the node to start with no listen addresses for p2p
# standalone: false
## enable swap (default true)
# swap-enable: true
## swap ethereum blockchain endpoint (default "ws://localhost:8546")
......
......@@ -69,14 +69,13 @@ const (
)
var (
errInvalidNameOrAddress = errors.New("invalid name or bzz address")
errNoResolver = errors.New("no resolver connected")
errInvalidRequest = errors.New("could not validate request")
errInvalidContentType = errors.New("invalid content-type")
errDirectoryStore = errors.New("could not store directory")
errFileStore = errors.New("could not store file")
errInvalidPostageBatch = errors.New("invalid postage batch id")
errSwarmPostageBatchIDHeaderNotFound = fmt.Errorf("header %s not found", SwarmPostageBatchIdHeader)
errInvalidNameOrAddress = errors.New("invalid name or bzz address")
errNoResolver = errors.New("no resolver connected")
errInvalidRequest = errors.New("could not validate request")
errInvalidContentType = errors.New("invalid content-type")
errDirectoryStore = errors.New("could not store directory")
errFileStore = errors.New("could not store file")
errInvalidPostageBatch = errors.New("invalid postage batch id")
)
// Service is the API service interface.
......@@ -238,7 +237,7 @@ func requestPostageBatchId(r *http.Request) ([]byte, error) {
return b, nil
}
return nil, errSwarmPostageBatchIDHeaderNotFound
return nil, errInvalidPostageBatch
}
func (s *server) newTracingHandler(spanName string) func(h http.Handler) http.Handler {
......
......@@ -284,7 +284,7 @@ func TestPostageHeaderError(t *testing.T) {
for _, endpoint := range endpoints {
t.Run(endpoint+": empty batch", func(t *testing.T) {
hexbatch := hex.EncodeToString(batchEmpty)
expCode := http.StatusCreated
expCode := http.StatusBadRequest
jsonhttptest.Request(t, client, http.MethodPost, "/"+endpoint, expCode,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, hexbatch),
jsonhttptest.WithRequestHeader(api.ContentTypeHeader, "application/octet-stream"),
......
......@@ -52,10 +52,7 @@ func (s *server) bytesUploadHandler(w http.ResponseWriter, r *http.Request) {
ctx := sctx.SetTag(r.Context(), tag)
batch, err := requestPostageBatchId(r)
switch {
case errors.Is(err, errSwarmPostageBatchIDHeaderNotFound) && s.post.DefaultIssuer() != nil:
batch = s.post.DefaultIssuer().ID()
case err != nil:
if err != nil {
logger.Debugf("bytes upload: postage batch id:%v", err)
logger.Error("bytes upload: postage batch id")
jsonhttp.BadRequest(w, nil)
......
......@@ -46,10 +46,7 @@ func (s *server) bzzUploadHandler(w http.ResponseWriter, r *http.Request) {
}
batch, err := requestPostageBatchId(r)
switch {
case errors.Is(err, errSwarmPostageBatchIDHeaderNotFound) && s.post.DefaultIssuer() != nil:
batch = s.post.DefaultIssuer().ID()
case err != nil:
if err != nil {
logger.Debugf("bzz upload: postage batch id: %v", err)
logger.Error("bzz upload: postage batch id")
jsonhttp.BadRequest(w, "invalid postage batch id")
......
......@@ -86,10 +86,7 @@ func (s *server) chunkUploadHandler(w http.ResponseWriter, r *http.Request) {
}
batch, err := requestPostageBatchId(r)
switch {
case errors.Is(err, errSwarmPostageBatchIDHeaderNotFound) && s.post.DefaultIssuer() != nil:
batch = s.post.DefaultIssuer().ID()
case err != nil:
if err != nil {
s.logger.Debugf("chunk upload: postage batch id: %v", err)
s.logger.Error("chunk upload: postage batch id")
jsonhttp.BadRequest(w, "invalid postage batch id")
......
......@@ -17,6 +17,9 @@ type (
TagResponse = tagResponse
TagRequest = tagRequest
ListTagsResponse = listTagsResponse
PostageCreateResponse = postageCreateResponse
PostageStampResponse = postageStampResponse
PostageStampsResponse = postageStampsResponse
)
var (
......
......@@ -142,10 +142,7 @@ func (s *server) feedPostHandler(w http.ResponseWriter, r *http.Request) {
}
batch, err := requestPostageBatchId(r)
switch {
case errors.Is(err, errSwarmPostageBatchIDHeaderNotFound) && s.post.DefaultIssuer() != nil:
batch = s.post.DefaultIssuer().ID()
case err != nil:
if err != nil {
s.logger.Debugf("feed put: postage batch id: %v", err)
s.logger.Error("feed put: postage batch id")
jsonhttp.BadRequest(w, "invalid postage batch id")
......
......@@ -212,9 +212,9 @@ func TestFeed_Post(t *testing.T) {
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, hexbatch),
)
})
t.Run("ok - batch empty", func(t *testing.T) {
t.Run("bad request - batch empty", func(t *testing.T) {
hexbatch := hex.EncodeToString(batchEmpty)
jsonhttptest.Request(t, client, http.MethodPost, url, http.StatusCreated,
jsonhttptest.Request(t, client, http.MethodPost, url, http.StatusBadRequest,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, hexbatch),
)
})
......
// Copyright 2021 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package api
import (
"encoding/hex"
"encoding/json"
"errors"
"math/big"
"net/http"
"strconv"
"github.com/ethersphere/bee/pkg/bigint"
"github.com/ethersphere/bee/pkg/jsonhttp"
"github.com/ethersphere/bee/pkg/postage/postagecontract"
"github.com/ethersphere/bee/pkg/sctx"
"github.com/gorilla/mux"
)
// Header names and error strings used by the postage batch endpoints.
const (
	gasPriceHeader  = "Gas-Price" // optional request header carrying a base-10 gas price in wei
	immutableHeader = "Immutable" // optional request header marking the batch immutable
	errBadGasPrice  = "bad gas price"
)
// batchID is a raw postage batch identifier that serializes to JSON as
// a hex-encoded string.
type batchID []byte

// MarshalJSON implements json.Marshaler, emitting the batch ID as a
// quoted hex string.
func (b batchID) MarshalJSON() ([]byte, error) {
	encoded := hex.EncodeToString(b)
	return json.Marshal(encoded)
}
// postageCreateResponse is the JSON body returned by the batch creation
// endpoint; it carries the ID of the newly created postage batch.
type postageCreateResponse struct {
	BatchID batchID `json:"batchID"`
}
// postageCreateHandler buys a new postage batch via the postage
// contract (an on-chain transaction).
//
// Route parameters: "amount" (base-10 big integer) and "depth"
// (must fit in uint8). Optional query parameter "label" names the
// batch. Optional headers: Gas-Price (base-10 integer, forwarded via
// sctx) and Immutable (boolean).
//
// Responds 201 with the new batch ID, 400 on invalid input,
// insufficient funds or invalid depth, and 500 on other contract
// failures.
func (s *server) postageCreateHandler(w http.ResponseWriter, r *http.Request) {
	depthStr := mux.Vars(r)["depth"]

	amount, ok := big.NewInt(0).SetString(mux.Vars(r)["amount"], 10)
	if !ok {
		s.logger.Error("create batch: invalid amount")
		jsonhttp.BadRequest(w, "invalid postage amount")
		return
	}

	// bitSize 8: depth is passed to the contract as uint8 below.
	depth, err := strconv.ParseUint(depthStr, 10, 8)
	if err != nil {
		s.logger.Debugf("create batch: invalid depth: %v", err)
		s.logger.Error("create batch: invalid depth")
		jsonhttp.BadRequest(w, "invalid depth")
		return
	}

	label := r.URL.Query().Get("label")

	ctx := r.Context()
	// Direct map access requires the canonical header key; "Gas-Price"
	// and "Immutable" are already in canonical MIME form.
	if price, ok := r.Header[gasPriceHeader]; ok {
		p, ok := big.NewInt(0).SetString(price[0], 10)
		if !ok {
			s.logger.Error("create batch: bad gas price")
			jsonhttp.BadRequest(w, errBadGasPrice)
			return
		}
		ctx = sctx.SetGasPrice(ctx, p)
	}

	var immutable bool
	if val, ok := r.Header[immutableHeader]; ok {
		// Best effort: an unparsable value leaves immutable at false.
		immutable, _ = strconv.ParseBool(val[0])
	}

	batchID, err := s.postageContract.CreateBatch(ctx, amount, uint8(depth), immutable, label)
	if err != nil {
		if errors.Is(err, postagecontract.ErrInsufficientFunds) {
			s.logger.Debugf("create batch: out of funds: %v", err)
			s.logger.Error("create batch: out of funds")
			jsonhttp.BadRequest(w, "out of funds")
			return
		}
		if errors.Is(err, postagecontract.ErrInvalidDepth) {
			s.logger.Debugf("create batch: invalid depth: %v", err)
			s.logger.Error("create batch: invalid depth")
			jsonhttp.BadRequest(w, "invalid depth")
			return
		}
		s.logger.Debugf("create batch: failed to create: %v", err)
		s.logger.Error("create batch: failed to create")
		jsonhttp.InternalServerError(w, "cannot create batch")
		return
	}

	jsonhttp.Created(w, &postageCreateResponse{
		BatchID: batchID,
	})
}
// postageStampResponse is the JSON representation of a single stamp
// issuer as reported by the stamps endpoints. Field values mirror the
// issuer's accessors of the same names.
type postageStampResponse struct {
	BatchID       batchID        `json:"batchID"`
	Utilization   uint32         `json:"utilization"`
	Usable        bool           `json:"usable"`
	Label         string         `json:"label"`
	Depth         uint8          `json:"depth"`
	Amount        *bigint.BigInt `json:"amount"`
	BucketDepth   uint8          `json:"bucketDepth"`
	BlockNumber   uint64         `json:"blockNumber"`
	ImmutableFlag bool           `json:"immutableFlag"`
}
// postageStampsResponse wraps the list of all stamp issuers returned by
// the stamps listing endpoint. A node with no issuers yields a nil
// slice, which encodes as JSON null.
type postageStampsResponse struct {
	Stamps []postageStampResponse `json:"stamps"`
}
// postageGetStampsHandler lists every stamp issuer known to the node,
// responding 200 with a postageStampsResponse.
func (s *server) postageGetStampsHandler(w http.ResponseWriter, _ *http.Request) {
	var out postageStampsResponse
	for _, issuer := range s.post.StampIssuers() {
		stamp := postageStampResponse{
			BatchID:       issuer.ID(),
			Utilization:   issuer.Utilization(),
			Usable:        s.post.IssuerUsable(issuer),
			Label:         issuer.Label(),
			Depth:         issuer.Depth(),
			Amount:        bigint.Wrap(issuer.Amount()),
			BucketDepth:   issuer.BucketDepth(),
			BlockNumber:   issuer.BlockNumber(),
			ImmutableFlag: issuer.ImmutableFlag(),
		}
		out.Stamps = append(out.Stamps, stamp)
	}
	jsonhttp.OK(w, out)
}
// postageGetStampHandler serves a single stamp issuer, addressed by the
// hex-encoded batch ID taken from the "id" route variable.
//
// Responds 400 when the ID is malformed or the issuer cannot be
// retrieved, and 200 with a postageStampResponse otherwise.
func (s *server) postageGetStampHandler(w http.ResponseWriter, r *http.Request) {
	idStr := mux.Vars(r)["id"]
	// A batch ID is 32 bytes, i.e. exactly 64 hex characters.
	if idStr == "" || len(idStr) != 64 {
		s.logger.Error("get stamp issuer: invalid batchID")
		jsonhttp.BadRequest(w, "invalid batchID")
		return
	}
	id, err := hex.DecodeString(idStr)
	if err != nil {
		// Bug fix: the detailed message used logger.Error with a %v
		// verb, but Error is not a formatting method. Log the detail
		// with Debugf and keep a plain Error, matching the convention
		// used by postageCreateHandler in this file.
		s.logger.Debugf("get stamp issuer: invalid batchID: %v", err)
		s.logger.Error("get stamp issuer: invalid batchID")
		jsonhttp.BadRequest(w, "invalid batchID")
		return
	}
	issuer, err := s.post.GetStampIssuer(id)
	if err != nil {
		s.logger.Debugf("get stamp issuer: get issuer: %v", err)
		s.logger.Error("get stamp issuer: get issuer")
		jsonhttp.BadRequest(w, "cannot get issuer")
		return
	}
	resp := postageStampResponse{
		BatchID:       id,
		Utilization:   issuer.Utilization(),
		Usable:        s.post.IssuerUsable(issuer),
		Label:         issuer.Label(),
		Depth:         issuer.Depth(),
		Amount:        bigint.Wrap(issuer.Amount()),
		BucketDepth:   issuer.BucketDepth(),
		BlockNumber:   issuer.BlockNumber(),
		ImmutableFlag: issuer.ImmutableFlag(),
	}
	jsonhttp.OK(w, &resp)
}
// Copyright 2021 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package api_test
import (
"context"
"encoding/hex"
"errors"
"fmt"
"math/big"
"net/http"
"testing"
"github.com/ethersphere/bee/pkg/api"
"github.com/ethersphere/bee/pkg/bigint"
"github.com/ethersphere/bee/pkg/jsonhttp"
"github.com/ethersphere/bee/pkg/jsonhttp/jsonhttptest"
"github.com/ethersphere/bee/pkg/postage"
mockpost "github.com/ethersphere/bee/pkg/postage/mock"
"github.com/ethersphere/bee/pkg/postage/postagecontract"
contractMock "github.com/ethersphere/bee/pkg/postage/postagecontract/mock"
"github.com/ethersphere/bee/pkg/sctx"
)
// TestPostageCreateStamp exercises the POST /stamps/{amount}/{depth}
// endpoint: the success path, forwarding of a custom gas price and the
// immutable header, contract failures, and path-parameter validation.
func TestPostageCreateStamp(t *testing.T) {
	batchID := []byte{1, 2, 3, 4}
	initialBalance := int64(1000)
	depth := uint8(1)
	label := "label"
	// createBatch builds the request path for a batch-creation call.
	createBatch := func(amount int64, depth uint8, label string) string {
		return fmt.Sprintf("/stamps/%d/%d?label=%s", amount, depth, label)
	}

	t.Run("ok", func(t *testing.T) {
		// The mock contract asserts that the handler forwards exactly
		// the amount, depth and label taken from the request.
		contract := contractMock.New(
			contractMock.WithCreateBatchFunc(func(ctx context.Context, ib *big.Int, d uint8, i bool, l string) ([]byte, error) {
				if ib.Cmp(big.NewInt(initialBalance)) != 0 {
					return nil, fmt.Errorf("called with wrong initial balance. wanted %d, got %d", initialBalance, ib)
				}
				if d != depth {
					return nil, fmt.Errorf("called with wrong depth. wanted %d, got %d", depth, d)
				}
				if l != label {
					return nil, fmt.Errorf("called with wrong label. wanted %s, got %s", label, l)
				}
				return batchID, nil
			}),
		)
		client, _, _ := newTestServer(t, testServerOptions{
			PostageContract: contract,
		})

		jsonhttptest.Request(t, client, http.MethodPost, createBatch(initialBalance, depth, label), http.StatusCreated,
			jsonhttptest.WithExpectedJSONResponse(&api.PostageCreateResponse{
				BatchID: batchID,
			}),
		)
	})

	t.Run("with-custom-gas", func(t *testing.T) {
		// Additionally verifies that the Gas-Price request header is
		// propagated into the contract call's context.
		contract := contractMock.New(
			contractMock.WithCreateBatchFunc(func(ctx context.Context, ib *big.Int, d uint8, i bool, l string) ([]byte, error) {
				if ib.Cmp(big.NewInt(initialBalance)) != 0 {
					return nil, fmt.Errorf("called with wrong initial balance. wanted %d, got %d", initialBalance, ib)
				}
				if d != depth {
					return nil, fmt.Errorf("called with wrong depth. wanted %d, got %d", depth, d)
				}
				if l != label {
					return nil, fmt.Errorf("called with wrong label. wanted %s, got %s", label, l)
				}
				if sctx.GetGasPrice(ctx).Cmp(big.NewInt(10000)) != 0 {
					return nil, fmt.Errorf("called with wrong gas price. wanted %d, got %d", 10000, sctx.GetGasPrice(ctx))
				}
				return batchID, nil
			}),
		)
		client, _, _ := newTestServer(t, testServerOptions{
			PostageContract: contract,
		})

		jsonhttptest.Request(t, client, http.MethodPost, createBatch(initialBalance, depth, label), http.StatusCreated,
			jsonhttptest.WithRequestHeader("Gas-Price", "10000"),
			jsonhttptest.WithExpectedJSONResponse(&api.PostageCreateResponse{
				BatchID: batchID,
			}),
		)
	})

	t.Run("with-error", func(t *testing.T) {
		// A generic contract error must surface as 500.
		contract := contractMock.New(
			contractMock.WithCreateBatchFunc(func(ctx context.Context, ib *big.Int, d uint8, i bool, l string) ([]byte, error) {
				return nil, errors.New("err")
			}),
		)
		client, _, _ := newTestServer(t, testServerOptions{
			PostageContract: contract,
		})

		jsonhttptest.Request(t, client, http.MethodPost, createBatch(initialBalance, depth, label), http.StatusInternalServerError,
			jsonhttptest.WithExpectedJSONResponse(&jsonhttp.StatusResponse{
				Code:    http.StatusInternalServerError,
				Message: "cannot create batch",
			}),
		)
	})

	t.Run("out-of-funds", func(t *testing.T) {
		// ErrInsufficientFunds is a client-side problem: 400, not 500.
		contract := contractMock.New(
			contractMock.WithCreateBatchFunc(func(ctx context.Context, ib *big.Int, d uint8, i bool, l string) ([]byte, error) {
				return nil, postagecontract.ErrInsufficientFunds
			}),
		)
		client, _, _ := newTestServer(t, testServerOptions{
			PostageContract: contract,
		})

		jsonhttptest.Request(t, client, http.MethodPost, createBatch(initialBalance, depth, label), http.StatusBadRequest,
			jsonhttptest.WithExpectedJSONResponse(&jsonhttp.StatusResponse{
				Code:    http.StatusBadRequest,
				Message: "out of funds",
			}),
		)
	})

	t.Run("invalid depth", func(t *testing.T) {
		// Non-numeric depth path segment is rejected before any
		// contract interaction (no mock contract configured).
		client, _, _ := newTestServer(t, testServerOptions{})

		jsonhttptest.Request(t, client, http.MethodPost, "/stamps/1000/ab", http.StatusBadRequest,
			jsonhttptest.WithExpectedJSONResponse(&jsonhttp.StatusResponse{
				Code:    http.StatusBadRequest,
				Message: "invalid depth",
			}),
		)
	})

	t.Run("depth less than bucket depth", func(t *testing.T) {
		// The contract's ErrInvalidDepth maps to the same 400 message
		// as a syntactically invalid depth.
		contract := contractMock.New(
			contractMock.WithCreateBatchFunc(func(ctx context.Context, ib *big.Int, d uint8, i bool, l string) ([]byte, error) {
				return nil, postagecontract.ErrInvalidDepth
			}),
		)
		client, _, _ := newTestServer(t, testServerOptions{
			PostageContract: contract,
		})

		jsonhttptest.Request(t, client, http.MethodPost, "/stamps/1000/9", http.StatusBadRequest,
			jsonhttptest.WithExpectedJSONResponse(&jsonhttp.StatusResponse{
				Code:    http.StatusBadRequest,
				Message: "invalid depth",
			}),
		)
	})

	t.Run("invalid balance", func(t *testing.T) {
		// Non-numeric amount path segment.
		client, _, _ := newTestServer(t, testServerOptions{})

		jsonhttptest.Request(t, client, http.MethodPost, "/stamps/abcd/2", http.StatusBadRequest,
			jsonhttptest.WithExpectedJSONResponse(&jsonhttp.StatusResponse{
				Code:    http.StatusBadRequest,
				Message: "invalid postage amount",
			}),
		)
	})

	t.Run("immutable header", func(t *testing.T) {
		// The Immutable request header must reach the contract call
		// as the immutable flag.
		var immutable bool
		contract := contractMock.New(
			contractMock.WithCreateBatchFunc(func(ctx context.Context, _ *big.Int, _ uint8, i bool, _ string) ([]byte, error) {
				immutable = i
				return batchID, nil
			}),
		)
		client, _, _ := newTestServer(t, testServerOptions{
			PostageContract: contract,
		})

		jsonhttptest.Request(t, client, http.MethodPost, "/stamps/1000/24", http.StatusCreated,
			jsonhttptest.WithRequestHeader("Immutable", "true"),
			jsonhttptest.WithExpectedJSONResponse(&api.PostageCreateResponse{
				BatchID: batchID,
			}),
		)

		if !immutable {
			t.Fatalf("want true, got %v", immutable)
		}
	})
}
// TestPostageGetStamps verifies that GET /stamps lists the single
// issuer registered in the mock postage service.
func TestPostageGetStamps(t *testing.T) {
	issuer := postage.NewStampIssuer("", "", batchOk, big.NewInt(3), 11, 10, 1000, true)
	client, _, _ := newTestServer(t, testServerOptions{
		Post: mockpost.New(mockpost.WithIssuer(issuer)),
	})

	// Expected JSON entry mirrors the issuer's own accessors.
	want := api.PostageStampResponse{
		BatchID:       batchOk,
		Utilization:   issuer.Utilization(),
		Usable:        true,
		Label:         issuer.Label(),
		Depth:         issuer.Depth(),
		Amount:        bigint.Wrap(issuer.Amount()),
		BucketDepth:   issuer.BucketDepth(),
		BlockNumber:   issuer.BlockNumber(),
		ImmutableFlag: issuer.ImmutableFlag(),
	}

	jsonhttptest.Request(t, client, http.MethodGet, "/stamps", http.StatusOK,
		jsonhttptest.WithExpectedJSONResponse(&api.PostageStampsResponse{
			Stamps: []api.PostageStampResponse{want},
		}),
	)
}
// TestPostageGetStamp exercises GET /stamps/{id} for a known batch and
// for batch IDs of invalid length.
func TestPostageGetStamp(t *testing.T) {
	si := postage.NewStampIssuer("", "", batchOk, big.NewInt(3), 11, 10, 1000, true)
	mp := mockpost.New(mockpost.WithIssuer(si))
	client, _, _ := newTestServer(t, testServerOptions{Post: mp})

	t.Run("ok", func(t *testing.T) {
		jsonhttptest.Request(t, client, http.MethodGet, "/stamps/"+batchOkStr, http.StatusOK,
			jsonhttptest.WithExpectedJSONResponse(&api.PostageStampResponse{
				BatchID:       batchOk,
				Utilization:   si.Utilization(),
				Usable:        true,
				Label:         si.Label(),
				Depth:         si.Depth(),
				Amount:        bigint.Wrap(si.Amount()),
				BucketDepth:   si.BucketDepth(),
				BlockNumber:   si.BlockNumber(),
				ImmutableFlag: si.ImmutableFlag(),
			}),
		)
	})
	// The two subtests below were previously also named "ok", which was
	// misleading: both send a batch ID of invalid length (not 32 bytes)
	// and expect a 400 Bad Request.
	t.Run("bad request - batch id too short", func(t *testing.T) {
		badBatch := []byte{0, 1, 2}

		jsonhttptest.Request(t, client, http.MethodGet, "/stamps/"+hex.EncodeToString(badBatch), http.StatusBadRequest,
			jsonhttptest.WithExpectedJSONResponse(&jsonhttp.StatusResponse{
				Code:    http.StatusBadRequest,
				Message: "invalid batchID",
			}),
		)
	})
	t.Run("bad request - batch id wrong length", func(t *testing.T) {
		badBatch := []byte{0, 1, 2, 4}

		jsonhttptest.Request(t, client, http.MethodGet, "/stamps/"+hex.EncodeToString(badBatch), http.StatusBadRequest,
			jsonhttptest.WithExpectedJSONResponse(&jsonhttp.StatusResponse{
				Code:    http.StatusBadRequest,
				Message: "invalid batchID",
			}),
		)
	})
}
......@@ -78,10 +78,7 @@ func (s *server) pssPostHandler(w http.ResponseWriter, r *http.Request) {
return
}
batch, err := requestPostageBatchId(r)
switch {
case errors.Is(err, errSwarmPostageBatchIDHeaderNotFound) && s.post.DefaultIssuer() != nil:
batch = s.post.DefaultIssuer().ID()
case err != nil:
if err != nil {
s.logger.Debugf("pss: postage batch id: %v", err)
s.logger.Error("pss: postage batch id")
jsonhttp.BadRequest(w, "invalid postage batch id")
......
......@@ -235,9 +235,9 @@ func TestPssSend(t *testing.T) {
jsonhttptest.WithRequestBody(bytes.NewReader(payload)),
)
})
t.Run("ok batch - batch empty", func(t *testing.T) {
t.Run("bad request - batch empty", func(t *testing.T) {
hexbatch := hex.EncodeToString(batchEmpty)
jsonhttptest.Request(t, client, http.MethodPost, "/pss/send/to/12", http.StatusCreated,
jsonhttptest.Request(t, client, http.MethodPost, "/pss/send/to/12", http.StatusBadRequest,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, hexbatch),
jsonhttptest.WithRequestBody(bytes.NewReader(payload)),
)
......
......@@ -156,6 +156,27 @@ func (s *server) setupRouting() {
})),
)
handle("/stamps", web.ChainHandlers(
s.gatewayModeForbidEndpointHandler,
web.FinalHandler(jsonhttp.MethodHandler{
"GET": http.HandlerFunc(s.postageGetStampsHandler),
})),
)
handle("/stamps/{id}", web.ChainHandlers(
s.gatewayModeForbidEndpointHandler,
web.FinalHandler(jsonhttp.MethodHandler{
"GET": http.HandlerFunc(s.postageGetStampHandler),
})),
)
handle("/stamps/{amount}/{depth}", web.ChainHandlers(
s.gatewayModeForbidEndpointHandler,
web.FinalHandler(jsonhttp.MethodHandler{
"POST": http.HandlerFunc(s.postageCreateHandler),
})),
)
s.Handler = web.ChainHandlers(
httpaccess.NewHTTPAccessLogHandler(s.logger, logrus.InfoLevel, s.tracer, "api access"),
handlers.CompressHandler,
......
......@@ -128,10 +128,7 @@ func (s *server) socUploadHandler(w http.ResponseWriter, r *http.Request) {
return
}
batch, err := requestPostageBatchId(r)
switch {
case errors.Is(err, errSwarmPostageBatchIDHeaderNotFound) && s.post.DefaultIssuer() != nil:
batch = s.post.DefaultIssuer().ID()
case err != nil:
if err != nil {
s.logger.Debugf("soc upload: postage batch id: %v", err)
s.logger.Error("soc upload: postage batch id")
jsonhttp.BadRequest(w, "invalid postage batch id")
......
......@@ -161,10 +161,10 @@ func TestSOC(t *testing.T) {
jsonhttptest.WithRequestBody(bytes.NewReader(s.WrappedChunk.Data())),
)
})
t.Run("ok - batch empty", func(t *testing.T) {
t.Run("err - batch empty", func(t *testing.T) {
s := testingsoc.GenerateMockSOC(t, testData)
hexbatch := hex.EncodeToString(batchEmpty)
jsonhttptest.Request(t, client, http.MethodPost, socResource(hex.EncodeToString(s.Owner), hex.EncodeToString(s.ID), hex.EncodeToString(s.Signature)), http.StatusCreated,
jsonhttptest.Request(t, client, http.MethodPost, socResource(hex.EncodeToString(s.Owner), hex.EncodeToString(s.ID), hex.EncodeToString(s.Signature)), http.StatusBadRequest,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, hexbatch),
jsonhttptest.WithRequestBody(bytes.NewReader(s.WrappedChunk.Data())),
)
......
package config
import (
"github.com/ethereum/go-ethereum/common"
)
var (
	// Chain IDs of the supported networks (Goerli testnet and xDai/Gnosis).
	goerliChainID = int64(5)
	xdaiChainID = int64(100)
	// Block numbers used as ChainConfig.StartBlock per network.
	goerliStartBlock = uint64(4933174)
	xdaiStartBlock = uint64(16515648)
	// Deployed contract addresses. goerliContractAddress and
	// xdaiContractAddress are wired up as the price oracle address in
	// GetChainConfig; the factory addresses as CurrentFactory, with
	// goerliLegacyFactoryAddress the only legacy factory entry.
	goerliContractAddress = common.HexToAddress("0x0c9de531dcb38b758fe8a2c163444a5e54ee0db2")
	xdaiContractAddress = common.HexToAddress("0x0FDc5429C50e2a39066D8A94F3e2D2476fcc3b85")
	goerliFactoryAddress = common.HexToAddress("0x73c412512E1cA0be3b89b77aB3466dA6A1B9d273")
	xdaiFactoryAddress = common.HexToAddress("0xc2d5a532cf69aa9a1378737d8ccdef884b6e7420")
	goerliLegacyFactoryAddress = common.HexToAddress("0xf0277caffea72734853b834afc9892461ea18474")
	// Postage stamp contract addresses per network.
	goerliPostageStampContractAddress = common.HexToAddress("0x621e455C4a139f5C4e4A8122Ce55Dc21630769E4")
	xdaiPostageStampContractAddress = common.HexToAddress("0x6a1a21eca3ab28be85c7ba22b2d6eae5907c900e")
)
// ChainConfig groups the per-network deployment parameters (contract
// addresses and start block) selected by GetChainConfig.
type ChainConfig struct {
	// StartBlock is the block number from which processing starts
	// for this chain.
	StartBlock uint64
	// LegacyFactories lists previously deployed factory contract
	// addresses; may be empty (it is for xDai).
	LegacyFactories []common.Address
	// PostageStamp is the postage stamp contract address.
	PostageStamp common.Address
	// CurrentFactory is the factory contract currently in use.
	CurrentFactory common.Address
	// PriceOracleAddress is the price oracle contract address.
	PriceOracleAddress common.Address
}
// GetChainConfig returns the deployment configuration for the given
// chain ID. The boolean result is true when the chain is supported;
// for unknown chains a zero-valued config and false are returned.
func GetChainConfig(chainID int64) (*ChainConfig, bool) {
	switch chainID {
	case goerliChainID:
		return &ChainConfig{
			StartBlock:         goerliStartBlock,
			CurrentFactory:     goerliFactoryAddress,
			LegacyFactories:    []common.Address{goerliLegacyFactoryAddress},
			PostageStamp:       goerliPostageStampContractAddress,
			PriceOracleAddress: goerliContractAddress,
		}, true
	case xdaiChainID:
		return &ChainConfig{
			StartBlock:         xdaiStartBlock,
			CurrentFactory:     xdaiFactoryAddress,
			LegacyFactories:    []common.Address{},
			PostageStamp:       xdaiPostageStampContractAddress,
			PriceOracleAddress: xdaiContractAddress,
		}, true
	default:
		return &ChainConfig{}, false
	}
}
......@@ -7,6 +7,7 @@ package clef
import (
"crypto/ecdsa"
"errors"
"fmt"
"math/big"
"os"
"path/filepath"
......@@ -129,7 +130,16 @@ func (c *clefSigner) Sign(data []byte) ([]byte, error) {
// SignTx signs an ethereum transaction.
func (c *clefSigner) SignTx(transaction *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
// chainId is nil here because it is set on the clef side
return c.clef.SignTx(c.account, transaction, nil)
tx, err := c.clef.SignTx(c.account, transaction, nil)
if err != nil {
return nil, err
}
if chainID.Cmp(tx.ChainId()) != 0 {
return nil, fmt.Errorf("misconfigured signer: wrong chain id %d; wanted %d", tx.ChainId(), chainID)
}
return tx, nil
}
// EthereumAddress returns the ethereum address this signer uses.
......
......@@ -34,6 +34,8 @@ type (
PostageCreateResponse = postageCreateResponse
PostageStampResponse = postageStampResponse
PostageStampsResponse = postageStampsResponse
PostageStampBucketsResponse = postageStampBucketsResponse
BucketData = bucketData
)
var (
......
......@@ -8,6 +8,7 @@ import (
"github.com/ethersphere/bee"
"github.com/ethersphere/bee/pkg/metrics"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
)
func newMetricsRegistry() (r *prometheus.Registry) {
......@@ -15,10 +16,10 @@ func newMetricsRegistry() (r *prometheus.Registry) {
// register standard metrics
r.MustRegister(
prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{
collectors.NewProcessCollector(collectors.ProcessCollectorOpts{
Namespace: metrics.Namespace,
}),
prometheus.NewGoCollector(),
collectors.NewGoCollector(),
prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: metrics.Namespace,
Name: "info",
......
......@@ -104,7 +104,7 @@ func TestConnect(t *testing.T) {
func TestDisconnect(t *testing.T) {
address := swarm.MustParseHexAddress("ca1e9f3938cc1425c6061b96ad9eb93e134dfe8734ad490164ef20af9d1cf59c")
unknownAdddress := swarm.MustParseHexAddress("ca1e9f3938cc1425c6061b96ad9eb93e134dfe8734ad490164ef20af9d1cf59e")
unknownAddress := swarm.MustParseHexAddress("ca1e9f3938cc1425c6061b96ad9eb93e134dfe8734ad490164ef20af9d1cf59e")
errorAddress := swarm.MustParseHexAddress("ca1e9f3938cc1425c6061b96ad9eb93e134dfe8734ad490164ef20af9d1cf59a")
testErr := errors.New("test error")
......@@ -132,7 +132,7 @@ func TestDisconnect(t *testing.T) {
})
t.Run("unknown", func(t *testing.T) {
jsonhttptest.Request(t, testServer.Client, http.MethodDelete, "/peers/"+unknownAdddress.String(), http.StatusBadRequest,
jsonhttptest.Request(t, testServer.Client, http.MethodDelete, "/peers/"+unknownAddress.String(), http.StatusBadRequest,
jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{
Code: http.StatusBadRequest,
Message: "peer not found",
......
......@@ -14,7 +14,6 @@ import (
"github.com/ethersphere/bee/pkg/bigint"
"github.com/ethersphere/bee/pkg/jsonhttp"
"github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/postage/postagecontract"
"github.com/ethersphere/bee/pkg/sctx"
"github.com/gorilla/mux"
......@@ -102,15 +101,35 @@ type postageStampResponse struct {
BucketDepth uint8 `json:"bucketDepth"`
BlockNumber uint64 `json:"blockNumber"`
ImmutableFlag bool `json:"immutableFlag"`
Exists bool `json:"exists"`
}
type postageStampsResponse struct {
Stamps []postageStampResponse `json:"stamps"`
}
type postageStampBucketsResponse struct {
Depth uint8 `json:"depth"`
BucketDepth uint8 `json:"bucketDepth"`
BucketUpperBound uint32 `json:"bucketUpperBound"`
Buckets []bucketData `json:"buckets"`
}
type bucketData struct {
BucketID uint32 `json:"bucketID"`
Collisions uint32 `json:"collisions"`
}
func (s *Service) postageGetStampsHandler(w http.ResponseWriter, _ *http.Request) {
resp := postageStampsResponse{}
for _, v := range s.post.StampIssuers() {
exists, err := s.post.BatchExists(v.ID())
if err != nil {
s.logger.Errorf("get stamp issuer: check batch: %v", err)
s.logger.Error("get stamp issuer: check batch")
jsonhttp.InternalServerError(w, "unable to check batch")
return
}
resp.Stamps = append(resp.Stamps, postageStampResponse{
BatchID: v.ID(),
Utilization: v.Utilization(),
......@@ -121,14 +140,15 @@ func (s *Service) postageGetStampsHandler(w http.ResponseWriter, _ *http.Request
BucketDepth: v.BucketDepth(),
BlockNumber: v.BlockNumber(),
ImmutableFlag: v.ImmutableFlag(),
Exists: exists,
})
}
jsonhttp.OK(w, resp)
}
func (s *Service) postageGetStampHandler(w http.ResponseWriter, r *http.Request) {
func (s *Service) postageGetStampBucketsHandler(w http.ResponseWriter, r *http.Request) {
idStr := mux.Vars(r)["id"]
if idStr == "" || len(idStr) != 64 {
if len(idStr) != 64 {
s.logger.Error("get stamp issuer: invalid batchID")
jsonhttp.BadRequest(w, "invalid batchID")
return
......@@ -145,47 +165,67 @@ func (s *Service) postageGetStampHandler(w http.ResponseWriter, r *http.Request)
if err != nil {
s.logger.Error("get stamp issuer: get issuer: %v", err)
s.logger.Error("get stamp issuer: get issuer")
jsonhttp.BadRequest(w, "cannot get issuer")
jsonhttp.BadRequest(w, "cannot get batch")
return
}
resp := postageStampResponse{
BatchID: id,
Utilization: issuer.Utilization(),
Usable: s.post.IssuerUsable(issuer),
Label: issuer.Label(),
Depth: issuer.Depth(),
Amount: bigint.Wrap(issuer.Amount()),
BucketDepth: issuer.BucketDepth(),
BlockNumber: issuer.BlockNumber(),
ImmutableFlag: issuer.ImmutableFlag(),
b := issuer.Buckets()
resp := postageStampBucketsResponse{
Depth: issuer.Depth(),
BucketDepth: issuer.BucketDepth(),
BucketUpperBound: issuer.BucketUpperBound(),
Buckets: make([]bucketData, len(b)),
}
jsonhttp.OK(w, &resp)
for i, v := range b {
resp.Buckets[i] = bucketData{BucketID: uint32(i), Collisions: v}
}
jsonhttp.OK(w, resp)
}
// postageSetDefaultStampIssuerHandler sets the default postage stamps issuer.
func (s *Service) postageSetDefaultStampIssuerHandler(w http.ResponseWriter, r *http.Request) {
func (s *Service) postageGetStampHandler(w http.ResponseWriter, r *http.Request) {
idStr := mux.Vars(r)["id"]
if idStr == "" || len(idStr) != 64 {
if len(idStr) != 64 {
s.logger.Error("get stamp issuer: invalid batchID")
jsonhttp.BadRequest(w, "invalid batchID")
return
}
id, err := hex.DecodeString(idStr)
if err != nil {
s.logger.Error("set stamp issuer: invalid batchID: %v", err)
s.logger.Error("set stamp issuer: invalid batchID")
s.logger.Errorf("get stamp issuer: invalid batchID: %v", err)
s.logger.Error("get stamp issuer: invalid batchID")
jsonhttp.BadRequest(w, "invalid batchID")
return
}
switch err := s.post.SetDefaultIssuer(id); {
case errors.Is(err, postage.ErrNotFound):
jsonhttp.NotFound(w, nil)
case err != nil:
s.logger.Debugf("debug api: set default stamp issuer: %v", err)
jsonhttp.InternalServerError(w, err)
issuer, err := s.post.GetStampIssuer(id)
if err != nil {
s.logger.Errorf("get stamp issuer: get issuer: %v", err)
s.logger.Error("get stamp issuer: get issuer")
jsonhttp.BadRequest(w, "cannot get issuer")
return
}
exists, err := s.post.BatchExists(id)
if err != nil {
s.logger.Errorf("get stamp issuer: check batch: %v", err)
s.logger.Error("get stamp issuer: check batch")
jsonhttp.InternalServerError(w, "unable to check batch")
return
}
resp := postageStampResponse{
BatchID: id,
Utilization: issuer.Utilization(),
Usable: s.post.IssuerUsable(issuer),
Label: issuer.Label(),
Depth: issuer.Depth(),
Amount: bigint.Wrap(issuer.Amount()),
BucketDepth: issuer.BucketDepth(),
BlockNumber: issuer.BlockNumber(),
ImmutableFlag: issuer.ImmutableFlag(),
Exists: exists,
}
jsonhttp.OK(w, &resp)
}
type reserveStateResponse struct {
......
......@@ -211,6 +211,7 @@ func TestPostageGetStamps(t *testing.T) {
BucketDepth: si.BucketDepth(),
BlockNumber: si.BlockNumber(),
ImmutableFlag: si.ImmutableFlag(),
Exists: true,
},
},
}),
......@@ -234,10 +235,11 @@ func TestPostageGetStamp(t *testing.T) {
BucketDepth: si.BucketDepth(),
BlockNumber: si.BlockNumber(),
ImmutableFlag: si.ImmutableFlag(),
Exists: true,
}),
)
})
t.Run("ok", func(t *testing.T) {
t.Run("bad request", func(t *testing.T) {
badBatch := []byte{0, 1, 2}
jsonhttptest.Request(t, ts.Client, http.MethodGet, "/stamps/"+hex.EncodeToString(badBatch), http.StatusBadRequest,
......@@ -247,7 +249,7 @@ func TestPostageGetStamp(t *testing.T) {
}),
)
})
t.Run("ok", func(t *testing.T) {
t.Run("bad request", func(t *testing.T) {
badBatch := []byte{0, 1, 2, 4}
jsonhttptest.Request(t, ts.Client, http.MethodGet, "/stamps/"+hex.EncodeToString(badBatch), http.StatusBadRequest,
......@@ -259,6 +261,47 @@ func TestPostageGetStamp(t *testing.T) {
})
}
func TestPostageGetBuckets(t *testing.T) {
si := postage.NewStampIssuer("", "", batchOk, big.NewInt(3), 11, 10, 1000, true)
mp := mockpost.New(mockpost.WithIssuer(si))
ts := newTestServer(t, testServerOptions{Post: mp})
buckets := make([]debugapi.BucketData, 1024)
for i := range buckets {
buckets[i] = debugapi.BucketData{BucketID: uint32(i)}
}
t.Run("ok", func(t *testing.T) {
jsonhttptest.Request(t, ts.Client, http.MethodGet, "/stamps/"+batchOkStr+"/buckets", http.StatusOK,
jsonhttptest.WithExpectedJSONResponse(&debugapi.PostageStampBucketsResponse{
Depth: si.Depth(),
BucketDepth: si.BucketDepth(),
BucketUpperBound: si.BucketUpperBound(),
Buckets: buckets,
}),
)
})
t.Run("bad batch", func(t *testing.T) {
badBatch := []byte{0, 1, 2}
jsonhttptest.Request(t, ts.Client, http.MethodGet, "/stamps/"+hex.EncodeToString(badBatch)+"/buckets", http.StatusBadRequest,
jsonhttptest.WithExpectedJSONResponse(&jsonhttp.StatusResponse{
Code: http.StatusBadRequest,
Message: "invalid batchID",
}),
)
})
t.Run("bad batch", func(t *testing.T) {
badBatch := []byte{0, 1, 2, 4}
jsonhttptest.Request(t, ts.Client, http.MethodGet, "/stamps/"+hex.EncodeToString(badBatch)+"/buckets", http.StatusBadRequest,
jsonhttptest.WithExpectedJSONResponse(&jsonhttp.StatusResponse{
Code: http.StatusBadRequest,
Message: "invalid batchID",
}),
)
})
}
func TestReserveState(t *testing.T) {
t.Run("ok", func(t *testing.T) {
ts := newTestServer(t, testServerOptions{
......
......@@ -198,15 +198,15 @@ func (s *Service) newRouter() *mux.Router {
})),
)
router.Handle("/stamps/{amount}/{depth}", web.ChainHandlers(
router.Handle("/stamps/{id}/buckets", web.ChainHandlers(
web.FinalHandler(jsonhttp.MethodHandler{
"POST": http.HandlerFunc(s.postageCreateHandler),
"GET": http.HandlerFunc(s.postageGetStampBucketsHandler),
})),
)
router.Handle("/stamps/default/{id}", web.ChainHandlers(
router.Handle("/stamps/{amount}/{depth}", web.ChainHandlers(
web.FinalHandler(jsonhttp.MethodHandler{
"PUT": http.HandlerFunc(s.postageSetDefaultStampIssuerHandler),
"POST": http.HandlerFunc(s.postageCreateHandler),
})),
)
......
......@@ -14,6 +14,7 @@ import (
"context"
"errors"
"fmt"
"golang.org/x/sync/semaphore"
"sync"
"time"
......@@ -23,10 +24,9 @@ import (
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/p2p"
"github.com/ethersphere/bee/pkg/p2p/protobuf"
"github.com/ethersphere/bee/pkg/ratelimit"
"github.com/ethersphere/bee/pkg/swarm"
ma "github.com/multiformats/go-multiaddr"
"golang.org/x/time/rate"
)
const (
......@@ -38,31 +38,43 @@ const (
)
var (
limitBurst = 4 * int(swarm.MaxBins)
limitRate = time.Minute
ErrRateLimitExceeded = errors.New("rate limit exceeded")
limitBurst = 4 * int(swarm.MaxBins)
limitRate = rate.Every(time.Minute)
)
type Service struct {
streamer p2p.Streamer
streamer p2p.StreamerPinger
addressBook addressbook.GetPutter
addPeersHandler func(...swarm.Address)
networkID uint64
logger logging.Logger
metrics metrics
limiter map[string]*rate.Limiter
limiterLock sync.Mutex
inLimiter *ratelimit.Limiter
outLimiter *ratelimit.Limiter
clearMtx sync.Mutex
quit chan struct{}
wg sync.WaitGroup
peersChan chan pb.Peers
sem *semaphore.Weighted
}
func New(streamer p2p.Streamer, addressbook addressbook.GetPutter, networkID uint64, logger logging.Logger) *Service {
return &Service{
func New(streamer p2p.StreamerPinger, addressbook addressbook.GetPutter, networkID uint64, logger logging.Logger) *Service {
svc := &Service{
streamer: streamer,
logger: logger,
addressBook: addressbook,
networkID: networkID,
metrics: newMetrics(),
limiter: make(map[string]*rate.Limiter),
inLimiter: ratelimit.New(limitRate, limitBurst),
outLimiter: ratelimit.New(limitRate, limitBurst),
quit: make(chan struct{}),
peersChan: make(chan pb.Peers),
sem: semaphore.NewWeighted(int64(31)),
}
svc.startCheckPeersHandler()
return svc
}
func (s *Service) Protocol() p2p.ProtocolSpec {
......@@ -89,6 +101,12 @@ func (s *Service) BroadcastPeers(ctx context.Context, addressee swarm.Address, p
if max > len(peers) {
max = len(peers)
}
// If broadcasting limit is exceeded, return early
if !s.outLimiter.Allow(addressee.ByteString(), max) {
return nil
}
if err := s.sendPeers(ctx, addressee, peers[:max]); err != nil {
return err
}
......@@ -103,6 +121,23 @@ func (s *Service) SetAddPeersHandler(h func(addr ...swarm.Address)) {
s.addPeersHandler = h
}
func (s *Service) Close() error {
close(s.quit)
stopped := make(chan struct{})
go func() {
defer close(stopped)
s.wg.Wait()
}()
select {
case <-stopped:
return nil
case <-time.After(time.Second * 5):
return errors.New("hive: waited 5 seconds to close active goroutines")
}
}
func (s *Service) sendPeers(ctx context.Context, peer swarm.Address, peers []swarm.Address) (err error) {
s.metrics.BroadcastPeersSends.Inc()
stream, err := s.streamer.NewStream(ctx, peer, nil, protocolName, protocolVersion, peersStreamName)
......@@ -158,9 +193,9 @@ func (s *Service) peersHandler(ctx context.Context, peer p2p.Peer, stream p2p.St
s.metrics.PeersHandlerPeers.Add(float64(len(peersReq.Peers)))
if err := s.rateLimitPeer(peer.Address, len(peersReq.Peers)); err != nil {
if !s.inLimiter.Allow(peer.Address.ByteString(), len(peersReq.Peers)) {
_ = stream.Reset()
return err
return ErrRateLimitExceeded
}
// close the stream before processing in order to unblock the sending side
......@@ -168,63 +203,108 @@ func (s *Service) peersHandler(ctx context.Context, peer p2p.Peer, stream p2p.St
// but we still want to handle not closed stream from the other side to avoid zombie stream
go stream.FullClose()
var peers []swarm.Address
for _, newPeer := range peersReq.Peers {
multiUnderlay, err := ma.NewMultiaddrBytes(newPeer.Underlay)
if err != nil {
s.logger.Errorf("hive: multi address underlay err: %v", err)
continue
}
select {
case s.peersChan <- peersReq:
case <-s.quit:
return errors.New("failed to process peers, shutting down hive")
}
bzzAddress := bzz.Address{
Overlay: swarm.NewAddress(newPeer.Overlay),
Underlay: multiUnderlay,
Signature: newPeer.Signature,
Transaction: newPeer.Transaction,
}
return nil
}
err = s.addressBook.Put(bzzAddress.Overlay, bzzAddress)
if err != nil {
s.logger.Warningf("skipping peer in response %s: %v", newPeer.String(), err)
continue
}
func (s *Service) disconnect(peer p2p.Peer) error {
peers = append(peers, bzzAddress.Overlay)
}
s.clearMtx.Lock()
defer s.clearMtx.Unlock()
if s.addPeersHandler != nil {
s.addPeersHandler(peers...)
}
s.inLimiter.Clear(peer.Address.ByteString())
s.outLimiter.Clear(peer.Address.ByteString())
return nil
}
func (s *Service) rateLimitPeer(peer swarm.Address, count int) error {
func (s *Service) startCheckPeersHandler() {
ctx, cancel := context.WithCancel(context.Background())
s.wg.Add(1)
go func() {
defer s.wg.Done()
<-s.quit
cancel()
}()
s.limiterLock.Lock()
defer s.limiterLock.Unlock()
s.wg.Add(1)
go func() {
defer s.wg.Done()
for {
select {
case <-ctx.Done():
return
case newPeers := <-s.peersChan:
s.wg.Add(1)
go func() {
defer s.wg.Done()
s.checkAndAddPeers(ctx, newPeers)
}()
}
}
}()
}
addr := peer.ByteString()
func (s *Service) checkAndAddPeers(ctx context.Context, peers pb.Peers) {
limiter, ok := s.limiter[addr]
if !ok {
limiter = rate.NewLimiter(limitRate, limitBurst)
s.limiter[addr] = limiter
}
var peersToAdd []swarm.Address
mtx := sync.Mutex{}
wg := sync.WaitGroup{}
if limiter.AllowN(time.Now(), count) {
return nil
}
for _, p := range peers.Peers {
err := s.sem.Acquire(ctx, 1)
if err != nil {
return
}
return ErrRateLimitExceeded
}
wg.Add(1)
go func(newPeer *pb.BzzAddress) {
defer func() {
s.sem.Release(1)
wg.Done()
}()
multiUnderlay, err := ma.NewMultiaddrBytes(newPeer.Underlay)
if err != nil {
s.logger.Errorf("hive: multi address underlay err: %v", err)
return
}
func (s *Service) disconnect(peer p2p.Peer) error {
s.limiterLock.Lock()
defer s.limiterLock.Unlock()
// check if the underlay is usable by doing a raw ping using libp2p
_, err = s.streamer.Ping(ctx, multiUnderlay)
if err != nil {
s.metrics.UnreachablePeers.Inc()
s.logger.Warningf("hive: multi address underlay %s not reachable err: %w", multiUnderlay, err)
return
}
delete(s.limiter, peer.Address.String())
bzzAddress := bzz.Address{
Overlay: swarm.NewAddress(newPeer.Overlay),
Underlay: multiUnderlay,
Signature: newPeer.Signature,
Transaction: newPeer.Transaction,
}
return nil
err = s.addressBook.Put(bzzAddress.Overlay, bzzAddress)
if err != nil {
s.logger.Warningf("skipping peer in response %s: %v", newPeer.String(), err)
return
}
mtx.Lock()
peersToAdd = append(peersToAdd, bzzAddress.Overlay)
mtx.Unlock()
}(p)
}
wg.Wait()
if s.addPeersHandler != nil && len(peersToAdd) > 0 {
s.addPeersHandler(peersToAdd...)
}
}
......@@ -46,8 +46,10 @@ func TestHandlerRateLimit(t *testing.T) {
addressbookclean := ab.New(mock.NewStateStore())
// new recorder for handling Ping
streamer := streamtest.New()
// create a hive server that handles the incoming stream
server := hive.New(nil, addressbookclean, networkID, logger)
server := hive.New(streamer, addressbookclean, networkID, logger)
serverAddress := test.RandomAddress()
......@@ -98,8 +100,9 @@ func TestHandlerRateLimit(t *testing.T) {
}
lastRec := rec[len(rec)-1]
if !errors.Is(lastRec.Err(), hive.ErrRateLimitExceeded) {
t.Fatal(err)
if lastRec.Err() != nil {
t.Fatal("want nil error")
}
}
......@@ -160,6 +163,7 @@ func TestBroadcastPeers(t *testing.T) {
wantMsgs []pb.Peers
wantOverlays []swarm.Address
wantBzzAddresses []bzz.Address
pingErr func(addr ma.Multiaddr) (time.Duration, error)
}{
"OK - single record": {
addresee: swarm.MustParseHexAddress("ca1e9f3938cc1425c6061b96ad9eb93e134dfe8734ad490164ef20af9d1cf59c"),
......@@ -196,14 +200,36 @@ func TestBroadcastPeers(t *testing.T) {
wantOverlays: overlays[:2*hive.MaxBatchSize],
wantBzzAddresses: bzzAddresses[:2*hive.MaxBatchSize],
},
"OK - single batch - skip ping failures": {
addresee: swarm.MustParseHexAddress("ca1e9f3938cc1425c6061b96ad9eb93e134dfe8734ad490164ef20af9d1cf59c"),
peers: overlays[:15],
wantMsgs: []pb.Peers{{Peers: wantMsgs[0].Peers[:15]}},
wantOverlays: overlays[:10],
wantBzzAddresses: bzzAddresses[:10],
pingErr: func(addr ma.Multiaddr) (rtt time.Duration, err error) {
for _, v := range bzzAddresses[10:15] {
if v.Underlay.Equal(addr) {
return rtt, errors.New("ping failure")
}
}
return rtt, nil
},
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
addressbookclean := ab.New(mock.NewStateStore())
// new recorder for handling Ping
var streamer *streamtest.Recorder
if tc.pingErr != nil {
streamer = streamtest.New(streamtest.WithPingErr(tc.pingErr))
} else {
streamer = streamtest.New()
}
// create a hive server that handles the incoming stream
server := hive.New(nil, addressbookclean, networkID, logger)
server := hive.New(streamer, addressbookclean, networkID, logger)
// setup the stream recorder to record stream data
recorder := streamtest.New(
......
......@@ -16,6 +16,7 @@ type metrics struct {
PeersHandler prometheus.Counter
PeersHandlerPeers prometheus.Counter
UnreachablePeers prometheus.Counter
}
func newMetrics() metrics {
......@@ -52,6 +53,12 @@ func newMetrics() metrics {
Name: "peers_handler_peers_count",
Help: "Number of peers received in peer messages.",
}),
UnreachablePeers: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: m.Namespace,
Subsystem: subsystem,
Name: "unreachable_peers_count",
Help: "Number of peers that are unreachable.",
}),
}
}
......
......@@ -137,8 +137,8 @@ func (db *DB) put(mode storage.ModePut, chs ...swarm.Chunk) (exist []bool, err e
if err != nil {
return nil, err
}
gcSizeChange += c
}
gcSizeChange += c
}
case storage.ModePutSync:
......
......@@ -260,6 +260,7 @@ func TestModePutUpload(t *testing.T) {
newPinIndexTest(db, ch, leveldb.ErrNotFound)(t)
}
newItemsCountTest(db.postageIndexIndex, tc.count)(t)
newIndexGCSizeTest(db)(t)
})
}
}
......@@ -298,6 +299,7 @@ func TestModePutUploadPin(t *testing.T) {
newPinIndexTest(db, ch, nil)(t)
}
newItemsCountTest(db.postageIndexIndex, tc.count)(t)
newIndexGCSizeTest(db)(t)
})
}
}
......@@ -484,6 +486,7 @@ func TestModePut_sameChunk(t *testing.T) {
newItemsCountTest(db.retrievalDataIndex, tc.count)(t)
newItemsCountTest(db.pullIndex, count(tcn.pullIndex))(t)
newItemsCountTest(db.pushIndex, count(tcn.pushIndex))(t)
newIndexGCSizeTest(db)(t)
}
})
}
......
......@@ -261,7 +261,11 @@ func (db *DB) setRemove(batch *leveldb.Batch, item shed.Item, check bool) (gcSiz
func (db *DB) setPin(batch *leveldb.Batch, item shed.Item) (gcSizeChange int64, err error) {
// Get the existing pin counter of the chunk
i, err := db.pinIndex.Get(item)
// this will not panic because shed.Index.Get returns an instance, not a pointer.
// we therefore leverage the default value of the pin counter on the item (zero).
item.PinCounter = i.PinCounter
if err != nil {
if !errors.Is(err, leveldb.ErrNotFound) {
return 0, err
......
......@@ -14,6 +14,7 @@ import (
"fmt"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/recovery"
"github.com/ethersphere/bee/pkg/retrieval"
"github.com/ethersphere/bee/pkg/sctx"
......@@ -25,7 +26,7 @@ type store struct {
storage.Storer
retrieval retrieval.Interface
logger logging.Logger
validStamp func(swarm.Chunk, []byte) (swarm.Chunk, error)
validStamp postage.ValidStampFn
recoveryCallback recovery.Callback // this is the callback to be executed when a chunk fails to be retrieved
}
......@@ -34,7 +35,7 @@ var (
)
// New returns a new NetStore that wraps a given Storer.
func New(s storage.Storer, validStamp func(swarm.Chunk, []byte) (swarm.Chunk, error), rcb recovery.Callback, r retrieval.Interface, logger logging.Logger) storage.Storer {
func New(s storage.Storer, validStamp postage.ValidStampFn, rcb recovery.Callback, r retrieval.Interface, logger logging.Logger) storage.Storer {
return &store{Storer: s, validStamp: validStamp, recoveryCallback: rcb, retrieval: r, logger: logger}
}
......
......@@ -16,6 +16,7 @@ import (
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/netstore"
"github.com/ethersphere/bee/pkg/postage"
postagetesting "github.com/ethersphere/bee/pkg/postage/testing"
"github.com/ethersphere/bee/pkg/pss"
"github.com/ethersphere/bee/pkg/recovery"
......@@ -186,7 +187,7 @@ func TestInvalidPostageStamp(t *testing.T) {
}
// returns a mock retrieval protocol, a mock local storage and a netstore
func newRetrievingNetstore(rec recovery.Callback, validStamp func(swarm.Chunk, []byte) (swarm.Chunk, error)) (ret *retrievalMock, mockStore *mock.MockStorer, ns storage.Storer) {
func newRetrievingNetstore(rec recovery.Callback, validStamp postage.ValidStampFn) (ret *retrievalMock, mockStore *mock.MockStorer, ns storage.Storer) {
retrieve := &retrievalMock{}
store := mock.NewStorer()
logger := logging.New(ioutil.Discard, 0)
......
......@@ -15,6 +15,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethersphere/bee/pkg/config"
"github.com/ethersphere/bee/pkg/crypto"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/p2p/libp2p"
......@@ -82,7 +83,9 @@ func InitChequebookFactory(
var currentFactory common.Address
var legacyFactories []common.Address
foundFactory, foundLegacyFactories, found := chequebook.DiscoverFactoryAddress(chainID)
chainCfg, found := config.GetChainConfig(chainID)
foundFactory, foundLegacyFactories := chainCfg.CurrentFactory, chainCfg.LegacyFactories
if factoryAddress == "" {
if !found {
return nil, fmt.Errorf("no known factory address for this network (chain id: %d)", chainID)
......@@ -211,8 +214,8 @@ func InitSwap(
var currentPriceOracleAddress common.Address
if priceOracleAddress == "" {
var found bool
currentPriceOracleAddress, found = priceoracle.DiscoverPriceOracleAddress(chainID)
chainCfg, found := config.GetChainConfig(chainID)
currentPriceOracleAddress = chainCfg.PriceOracleAddress
if !found {
return nil, nil, errors.New("no known price oracle address for this network")
}
......
This diff is collapsed.
......@@ -56,7 +56,7 @@ var (
// AdvertisableAddressResolver can Resolve a Multiaddress.
type AdvertisableAddressResolver interface {
Resolve(observedAdddress ma.Multiaddr) (ma.Multiaddr, error)
Resolve(observedAddress ma.Multiaddr) (ma.Multiaddr, error)
}
type SenderMatcher interface {
......
......@@ -729,7 +729,7 @@ type AdvertisableAddresserMock struct {
err error
}
func (a *AdvertisableAddresserMock) Resolve(observedAdddress ma.Multiaddr) (ma.Multiaddr, error) {
func (a *AdvertisableAddresserMock) Resolve(observedAddress ma.Multiaddr) (ma.Multiaddr, error) {
if a.err != nil {
return nil, a.err
}
......@@ -738,7 +738,7 @@ func (a *AdvertisableAddresserMock) Resolve(observedAdddress ma.Multiaddr) (ma.M
return a.advertisableAddress, nil
}
return observedAdddress, nil
return observedAddress, nil
}
type MockSenderMatcher struct {
......
......@@ -37,6 +37,7 @@ import (
libp2pquic "github.com/libp2p/go-libp2p-quic-transport"
tptu "github.com/libp2p/go-libp2p-transport-upgrader"
basichost "github.com/libp2p/go-libp2p/p2p/host/basic"
libp2pping "github.com/libp2p/go-libp2p/p2p/protocol/ping"
"github.com/libp2p/go-tcp-transport"
ws "github.com/libp2p/go-ws-transport"
ma "github.com/multiformats/go-multiaddr"
......@@ -56,6 +57,7 @@ type Service struct {
natManager basichost.NATManager
natAddrResolver *staticAddressResolver
autonatDialer host.Host
pingDialer host.Host
libp2pPeerstore peerstore.Peerstore
metrics metrics
networkID uint64
......@@ -87,7 +89,6 @@ type Options struct {
NATAddr string
EnableWS bool
EnableQUIC bool
Standalone bool
FullNode bool
LightNodeLimit int
WelcomeMessage string
......@@ -178,10 +179,6 @@ func New(ctx context.Context, signer beecrypto.Signer, networkID uint64, overlay
transports = append(transports, libp2p.Transport(libp2pquic.NewTransport))
}
if o.Standalone {
opts = append(opts, libp2p.NoListenAddrs)
}
opts = append(opts, transports...)
h, err := libp2p.New(ctx, opts...)
......@@ -222,6 +219,16 @@ func New(ctx context.Context, signer beecrypto.Signer, networkID uint64, overlay
return nil, fmt.Errorf("handshake service: %w", err)
}
// Create a new dialer for libp2p ping protocol. This ensures that the protocol
// uses a different set of keys to do ping. It prevents inconsistencies in peerstore as
// the addresses used are not dialable and hence should be cleaned up. We should create
// this host with the same transports and security options to be able to dial to other
// peers.
pingDialer, err := libp2p.New(ctx, append(transports, security, libp2p.NoListenAddrs)...)
if err != nil {
return nil, err
}
peerRegistry := newPeerRegistry()
s := &Service{
ctx: ctx,
......@@ -229,6 +236,7 @@ func New(ctx context.Context, signer beecrypto.Signer, networkID uint64, overlay
natManager: natManager,
natAddrResolver: natAddrResolver,
autonatDialer: dialer,
pingDialer: pingDialer,
handshakeService: handshakeService,
libp2pPeerstore: libp2pPeerstore,
metrics: newMetrics(),
......@@ -795,6 +803,9 @@ func (s *Service) Close() error {
if err := s.autonatDialer.Close(); err != nil {
return err
}
if err := s.pingDialer.Close(); err != nil {
return err
}
return s.host.Close()
}
......@@ -816,3 +827,20 @@ func (s *Service) Ready() {
func (s *Service) Halt() {
close(s.halt)
}
func (s *Service) Ping(ctx context.Context, addr ma.Multiaddr) (rtt time.Duration, err error) {
info, err := libp2ppeer.AddrInfoFromP2pAddr(addr)
if err != nil {
return rtt, fmt.Errorf("unable to parse underlay address: %w", err)
}
// Add the address to libp2p peerstore for it to be dialable
s.pingDialer.Peerstore().AddAddrs(info.ID, info.Addrs, peerstore.TempAddrTTL)
select {
case <-ctx.Done():
return rtt, ctx.Err()
case res := <-libp2pping.Ping(ctx, s.pingDialer, info.ID):
return res.RTT, res.Error
}
}
......@@ -378,6 +378,30 @@ func TestConnectDisconnectEvents(t *testing.T) {
}
func TestPing(t *testing.T) {
t.Skip("test flaking")
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
s1, _ := newService(t, 1, libp2pServiceOpts{})
s2, _ := newService(t, 1, libp2pServiceOpts{})
// Wait for listeners to start. There are times when the test fails unexpectedly
// during CI and we suspect it is due to the listeners not starting in time. The
// sleep here ensures CPU is given up for any goroutines which are not getting
// scheduled. Ideally we should explicitly check the TCP status on the port
// where the libp2p.Host is started before assuming the host is up. This seems like
// a bit of an overkill here unless the test starts flaking.
time.Sleep(time.Second)
addr := serviceUnderlayAddress(t, s1)
if _, err := s2.Ping(ctx, addr); err != nil {
t.Fatal(err)
}
}
const (
testProtocolName = "testing"
testProtocolVersion = "2.3.4"
......
......@@ -70,6 +70,18 @@ type StreamerDisconnecter interface {
Disconnecter
}
// Pinger interface is used to ping a underlay address which is not yet known to the bee node.
// It uses libp2p's default ping protocol. This is different from the PingPong protocol as this
// is meant to be used before we know a particular underlay and we can consider it useful
type Pinger interface {
Ping(ctx context.Context, addr ma.Multiaddr) (rtt time.Duration, err error)
}
type StreamerPinger interface {
Streamer
Pinger
}
// Stream represent a bidirectional data Stream.
type Stream interface {
io.ReadWriter
......
......@@ -14,6 +14,7 @@ import (
"github.com/ethersphere/bee/pkg/p2p"
"github.com/ethersphere/bee/pkg/swarm"
ma "github.com/multiformats/go-multiaddr"
)
var (
......@@ -37,6 +38,7 @@ type Recorder struct {
protocols []p2p.ProtocolSpec
middlewares []p2p.HandlerMiddleware
streamErr func(swarm.Address, string, string, string) error
pingErr func(ma.Multiaddr) (time.Duration, error)
protocolsWithPeers map[string]p2p.ProtocolSpec
}
......@@ -76,6 +78,12 @@ func WithStreamError(streamErr func(swarm.Address, string, string, string) error
})
}
func WithPingErr(pingErr func(ma.Multiaddr) (time.Duration, error)) Option {
return optionFunc(func(r *Recorder) {
r.pingErr = pingErr
})
}
func New(opts ...Option) *Recorder {
r := &Recorder{
records: make(map[string][]*Record),
......@@ -153,6 +161,13 @@ func (r *Recorder) NewStream(ctx context.Context, addr swarm.Address, h p2p.Head
return streamOut, nil
}
func (r *Recorder) Ping(ctx context.Context, addr ma.Multiaddr) (rtt time.Duration, err error) {
if r.pingErr != nil {
return r.pingErr(addr)
}
return rtt, err
}
func (r *Recorder) Records(addr swarm.Address, protocolName, protocolVersio, streamName string) ([]*Record, error) {
id := addr.String() + p2p.NewSwarmStreamName(protocolName, protocolVersio, streamName)
......
......@@ -18,6 +18,7 @@ import (
"github.com/ethersphere/bee/pkg/p2p"
"github.com/ethersphere/bee/pkg/p2p/streamtest"
"github.com/ethersphere/bee/pkg/swarm"
ma "github.com/multiformats/go-multiaddr"
"golang.org/x/sync/errgroup"
)
......@@ -758,6 +759,28 @@ func TestRecorder_withStreamError(t *testing.T) {
}, nil)
}
func TestRecorder_ping(t *testing.T) {
testAddr, _ := ma.NewMultiaddr("/ip4/0.0.0.0/tcp/0")
rec := streamtest.New()
_, err := rec.Ping(context.Background(), testAddr)
if err != nil {
t.Fatalf("unable to ping err: %s", err.Error())
}
rec2 := streamtest.New(
streamtest.WithPingErr(func(_ ma.Multiaddr) (rtt time.Duration, err error) {
return rtt, errors.New("fail")
}),
)
_, err = rec2.Ping(context.Background(), testAddr)
if err == nil {
t.Fatal("expected ping err")
}
}
const (
testProtocolName = "testing"
testProtocolVersion = "1.0.1"
......
......@@ -9,14 +9,19 @@ import (
"encoding/hex"
"errors"
"fmt"
"hash"
"math/big"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/storage"
"golang.org/x/crypto/sha3"
)
const dirtyDBKey = "batchservice_dirty_db"
const (
dirtyDBKey = "batchservice_dirty_db"
checksumDBKey = "batchservice_checksum"
)
type batchService struct {
stateStore storage.StateStorer
......@@ -25,6 +30,8 @@ type batchService struct {
listener postage.Listener
owner []byte
batchListener postage.BatchCreationListener
checksum hash.Hash // checksum hasher
}
type Interface interface {
......@@ -39,13 +46,40 @@ func New(
listener postage.Listener,
owner []byte,
batchListener postage.BatchCreationListener,
) Interface {
return &batchService{stateStore, storer, logger, listener, owner, batchListener}
checksumFunc func() hash.Hash,
) (Interface, error) {
if checksumFunc == nil {
checksumFunc = sha3.New256
}
var (
b string
sum = checksumFunc()
)
if err := stateStore.Get(checksumDBKey, &b); err != nil {
if !errors.Is(err, storage.ErrNotFound) {
return nil, err
}
} else {
s, err := hex.DecodeString(b)
if err != nil {
return nil, err
}
n, err := sum.Write(s)
if err != nil {
return nil, err
}
if n != len(s) {
return nil, errors.New("batchstore checksum init")
}
}
return &batchService{stateStore, storer, logger, listener, owner, batchListener, sum}, nil
}
// Create will create a new batch with the given ID, owner value and depth and
// stores it in the BatchStore.
func (svc *batchService) Create(id, owner []byte, normalisedBalance *big.Int, depth, bucketDepth uint8, immutable bool) error {
func (svc *batchService) Create(id, owner []byte, normalisedBalance *big.Int, depth, bucketDepth uint8, immutable bool, txHash []byte) error {
b := &postage.Batch{
ID: id,
Owner: owner,
......@@ -64,14 +98,18 @@ func (svc *batchService) Create(id, owner []byte, normalisedBalance *big.Int, de
if bytes.Equal(svc.owner, owner) && svc.batchListener != nil {
svc.batchListener.Handle(b)
}
cs, err := svc.updateChecksum(txHash)
if err != nil {
return fmt.Errorf("update checksum: %w", err)
}
svc.logger.Debugf("batch service: created batch id %s", hex.EncodeToString(b.ID))
svc.logger.Debugf("batch service: created batch id %s, tx %x, checksum %x", hex.EncodeToString(b.ID), txHash, cs)
return nil
}
// TopUp implements the EventUpdater interface. It tops ups a batch with the
// given ID with the given amount.
func (svc *batchService) TopUp(id []byte, normalisedBalance *big.Int) error {
func (svc *batchService) TopUp(id []byte, normalisedBalance *big.Int, txHash []byte) error {
b, err := svc.storer.Get(id)
if err != nil {
return fmt.Errorf("get: %w", err)
......@@ -81,14 +119,18 @@ func (svc *batchService) TopUp(id []byte, normalisedBalance *big.Int) error {
if err != nil {
return fmt.Errorf("put: %w", err)
}
cs, err := svc.updateChecksum(txHash)
if err != nil {
return fmt.Errorf("update checksum: %w", err)
}
svc.logger.Debugf("batch service: topped up batch id %s from %v to %v", hex.EncodeToString(b.ID), b.Value, normalisedBalance)
svc.logger.Debugf("batch service: topped up batch id %s from %v to %v, tx %x, checksum %x", hex.EncodeToString(b.ID), b.Value, normalisedBalance, txHash, cs)
return nil
}
// UpdateDepth implements the EventUpdater inteface. It sets the new depth of a
// batch with the given ID.
func (svc *batchService) UpdateDepth(id []byte, depth uint8, normalisedBalance *big.Int) error {
func (svc *batchService) UpdateDepth(id []byte, depth uint8, normalisedBalance *big.Int, txHash []byte) error {
b, err := svc.storer.Get(id)
if err != nil {
return fmt.Errorf("get: %w", err)
......@@ -97,21 +139,30 @@ func (svc *batchService) UpdateDepth(id []byte, depth uint8, normalisedBalance *
if err != nil {
return fmt.Errorf("put: %w", err)
}
cs, err := svc.updateChecksum(txHash)
if err != nil {
return fmt.Errorf("update checksum: %w", err)
}
svc.logger.Debugf("batch service: updated depth of batch id %s from %d to %d", hex.EncodeToString(b.ID), b.Depth, depth)
svc.logger.Debugf("batch service: updated depth of batch id %s from %d to %d, tx %x, checksum %x", hex.EncodeToString(b.ID), b.Depth, depth, txHash, cs)
return nil
}
// UpdatePrice implements the EventUpdater interface. It sets the current
// price from the chain in the service chain state.
func (svc *batchService) UpdatePrice(price *big.Int) error {
func (svc *batchService) UpdatePrice(price *big.Int, txHash []byte) error {
cs := svc.storer.GetChainState()
cs.CurrentPrice = price
if err := svc.storer.PutChainState(cs); err != nil {
return fmt.Errorf("put chain state: %w", err)
}
svc.logger.Debugf("batch service: updated chain price to %s", price)
sum, err := svc.updateChecksum(txHash)
if err != nil {
return fmt.Errorf("update checksum: %w", err)
}
svc.logger.Debugf("batch service: updated chain price to %s, tx %x, checksum %x", price, txHash, sum)
return nil
}
......@@ -161,3 +212,29 @@ func (svc *batchService) Start(startBlock uint64) (<-chan struct{}, error) {
}
return svc.listener.Listen(startBlock+1, svc), nil
}
// updateChecksum updates the batchservice checksum once an event gets
// processed. It swaps the existing checksum which is in the hasher
// with the new checksum and persists it in the statestore.
func (svc *batchService) updateChecksum(txHash []byte) ([]byte, error) {
n, err := svc.checksum.Write(txHash)
if err != nil {
return nil, err
}
if l := len(txHash); l != n {
return nil, fmt.Errorf("update checksum wrote %d bytes but want %d bytes", n, l)
}
s := svc.checksum.Sum(nil)
svc.checksum.Reset()
n, err = svc.checksum.Write(s)
if err != nil {
return nil, err
}
if l := len(s); l != n {
return nil, fmt.Errorf("swap checksum wrote %d bytes but want %d bytes", n, l)
}
b := hex.EncodeToString(s)
return s, svc.stateStore.Put(checksumDBKey, b)
}
......@@ -7,6 +7,7 @@ package batchservice_test
import (
"bytes"
"errors"
"hash"
"io/ioutil"
"math/big"
"math/rand"
......@@ -22,8 +23,9 @@ import (
)
var (
testLog = logging.New(ioutil.Discard, 0)
errTest = errors.New("fails")
testLog = logging.New(ioutil.Discard, 0)
errTest = errors.New("fails")
testTxHash = make([]byte, 32)
)
type mockListener struct {
......@@ -51,6 +53,7 @@ func TestBatchServiceCreate(t *testing.T) {
testBatch := postagetesting.MustNewBatch()
testBatchListener := &mockBatchCreationHandler{}
svc, _, _ := newTestStoreAndServiceWithListener(
t,
testBatch.Owner,
testBatchListener,
mock.WithChainState(testChainState),
......@@ -64,6 +67,7 @@ func TestBatchServiceCreate(t *testing.T) {
testBatch.Depth,
testBatch.BucketDepth,
testBatch.Immutable,
testTxHash,
); err == nil {
t.Fatalf("expected error")
}
......@@ -105,6 +109,7 @@ func TestBatchServiceCreate(t *testing.T) {
testBatch := postagetesting.MustNewBatch()
testBatchListener := &mockBatchCreationHandler{}
svc, batchStore, _ := newTestStoreAndServiceWithListener(
t,
testBatch.Owner,
testBatchListener,
mock.WithChainState(testChainState),
......@@ -117,6 +122,7 @@ func TestBatchServiceCreate(t *testing.T) {
testBatch.Depth,
testBatch.BucketDepth,
testBatch.Immutable,
testTxHash,
); err != nil {
t.Fatalf("got error %v", err)
}
......@@ -135,6 +141,7 @@ func TestBatchServiceCreate(t *testing.T) {
rand.Read(owner)
svc, batchStore, _ := newTestStoreAndServiceWithListener(
t,
owner,
testBatchListener,
mock.WithChainState(testChainState),
......@@ -147,6 +154,7 @@ func TestBatchServiceCreate(t *testing.T) {
testBatch.Depth,
testBatch.BucketDepth,
testBatch.Immutable,
testTxHash,
); err != nil {
t.Fatalf("got error %v", err)
}
......@@ -164,32 +172,34 @@ func TestBatchServiceTopUp(t *testing.T) {
t.Run("expect get error", func(t *testing.T) {
svc, _, _ := newTestStoreAndService(
t,
mock.WithGetErr(errTest, 0),
)
if err := svc.TopUp(testBatch.ID, testNormalisedBalance); err == nil {
if err := svc.TopUp(testBatch.ID, testNormalisedBalance, testTxHash); err == nil {
t.Fatal("expected error")
}
})
t.Run("expect put error", func(t *testing.T) {
svc, batchStore, _ := newTestStoreAndService(
t,
mock.WithPutErr(errTest, 1),
)
putBatch(t, batchStore, testBatch)
if err := svc.TopUp(testBatch.ID, testNormalisedBalance); err == nil {
if err := svc.TopUp(testBatch.ID, testNormalisedBalance, testTxHash); err == nil {
t.Fatal("expected error")
}
})
t.Run("passes", func(t *testing.T) {
svc, batchStore, _ := newTestStoreAndService()
svc, batchStore, _ := newTestStoreAndService(t)
putBatch(t, batchStore, testBatch)
want := testNormalisedBalance
if err := svc.TopUp(testBatch.ID, testNormalisedBalance); err != nil {
if err := svc.TopUp(testBatch.ID, testNormalisedBalance, testTxHash); err != nil {
t.Fatalf("top up: %v", err)
}
......@@ -211,30 +221,32 @@ func TestBatchServiceUpdateDepth(t *testing.T) {
t.Run("expect get error", func(t *testing.T) {
svc, _, _ := newTestStoreAndService(
t,
mock.WithGetErr(errTest, 0),
)
if err := svc.UpdateDepth(testBatch.ID, testNewDepth, testNormalisedBalance); err == nil {
if err := svc.UpdateDepth(testBatch.ID, testNewDepth, testNormalisedBalance, testTxHash); err == nil {
t.Fatal("expected get error")
}
})
t.Run("expect put error", func(t *testing.T) {
svc, batchStore, _ := newTestStoreAndService(
t,
mock.WithPutErr(errTest, 1),
)
putBatch(t, batchStore, testBatch)
if err := svc.UpdateDepth(testBatch.ID, testNewDepth, testNormalisedBalance); err == nil {
if err := svc.UpdateDepth(testBatch.ID, testNewDepth, testNormalisedBalance, testTxHash); err == nil {
t.Fatal("expected put error")
}
})
t.Run("passes", func(t *testing.T) {
svc, batchStore, _ := newTestStoreAndService()
svc, batchStore, _ := newTestStoreAndService(t)
putBatch(t, batchStore, testBatch)
if err := svc.UpdateDepth(testBatch.ID, testNewDepth, testNormalisedBalance); err != nil {
if err := svc.UpdateDepth(testBatch.ID, testNewDepth, testNormalisedBalance, testTxHash); err != nil {
t.Fatalf("update depth: %v", err)
}
......@@ -256,22 +268,24 @@ func TestBatchServiceUpdatePrice(t *testing.T) {
t.Run("expect put error", func(t *testing.T) {
svc, batchStore, _ := newTestStoreAndService(
t,
mock.WithChainState(testChainState),
mock.WithPutErr(errTest, 1),
)
putChainState(t, batchStore, testChainState)
if err := svc.UpdatePrice(testNewPrice); err == nil {
if err := svc.UpdatePrice(testNewPrice, testTxHash); err == nil {
t.Fatal("expected error")
}
})
t.Run("passes", func(t *testing.T) {
svc, batchStore, _ := newTestStoreAndService(
t,
mock.WithChainState(testChainState),
)
if err := svc.UpdatePrice(testNewPrice); err != nil {
if err := svc.UpdatePrice(testNewPrice, testTxHash); err != nil {
t.Fatalf("update price: %v", err)
}
......@@ -288,6 +302,7 @@ func TestBatchServiceUpdateBlockNumber(t *testing.T) {
TotalAmount: big.NewInt(100),
}
svc, batchStore, _ := newTestStoreAndService(
t,
mock.WithChainState(testChainState),
)
......@@ -305,7 +320,7 @@ func TestBatchServiceUpdateBlockNumber(t *testing.T) {
}
func TestTransactionOk(t *testing.T) {
svc, store, s := newTestStoreAndService()
svc, store, s := newTestStoreAndService(t)
if _, err := svc.Start(10); err != nil {
t.Fatal(err)
}
......@@ -318,7 +333,10 @@ func TestTransactionOk(t *testing.T) {
t.Fatal(err)
}
svc2 := batchservice.New(s, store, testLog, newMockListener(), nil, nil)
svc2, err := batchservice.New(s, store, testLog, newMockListener(), nil, nil, nil)
if err != nil {
t.Fatal(err)
}
if _, err := svc2.Start(10); err != nil {
t.Fatal(err)
}
......@@ -329,7 +347,7 @@ func TestTransactionOk(t *testing.T) {
}
func TestTransactionFail(t *testing.T) {
svc, store, s := newTestStoreAndService()
svc, store, s := newTestStoreAndService(t)
if _, err := svc.Start(10); err != nil {
t.Fatal(err)
}
......@@ -338,7 +356,10 @@ func TestTransactionFail(t *testing.T) {
t.Fatal(err)
}
svc2 := batchservice.New(s, store, testLog, newMockListener(), nil, nil)
svc2, err := batchservice.New(s, store, testLog, newMockListener(), nil, nil, nil)
if err != nil {
t.Fatal(err)
}
if _, err := svc2.Start(10); err != nil {
t.Fatal(err)
}
......@@ -347,19 +368,47 @@ func TestTransactionFail(t *testing.T) {
t.Fatalf("expect %d reset calls got %d", 1, c)
}
}
func TestChecksum(t *testing.T) {
s := mocks.NewStateStore()
store := mock.New()
mockHash := &hs{}
svc, err := batchservice.New(s, store, testLog, newMockListener(), nil, nil, func() hash.Hash { return mockHash })
if err != nil {
t.Fatal(err)
}
testNormalisedBalance := big.NewInt(2000000000000)
testBatch := postagetesting.MustNewBatch()
putBatch(t, store, testBatch)
if err := svc.TopUp(testBatch.ID, testNormalisedBalance, testTxHash); err != nil {
t.Fatalf("top up: %v", err)
}
if m := mockHash.ctr; m != 2 {
t.Fatalf("expected %d calls got %d", 2, m)
}
}
func newTestStoreAndServiceWithListener(
t *testing.T,
owner []byte,
batchListener postage.BatchCreationListener,
opts ...mock.Option,
) (postage.EventUpdater, *mock.BatchStore, storage.StateStorer) {
t.Helper()
s := mocks.NewStateStore()
store := mock.New(opts...)
svc := batchservice.New(s, store, testLog, newMockListener(), owner, batchListener)
svc, err := batchservice.New(s, store, testLog, newMockListener(), owner, batchListener, nil)
if err != nil {
t.Fatal(err)
}
return svc, store, s
}
func newTestStoreAndService(opts ...mock.Option) (postage.EventUpdater, *mock.BatchStore, storage.StateStorer) {
return newTestStoreAndServiceWithListener(nil, nil, opts...)
func newTestStoreAndService(t *testing.T, opts ...mock.Option) (postage.EventUpdater, *mock.BatchStore, storage.StateStorer) {
t.Helper()
return newTestStoreAndServiceWithListener(t, nil, nil, opts...)
}
func putBatch(t *testing.T, store postage.Storer, b *postage.Batch) {
......@@ -377,3 +426,11 @@ func putChainState(t *testing.T, store postage.Storer, cs *postage.ChainState) {
t.Fatalf("store put chain state: %v", err)
}
}
type hs struct{ ctr uint8 }
func (h *hs) Write(p []byte) (n int, err error) { h.ctr++; return len(p), nil }
func (h *hs) Sum(b []byte) []byte { return []byte{h.ctr} }
func (h *hs) Reset() {}
func (h *hs) Size() int { panic("not implemented") }
func (h *hs) BlockSize() int { panic("not implemented") }
......@@ -92,7 +92,11 @@ func (bs *BatchStore) Get(id []byte) (*postage.Batch, error) {
}
bs.getErrDelayCnt--
}
if !bytes.Equal(bs.id, id) {
exists, err := bs.Exists(id)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.New("no such id")
}
return bs.batch, nil
......@@ -147,6 +151,11 @@ func (bs *BatchStore) SetRadiusSetter(r postage.RadiusSetter) {
panic("not implemented")
}
// Exists reports whether batch referenced by the give id exists.
func (bs *BatchStore) Exists(id []byte) (bool, error) {
return bytes.Equal(bs.id, id), nil
}
func (bs *BatchStore) Reset() error {
bs.resetCallCount++
return nil
......
......@@ -198,6 +198,18 @@ func (s *store) SetRadiusSetter(r postage.RadiusSetter) {
s.radiusSetter = r
}
// Exists reports whether batch referenced by the give id exists.
func (s *store) Exists(id []byte) (bool, error) {
switch err := s.store.Get(batchKey(id), new(postage.Batch)); {
case err == nil:
return true, nil
case errors.Is(err, storage.ErrNotFound):
return false, nil
default:
return false, err
}
}
func (s *store) Reset() error {
prefix := "batchstore_"
if err := s.store.Iterate(prefix, func(k, _ []byte) (bool, error) {
......
......@@ -12,10 +12,10 @@ import (
// EventUpdater interface definitions reflect the updates triggered by events
// emitted by the postage contract on the blockchain.
type EventUpdater interface {
Create(id []byte, owner []byte, normalisedBalance *big.Int, depth, bucketDepth uint8, immutable bool) error
TopUp(id []byte, normalisedBalance *big.Int) error
UpdateDepth(id []byte, depth uint8, normalisedBalance *big.Int) error
UpdatePrice(price *big.Int) error
Create(id []byte, owner []byte, normalisedBalance *big.Int, depth, bucketDepth uint8, immutable bool, txHash []byte) error
TopUp(id []byte, normalisedBalance *big.Int, txHash []byte) error
UpdateDepth(id []byte, depth uint8, normalisedBalance *big.Int, txHash []byte) error
UpdatePrice(price *big.Int, txHash []byte) error
UpdateBlockNumber(blockNumber uint64) error
Start(startBlock uint64) (<-chan struct{}, error)
......@@ -30,11 +30,12 @@ type UnreserveIteratorFn func(id []byte, radius uint8) (bool, error)
type Storer interface {
Get(id []byte) (*Batch, error)
Put(*Batch, *big.Int, uint8) error
PutChainState(*ChainState) error
GetChainState() *ChainState
PutChainState(*ChainState) error
GetReserveState() *ReserveState
SetRadiusSetter(RadiusSetter)
Unreserve(UnreserveIteratorFn) error
Exists(id []byte) (bool, error)
Reset() error
}
......
......@@ -118,6 +118,7 @@ func (l *listener) processEvent(e types.Log, updater postage.EventUpdater) error
c.Depth,
c.BucketDepth,
c.ImmutableFlag,
e.TxHash.Bytes(),
)
case batchTopupTopic:
c := &batchTopUpEvent{}
......@@ -129,6 +130,7 @@ func (l *listener) processEvent(e types.Log, updater postage.EventUpdater) error
return updater.TopUp(
c.BatchId[:],
c.NormalisedBalance,
e.TxHash.Bytes(),
)
case batchDepthIncreaseTopic:
c := &batchDepthIncreaseEvent{}
......@@ -141,6 +143,7 @@ func (l *listener) processEvent(e types.Log, updater postage.EventUpdater) error
c.BatchId[:],
c.NewDepth,
c.NormalisedBalance,
e.TxHash.Bytes(),
)
case priceUpdateTopic:
c := &priceUpdateEvent{}
......@@ -151,6 +154,7 @@ func (l *listener) processEvent(e types.Log, updater postage.EventUpdater) error
l.metrics.PriceCounter.Inc()
return updater.UpdatePrice(
c.Price,
e.TxHash.Bytes(),
)
default:
l.metrics.EventErrors.Inc()
......@@ -323,27 +327,6 @@ type priceUpdateEvent struct {
Price *big.Int
}
var (
GoerliChainID = int64(5)
GoerliPostageStampContractAddress = common.HexToAddress("0x621e455C4a139f5C4e4A8122Ce55Dc21630769E4")
GoerliStartBlock = uint64(4933174)
XDaiChainID = int64(100)
XDaiPostageStampContractAddress = common.HexToAddress("0x6a1a21eca3ab28be85c7ba22b2d6eae5907c900e")
XDaiStartBlock = uint64(16515648)
)
// DiscoverAddresses returns the canonical contracts for this chainID
func DiscoverAddresses(chainID int64) (postageStamp common.Address, startBlock uint64, found bool) {
switch chainID {
case GoerliChainID:
return GoerliPostageStampContractAddress, GoerliStartBlock, true
case XDaiChainID:
return XDaiPostageStampContractAddress, XDaiStartBlock, true
default:
return common.Address{}, 0, false
}
}
func totalTimeMetric(metric prometheus.Counter, start time.Time) {
totalTime := time.Since(start)
metric.Add(float64(totalTime))
......
......@@ -307,7 +307,7 @@ type updater struct {
eventC chan interface{}
}
func (u *updater) Create(id, owner []byte, normalisedAmount *big.Int, depth, bucketDepth uint8, immutable bool) error {
func (u *updater) Create(id, owner []byte, normalisedAmount *big.Int, depth, bucketDepth uint8, immutable bool, _ []byte) error {
u.eventC <- createArgs{
id: id,
owner: owner,
......@@ -319,7 +319,7 @@ func (u *updater) Create(id, owner []byte, normalisedAmount *big.Int, depth, buc
return nil
}
func (u *updater) TopUp(id []byte, normalisedBalance *big.Int) error {
func (u *updater) TopUp(id []byte, normalisedBalance *big.Int, _ []byte) error {
u.eventC <- topupArgs{
id: id,
normalisedBalance: normalisedBalance,
......@@ -327,7 +327,7 @@ func (u *updater) TopUp(id []byte, normalisedBalance *big.Int) error {
return nil
}
func (u *updater) UpdateDepth(id []byte, depth uint8, normalisedBalance *big.Int) error {
func (u *updater) UpdateDepth(id []byte, depth uint8, normalisedBalance *big.Int, _ []byte) error {
u.eventC <- depthArgs{
id: id,
depth: depth,
......@@ -336,7 +336,7 @@ func (u *updater) UpdateDepth(id []byte, depth uint8, normalisedBalance *big.Int
return nil
}
func (u *updater) UpdatePrice(price *big.Int) error {
func (u *updater) UpdatePrice(price *big.Int, _ []byte) error {
u.eventC <- priceArgs{price}
return nil
}
......
......@@ -65,21 +65,15 @@ func (m *mockPostage) GetStampIssuer(id []byte) (*postage.StampIssuer, error) {
return nil, errors.New("stampissuer not found")
}
// SetDefaultIssuer sets the default stamps issuer.
// In this mock the given batch ID is ignored and the call never fails:
// the default issuer is always the fixed m.i.
func (m *mockPostage) SetDefaultIssuer([]byte) error {
// Noop, the default is m.i.
return nil
}
// DefaultIssuer returns the default stamps issuer.
// The mock always returns the fixed issuer m.i.
func (m *mockPostage) DefaultIssuer() *postage.StampIssuer {
return m.i
}
// IssuerUsable reports whether the given stamp issuer can be used.
// The mock accepts every issuer unconditionally.
func (m *mockPostage) IssuerUsable(_ *postage.StampIssuer) bool {
return true
}
// BatchExists returns always true.
// The mock reports every batch ID as existing and never returns an error.
func (m *mockPostage) BatchExists(_ []byte) (bool, error) {
return true, nil
}
// Handle is a no-op implementation of the batch-creation listener in this mock.
func (m *mockPostage) Handle(_ *postage.Batch) {}
func (m *mockPostage) Close() error {
......
......@@ -34,8 +34,7 @@ type Service interface {
StampIssuers() []*StampIssuer
GetStampIssuer([]byte) (*StampIssuer, error)
IssuerUsable(*StampIssuer) bool
SetDefaultIssuer([]byte) error
DefaultIssuer() *StampIssuer
BatchExists([]byte) (bool, error)
BatchCreationListener
io.Closer
}
......@@ -43,12 +42,11 @@ type Service interface {
// service handles postage batches
// stores the active batches.
type service struct {
lock sync.Mutex
store storage.StateStorer
postageStore Storer
chainID int64
issuers []*StampIssuer
defaultStampIssuer *StampIssuer
lock sync.Mutex
store storage.StateStorer
postageStore Storer
chainID int64
issuers []*StampIssuer
}
// NewService constructs a new Service.
......@@ -90,24 +88,6 @@ func (ps *service) Add(st *StampIssuer) {
ps.issuers = append(ps.issuers, st)
}
// SetDefaultIssuer sets the default stamps issuer.
func (ps *service) SetDefaultIssuer(id []byte) error {
si, err := ps.GetStampIssuer(id)
if err != nil {
return err
}
ps.lock.Lock()
ps.defaultStampIssuer = si
ps.lock.Unlock()
return nil
}
// DefaultIssuer returns the default stamps issuer.
func (ps *service) DefaultIssuer() *StampIssuer {
return ps.defaultStampIssuer
}
// Handle implements the BatchCreationListener interface. This is fired on receiving
// a batch creation event from the blockchain listener to ensure that if a stamp
// issuer was not created initially, we will create it here.
......@@ -144,6 +124,11 @@ func (ps *service) IssuerUsable(st *StampIssuer) bool {
return true
}
// BatchExists returns true if the batch referenced by the given id exists.
// The lookup is delegated to the underlying postage batch store.
func (ps *service) BatchExists(id []byte) (bool, error) {
return ps.postageStore.Exists(id)
}
// GetStampIssuer finds a stamp issuer by batch ID.
func (ps *service) GetStampIssuer(batchID []byte) (*StampIssuer, error) {
ps.lock.Lock()
......
......@@ -113,8 +113,10 @@ func toSignDigest(addr, batchId, index, timestamp []byte) ([]byte, error) {
return h.Sum(nil), nil
}
type ValidStampFn func(chunk swarm.Chunk, stampBytes []byte) (swarm.Chunk, error)
// ValidStamp returns a stampvalidator function passed to protocols with chunk entrypoints.
func ValidStamp(batchStore Storer) func(chunk swarm.Chunk, stampBytes []byte) (swarm.Chunk, error) {
func ValidStamp(batchStore Storer) ValidStampFn {
return func(chunk swarm.Chunk, stampBytes []byte) (swarm.Chunk, error) {
stamp := new(Stamp)
err := stamp.UnmarshalBinary(stampBytes)
......
......@@ -145,6 +145,13 @@ func (si *StampIssuer) BucketDepth() uint8 {
return si.data.BucketDepth
}
// BucketUpperBound returns the maximum number of collisions
// possible in a bucket given the batch's depth and bucket
// depth.
func (si *StampIssuer) BucketUpperBound() uint32 {
	shift := si.Depth() - si.BucketDepth()
	return 1 << shift
}
// BlockNumber when this batch was created.
func (si *StampIssuer) BlockNumber() uint64 {
return si.data.BlockNumber
......@@ -154,3 +161,11 @@ func (si *StampIssuer) BlockNumber() uint64 {
func (si *StampIssuer) ImmutableFlag() bool {
return si.data.ImmutableFlag
}
// Buckets returns a snapshot copy of the issuer's per-bucket counters,
// taken under the bucket mutex so callers can read it without locking.
func (si *StampIssuer) Buckets() []uint32 {
	si.bucketMu.Lock()
	defer si.bucketMu.Unlock()

	snapshot := make([]uint32, len(si.data.Buckets))
	copy(snapshot, si.data.Buckets)
	return snapshot
}
......@@ -49,7 +49,7 @@ type Service struct {
paymentThresholdObserver PaymentThresholdObserver
}
func New(streamer p2p.Streamer, logger logging.Logger, paymentThreshold *big.Int, minThreshold *big.Int) *Service {
func New(streamer p2p.Streamer, logger logging.Logger, paymentThreshold, minThreshold *big.Int) *Service {
return &Service{
streamer: streamer,
logger: logger,
......
......@@ -30,11 +30,11 @@ func BenchmarkWrap(b *testing.B) {
depth int
}{
{1, 1},
{4, 1},
{16, 1},
{256, 2},
{8, 1},
{256, 1},
{16, 2},
{64, 2},
{256, 2},
{256, 3},
{4096, 3},
{16384, 3},
......@@ -46,12 +46,13 @@ func BenchmarkWrap(b *testing.B) {
b.Fatal(err)
}
pubkey := &key.PublicKey
ctx := context.Background()
for _, c := range cases {
name := fmt.Sprintf("length:%d,depth:%d", c.length, c.depth)
b.Run(name, func(b *testing.B) {
targets := newTargets(c.length, c.depth)
for i := 0; i < b.N; i++ {
if _, err := pss.Wrap(context.Background(), topic, msg, pubkey, targets); err != nil {
if _, err := pss.Wrap(ctx, topic, msg, pubkey, targets); err != nil {
b.Fatal(err)
}
}
......
......@@ -13,8 +13,7 @@ import (
"encoding/hex"
"errors"
"fmt"
"math"
"math/big"
"io"
"github.com/btcsuite/btcd/btcec"
"github.com/ethersphere/bee/pkg/bmtpool"
......@@ -22,6 +21,7 @@ import (
"github.com/ethersphere/bee/pkg/encryption"
"github.com/ethersphere/bee/pkg/encryption/elgamal"
"github.com/ethersphere/bee/pkg/swarm"
"golang.org/x/sync/errgroup"
)
var (
......@@ -33,8 +33,6 @@ var (
// ErrVarLenTargets is returned when the given target list for a trojan chunk has addresses of different lengths
ErrVarLenTargets = errors.New("target list cannot have targets of different length")
maxUint32 = big.NewInt(math.MaxUint32)
)
// Topic is the type that classifies messages, allows client applications to subscribe to
......@@ -204,61 +202,53 @@ func contains(col Targets, elem []byte) bool {
// mine iteratively enumerates different nonces until the address (BMT hash) of the chunk has one of the targets as its prefix
func mine(ctx context.Context, odd bool, f func(nonce []byte) (swarm.Chunk, error)) (swarm.Chunk, error) {
seeds := make([]uint32, 8)
for i := range seeds {
b, err := random.Int(random.Reader, maxUint32)
if err != nil {
return nil, err
}
seeds[i] = uint32(b.Int64())
}
initnonce := make([]byte, 32)
for i := 0; i < 8; i++ {
binary.LittleEndian.PutUint32(initnonce[i*4:i*4+4], seeds[i])
if _, err := io.ReadFull(random.Reader, initnonce); err != nil {
return nil, err
}
if odd {
initnonce[28] |= 0x01
} else {
initnonce[28] &= 0xfe
}
seeds[7] = binary.LittleEndian.Uint32(initnonce[28:32])
quit := make(chan struct{})
// make both errs and result channels buffered so they never block
ctx, cancel := context.WithCancel(ctx)
defer cancel()
eg, ctx := errgroup.WithContext(ctx)
result := make(chan swarm.Chunk, 8)
errs := make(chan error, 8)
for i := 0; i < 8; i++ {
go func(j int) {
eg.Go(func() error {
nonce := make([]byte, 32)
copy(nonce, initnonce)
for seed := seeds[j]; ; seed++ {
binary.LittleEndian.PutUint32(nonce[j*4:j*4+4], seed)
for {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
if _, err := io.ReadFull(random.Reader, nonce[:4]); err != nil {
return err
}
res, err := f(nonce)
if err != nil {
errs <- err
return
return err
}
if res != nil {
result <- res
return
}
select {
case <-quit:
return
default:
return nil
}
}
}(i)
})
}
defer close(quit)
select {
case <-ctx.Done():
return nil, ctx.Err()
case err := <-errs:
var err error
go func() {
err = eg.Wait()
result <- nil
}()
r := <-result
if r == nil {
return nil, err
case res := <-result:
return res, nil
}
return r, nil
}
// extracts ephemeral public key from the chunk data to use with el-Gamal
......
......@@ -166,13 +166,14 @@ func (p *PullSyncMock) SyncInterval(ctx context.Context, peer swarm.Address, bin
return 0, 1, context.Canceled
}
if isLive && len(p.liveSyncReplies) > 0 {
p.mtx.Lock()
if p.liveSyncCalls >= len(p.liveSyncReplies) {
p.mtx.Unlock()
<-p.quit
// when shutting down, on the puller side we cancel the context going into the pullsync protocol request
// this results in SyncInterval returning with a context cancelled error
return 0, 0, context.Canceled
}
p.mtx.Lock()
v := p.liveSyncReplies[p.liveSyncCalls]
p.liveSyncCalls++
p.mtx.Unlock()
......
......@@ -21,6 +21,7 @@ import (
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/p2p"
"github.com/ethersphere/bee/pkg/p2p/protobuf"
"github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/pullsync/pb"
"github.com/ethersphere/bee/pkg/pullsync/pullstorage"
"github.com/ethersphere/bee/pkg/soc"
......@@ -67,7 +68,7 @@ type Syncer struct {
quit chan struct{}
wg sync.WaitGroup
unwrap func(swarm.Chunk)
validStamp func(swarm.Chunk, []byte) (swarm.Chunk, error)
validStamp postage.ValidStampFn
ruidMtx sync.Mutex
ruidCtx map[uint32]func()
......@@ -76,7 +77,7 @@ type Syncer struct {
io.Closer
}
func New(streamer p2p.Streamer, storage pullstorage.Storer, unwrap func(swarm.Chunk), validStamp func(swarm.Chunk, []byte) (swarm.Chunk, error), logger logging.Logger) *Syncer {
func New(streamer p2p.Streamer, storage pullstorage.Storer, unwrap func(swarm.Chunk), validStamp postage.ValidStampFn, logger logging.Logger) *Syncer {
return &Syncer{
streamer: streamer,
storage: storage,
......@@ -228,7 +229,8 @@ func (s *Syncer) SyncInterval(ctx context.Context, peer swarm.Address, bin uint8
chunk := swarm.NewChunk(addr, delivery.Data)
if chunk, err = s.validStamp(chunk, delivery.Stamp); err != nil {
return 0, ru.Ruid, err
s.logger.Debugf("unverified chunk: %w", err)
continue
}
if cac.Valid(chunk) {
......
......@@ -19,6 +19,7 @@ import (
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/p2p"
"github.com/ethersphere/bee/pkg/p2p/protobuf"
"github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/pricer"
"github.com/ethersphere/bee/pkg/pushsync/pb"
"github.com/ethersphere/bee/pkg/soc"
......@@ -71,7 +72,7 @@ type PushSync struct {
pricer pricer.Interface
metrics metrics
tracer *tracing.Tracer
validStamp func(swarm.Chunk, []byte) (swarm.Chunk, error)
validStamp postage.ValidStampFn
signer crypto.Signer
isFullNode bool
warmupPeriod time.Time
......@@ -82,7 +83,7 @@ var defaultTTL = 20 * time.Second // request time to live
var timeToWaitForPushsyncToNeighbor = 3 * time.Second // time to wait to get a receipt for a chunk
var nPeersToPushsync = 3 // number of peers to replicate to as receipt is sent upstream
func New(address swarm.Address, blockHash []byte, streamer p2p.StreamerDisconnecter, storer storage.Putter, topology topology.Driver, tagger *tags.Tags, isFullNode bool, unwrap func(swarm.Chunk), validStamp func(swarm.Chunk, []byte) (swarm.Chunk, error), logger logging.Logger, accounting accounting.Interface, pricer pricer.Interface, signer crypto.Signer, tracer *tracing.Tracer, warmupTime time.Duration) *PushSync {
func New(address swarm.Address, blockHash []byte, streamer p2p.StreamerDisconnecter, storer storage.Putter, topology topology.Driver, tagger *tags.Tags, isFullNode bool, unwrap func(swarm.Chunk), validStamp postage.ValidStampFn, logger logging.Logger, accounting accounting.Interface, pricer pricer.Interface, signer crypto.Signer, tracer *tracing.Tracer, warmupTime time.Duration) *PushSync {
ps := &PushSync{
address: address,
blockHash: blockHash,
......@@ -139,9 +140,14 @@ func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream)
ps.metrics.TotalReceived.Inc()
chunk := swarm.NewChunk(swarm.NewAddress(ch.Address), ch.Data)
if chunk, err = ps.validStamp(chunk, ch.Stamp); err != nil {
return fmt.Errorf("pushsync valid stamp: %w", err)
chunkAddress := chunk.Address()
stamp := new(postage.Stamp)
// attaching the stamp is required because pushToClosest expects a chunk with a stamp
err = stamp.UnmarshalBinary(ch.Stamp)
if err != nil {
return fmt.Errorf("pushsync stamp unmarshall: %w", err)
}
chunk.WithStamp(stamp)
if cac.Valid(chunk) {
if ps.unwrap != nil {
......@@ -151,16 +157,21 @@ func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream)
return swarm.ErrInvalidChunk
}
price := ps.pricer.Price(chunk.Address())
price := ps.pricer.Price(chunkAddress)
// if the peer is closer to the chunk, AND it's a full node, we were selected for replication. Return early.
if p.FullNode {
bytes := chunk.Address().Bytes()
bytes := chunkAddress.Bytes()
if dcmp, _ := swarm.DistanceCmp(bytes, p.Address.Bytes(), ps.address.Bytes()); dcmp == 1 {
if ps.topologyDriver.IsWithinDepth(chunk.Address()) {
if ps.topologyDriver.IsWithinDepth(chunkAddress) {
ctxd, canceld := context.WithTimeout(context.Background(), timeToWaitForPushsyncToNeighbor)
defer canceld()
chunk, err = ps.validStamp(chunk, ch.Stamp)
if err != nil {
return fmt.Errorf("pushsync valid stamp: %w", err)
}
_, err = ps.storer.Put(ctxd, storage.ModePutSync, chunk)
if err != nil {
return fmt.Errorf("chunk store: %w", err)
......@@ -191,7 +202,13 @@ func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream)
// forwarding replication
storedChunk := false
if ps.topologyDriver.IsWithinDepth(chunk.Address()) {
if ps.topologyDriver.IsWithinDepth(chunkAddress) {
chunk, err = ps.validStamp(chunk, ch.Stamp)
if err != nil {
return fmt.Errorf("pushsync valid stamp: %w", err)
}
_, err = ps.storer.Put(ctx, storage.ModePutSync, chunk)
if err != nil {
ps.logger.Warningf("pushsync: within depth peer's attempt to store chunk failed: %v", err)
......@@ -200,13 +217,19 @@ func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream)
}
}
span, _, ctx := ps.tracer.StartSpanFromContext(ctx, "pushsync-handler", ps.logger, opentracing.Tag{Key: "address", Value: chunk.Address().String()})
span, _, ctx := ps.tracer.StartSpanFromContext(ctx, "pushsync-handler", ps.logger, opentracing.Tag{Key: "address", Value: chunkAddress.String()})
defer span.Finish()
receipt, err := ps.pushToClosest(ctx, chunk, false, p.Address)
if err != nil {
if errors.Is(err, topology.ErrWantSelf) {
if !storedChunk {
chunk, err = ps.validStamp(chunk, ch.Stamp)
if err != nil {
return fmt.Errorf("pushsync valid stamp: %w", err)
}
_, err = ps.storer.Put(ctx, storage.ModePutSync, chunk)
if err != nil {
return fmt.Errorf("chunk store: %w", err)
......@@ -225,7 +248,7 @@ func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream)
}
defer debit.Cleanup()
receipt := pb.Receipt{Address: chunk.Address().Bytes(), Signature: signature, BlockHash: ps.blockHash}
receipt := pb.Receipt{Address: chunkAddress.Bytes(), Signature: signature, BlockHash: ps.blockHash}
if err := w.WriteMsgWithContext(ctx, &receipt); err != nil {
return fmt.Errorf("send receipt to peer %s: %w", p.Address.String(), err)
}
......@@ -295,10 +318,14 @@ func (ps *PushSync) pushToClosest(ctx context.Context, ch swarm.Chunk, retryAllo
// in which case we should return immediately.
// if ErrWantSelf is returned, it means we are the closest peer.
if errors.Is(err, topology.ErrWantSelf) {
if time.Now().Before(ps.warmupPeriod) {
if !ps.warmedUp() {
return nil, ErrWarmup
}
if !ps.topologyDriver.IsWithinDepth(ch.Address()) {
return nil, ErrNoPush
}
count := 0
// Push the chunk to some peers in the neighborhood in parallel for replication.
// Any errors here should NOT impact the rest of the handler.
......@@ -308,6 +335,12 @@ func (ps *PushSync) pushToClosest(ctx context.Context, ch swarm.Chunk, retryAllo
return false, false, nil
}
// here we skip the peer if the peer is closer to the chunk than us
// we replicate with peers that are further away than us because we are the storer
if dcmp, _ := swarm.DistanceCmp(ch.Address().Bytes(), peer.Bytes(), ps.address.Bytes()); dcmp == 1 {
return false, false, nil
}
if count == nPeersToPushsync {
return true, false, nil
}
......@@ -342,7 +375,16 @@ func (ps *PushSync) pushToClosest(ctx context.Context, ch swarm.Chunk, retryAllo
}
if err != nil {
logger.Debugf("could not push to peer %s: %v", peer, err)
resultC <- &pushResult{err: err, attempted: attempted}
// if the node has warmed up AND no other closer peer has been tried
if ps.warmedUp() && !ps.skipList.HasChunk(ch.Address()) {
ps.skipList.Add(peer, ch.Address(), skipPeerExpiration)
}
select {
case resultC <- &pushResult{err: err, attempted: attempted}:
case <-ctx.Done():
}
return
}
select {
......@@ -360,11 +402,6 @@ func (ps *PushSync) pushToClosest(ctx context.Context, ch swarm.Chunk, retryAllo
}
if r.err != nil && r.attempted {
ps.metrics.TotalFailedSendAttempts.Inc()
// if the node has warmed up AND no other closer peer has been tried
if time.Now().After(ps.warmupPeriod) && !ps.skipList.HasChunk(ch.Address()) {
ps.skipList.Add(peer, ch.Address(), skipPeerExpiration)
}
}
case <-ctx.Done():
return nil, ctx.Err()
......@@ -503,6 +540,10 @@ func (ps *PushSync) pushToNeighbour(peer swarm.Address, ch swarm.Chunk, origin b
err = ps.accounting.Credit(peer, receiptPrice, origin)
}
// warmedUp reports whether the node's warmup period has already elapsed.
func (ps *PushSync) warmedUp() bool {
	now := time.Now()
	return now.After(ps.warmupPeriod)
}
type peerSkipList struct {
sync.Mutex
chunks map[string]struct{}
......@@ -516,7 +557,7 @@ func newPeerSkipList() *peerSkipList {
}
}
func (l *peerSkipList) Add(peer swarm.Address, chunk swarm.Address, expire time.Duration) {
func (l *peerSkipList) Add(peer, chunk swarm.Address, expire time.Duration) {
l.Lock()
defer l.Unlock()
......
This diff is collapsed.
// Copyright 2021 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ratelimit provides a mechanism to rate limit requests based on a string key,
// refill rate and burst amount. Under the hood, it's a token bucket of size burst amount,
// that refills at the refill rate.
package ratelimit
import (
"sync"
"time"
"golang.org/x/time/rate"
)
// Limiter rate-limits callers per string key. Each key gets its own
// token bucket, created lazily on first use in Allow.
type Limiter struct {
mtx sync.Mutex // guards the limiter map
limiter map[string]*rate.Limiter // per-key token buckets
rate rate.Limit // refill rate shared by all buckets
burst int // bucket capacity shared by all buckets
}
// New returns a new Limiter object with refresh rate and burst amount.
// Per-key buckets are created lazily by Allow.
func New(r time.Duration, burst int) *Limiter {
	l := &Limiter{
		limiter: make(map[string]*rate.Limiter),
		rate:    rate.Every(r),
		burst:   burst,
	}
	return l
}
// Allow checks if the limiter that belongs to 'key' has not exceeded the
// limit, consuming count tokens from that key's bucket. A bucket is created
// on first use of a key.
func (l *Limiter) Allow(key string, count int) bool {
	l.mtx.Lock()
	defer l.mtx.Unlock()

	lim, exists := l.limiter[key]
	if !exists {
		lim = rate.NewLimiter(l.rate, l.burst)
		l.limiter[key] = lim
	}
	return lim.AllowN(time.Now(), count)
}
// Clear deletes the limiter that belongs to 'key', discarding any
// accumulated tokens; the next Allow for the key starts a fresh bucket.
func (l *Limiter) Clear(key string) {
	l.mtx.Lock()
	delete(l.limiter, key)
	l.mtx.Unlock()
}
// Copyright 2021 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ratelimit_test
import (
"testing"
"time"
"github.com/ethersphere/bee/pkg/ratelimit"
)
// TestRateLimit verifies that a full burst is allowed once per key,
// that a second immediate burst is rejected, that Clear resets a key,
// and that keys are limited independently.
func TestRateLimit(t *testing.T) {
	const (
		key1  = "test1"
		key2  = "test2"
		burst = 10
	)
	limiter := ratelimit.New(time.Second, burst)

	if !limiter.Allow(key1, burst) {
		t.Fatal("want allowed")
	}
	if limiter.Allow(key1, burst) {
		t.Fatalf("want not allowed")
	}

	limiter.Clear(key1)

	if !limiter.Allow(key1, burst) {
		t.Fatal("want allowed")
	}
	if !limiter.Allow(key2, burst) {
		t.Fatal("want allowed")
	}
}
......@@ -233,12 +233,12 @@ func newTestNetStore(t *testing.T, recoveryFunc recovery.Callback) storage.Store
return nil
}}
server := retrieval.New(swarm.ZeroAddress, mockStorer, nil, ps0, logger, serverMockAccounting, pricerMock, nil)
server := retrieval.New(swarm.ZeroAddress, mockStorer, nil, ps0, logger, serverMockAccounting, pricerMock, nil, false, noopStampValidator)
recorder := streamtest.New(
streamtest.WithProtocols(server.Protocol()),
streamtest.WithBaseAddr(peerID),
)
retrieve := retrieval.New(swarm.ZeroAddress, mockStorer, recorder, ps, logger, serverMockAccounting, pricerMock, nil)
retrieve := retrieval.New(swarm.ZeroAddress, mockStorer, recorder, ps, logger, serverMockAccounting, pricerMock, nil, false, noopStampValidator)
validStamp := func(ch swarm.Chunk, stamp []byte) (swarm.Chunk, error) {
return ch.WithStamp(postage.NewStamp(nil, nil, nil, nil)), nil
}
......@@ -267,3 +267,7 @@ func (mp *mockPssSender) Send(ctx context.Context, topic pss.Topic, payload []by
mp.callbackC <- true
return nil
}
// noopStampValidator is a stamp validator for tests that accepts any stamp
// and returns the chunk unchanged.
var noopStampValidator = func(chunk swarm.Chunk, stampBytes []byte) (swarm.Chunk, error) {
return chunk, nil
}
......@@ -14,7 +14,7 @@ import (
"github.com/ethersphere/bee/pkg/swarm"
)
func TestENSntegration(t *testing.T) {
func TestENSIntegration(t *testing.T) {
// TODO: consider using a stable gateway instead of INFURA.
defaultEndpoint := "https://goerli.infura.io/v3/59d83a5a4be74f86b9851190c802297b"
defaultAddr := swarm.MustParseHexAddress("00cb23598c2e520b6a6aae3ddc94fed4435a2909690bdd709bf9d9e7c2aadfad")
......
......@@ -14,7 +14,7 @@ const SwarmContentHashPrefix = swarmContentHashPrefix
var ErrNotImplemented = errNotImplemented
// WithConnectFunc will set the Dial function implementaton.
// WithConnectFunc will set the Dial function implementation.
func WithConnectFunc(fn func(endpoint string, contractAddr string) (*ethclient.Client, *goens.Registry, error)) Option {
return func(c *Client) {
c.connectFn = fn
......
......@@ -64,9 +64,11 @@ type Service struct {
metrics metrics
pricer pricer.Interface
tracer *tracing.Tracer
caching bool
validStamp postage.ValidStampFn
}
func New(addr swarm.Address, storer storage.Storer, streamer p2p.Streamer, chunkPeerer topology.EachPeerer, logger logging.Logger, accounting accounting.Interface, pricer pricer.Interface, tracer *tracing.Tracer) *Service {
func New(addr swarm.Address, storer storage.Storer, streamer p2p.Streamer, chunkPeerer topology.EachPeerer, logger logging.Logger, accounting accounting.Interface, pricer pricer.Interface, tracer *tracing.Tracer, forwarderCaching bool, validStamp postage.ValidStampFn) *Service {
return &Service{
addr: addr,
streamer: streamer,
......@@ -77,6 +79,8 @@ func New(addr swarm.Address, storer storage.Storer, streamer p2p.Streamer, chunk
pricer: pricer,
metrics: newMetrics(),
tracer: tracer,
caching: forwarderCaching,
validStamp: validStamp,
}
}
......@@ -154,16 +158,22 @@ func (s *Service) RetrieveChunk(ctx context.Context, addr swarm.Address, origin
defer cancel()
chunk, peer, requested, err := s.retrieveChunk(ctx, addr, sp, origin)
resultC <- retrievalResult{
select {
case resultC <- retrievalResult{
chunk: chunk,
peer: peer,
err: err,
retrieved: requested,
}:
case <-ctx.Done():
}
}()
} else {
resultC <- retrievalResult{}
select {
case resultC <- retrievalResult{}:
case <-ctx.Done():
}
}
select {
......@@ -410,6 +420,8 @@ func (s *Service) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) (e
ctx = context.WithValue(ctx, requestSourceContextKey{}, p.Address.String())
addr := swarm.NewAddress(req.Addr)
forwarded := false
chunk, err := s.storer.Get(ctx, storage.ModeGetRequest, addr)
if err != nil {
if errors.Is(err, storage.ErrNotFound) {
......@@ -418,11 +430,11 @@ func (s *Service) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) (e
if err != nil {
return fmt.Errorf("retrieve chunk: %w", err)
}
forwarded = true
} else {
return fmt.Errorf("get from store: %w", err)
}
}
stamp, err := chunk.Stamp().MarshalBinary()
if err != nil {
return fmt.Errorf("stamp marshal: %w", err)
......@@ -443,6 +455,28 @@ func (s *Service) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) (e
}
s.logger.Tracef("retrieval protocol debiting peer %s", p.Address.String())
// debit price from p's balance
return debit.Apply()
if err := debit.Apply(); err != nil {
return fmt.Errorf("apply debit: %w", err)
}
// cache the request last, so that putting to the localstore does not slow down the request flow
if s.caching && forwarded {
putMode := storage.ModePutRequest
cch, err := s.validStamp(chunk, stamp)
if err != nil {
// if a chunk with an invalid postage stamp was received
// we force it into the cache.
putMode = storage.ModePutRequestCache
cch = chunk
}
_, err = s.storer.Put(ctx, putMode, cch)
if err != nil {
return fmt.Errorf("retrieve cache put: %w", err)
}
}
return nil
}
......@@ -61,7 +61,7 @@ func TestDelivery(t *testing.T) {
}
// create the server that will handle the request and will serve the response
server := retrieval.New(swarm.MustParseHexAddress("0034"), mockStorer, nil, nil, logger, serverMockAccounting, pricerMock, nil)
server := retrieval.New(swarm.MustParseHexAddress("0034"), mockStorer, nil, nil, logger, serverMockAccounting, pricerMock, nil, false, noopStampValidator)
recorder := streamtest.New(
streamtest.WithProtocols(server.Protocol()),
streamtest.WithBaseAddr(clientAddr),
......@@ -78,7 +78,7 @@ func TestDelivery(t *testing.T) {
return nil
}}
client := retrieval.New(clientAddr, clientMockStorer, recorder, ps, logger, clientMockAccounting, pricerMock, nil)
client := retrieval.New(clientAddr, clientMockStorer, recorder, ps, logger, clientMockAccounting, pricerMock, nil, false, noopStampValidator)
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
defer cancel()
v, err := client.RetrieveChunk(ctx, chunk.Address(), true)
......@@ -167,14 +167,14 @@ func TestRetrieveChunk(t *testing.T) {
t.Fatal(err)
}
server := retrieval.New(serverAddress, serverStorer, nil, nil, logger, accountingmock.NewAccounting(), pricer, nil)
server := retrieval.New(serverAddress, serverStorer, nil, nil, logger, accountingmock.NewAccounting(), pricer, nil, false, noopStampValidator)
recorder := streamtest.New(streamtest.WithProtocols(server.Protocol()))
clientSuggester := mockPeerSuggester{eachPeerRevFunc: func(f topology.EachPeerFunc) error {
_, _, _ = f(serverAddress, 0)
return nil
}}
client := retrieval.New(clientAddress, nil, recorder, clientSuggester, logger, accountingmock.NewAccounting(), pricer, nil)
client := retrieval.New(clientAddress, nil, recorder, clientSuggester, logger, accountingmock.NewAccounting(), pricer, nil, false, noopStampValidator)
got, err := client.RetrieveChunk(context.Background(), chunk.Address(), true)
if err != nil {
......@@ -207,11 +207,15 @@ func TestRetrieveChunk(t *testing.T) {
accountingmock.NewAccounting(),
pricer,
nil,
false,
noopStampValidator,
)
forwarderStore := storemock.NewStorer()
forwarder := retrieval.New(
forwarderAddress,
storemock.NewStorer(), // no chunk in forwarder's store
forwarderStore, // no chunk in forwarder's store
streamtest.New(streamtest.WithProtocols(server.Protocol())), // connect to server
mockPeerSuggester{eachPeerRevFunc: func(f topology.EachPeerFunc) error {
_, _, _ = f(serverAddress, 0) // suggest server's address
......@@ -221,6 +225,8 @@ func TestRetrieveChunk(t *testing.T) {
accountingmock.NewAccounting(),
pricer,
nil,
true, // note explicit caching
noopStampValidator,
)
client := retrieval.New(
......@@ -235,8 +241,14 @@ func TestRetrieveChunk(t *testing.T) {
accountingmock.NewAccounting(),
pricer,
nil,
false,
noopStampValidator,
)
if got, _ := forwarderStore.Has(context.Background(), chunk.Address()); got {
t.Fatalf("forwarder node already has chunk")
}
got, err := client.RetrieveChunk(context.Background(), chunk.Address(), true)
if err != nil {
t.Fatal(err)
......@@ -244,6 +256,11 @@ func TestRetrieveChunk(t *testing.T) {
if !bytes.Equal(got.Data(), chunk.Data()) {
t.Fatalf("got data %x, want %x", got.Data(), chunk.Data())
}
if got, _ := forwarderStore.Has(context.Background(), chunk.Address()); !got {
t.Fatalf("forwarder did not cache chunk")
}
})
}
......@@ -301,8 +318,8 @@ func TestRetrievePreemptiveRetry(t *testing.T) {
return peerSuggester
}
server1 := retrieval.New(serverAddress1, serverStorer1, nil, noPeerSuggester, logger, accountingmock.NewAccounting(), pricerMock, nil)
server2 := retrieval.New(serverAddress2, serverStorer2, nil, noPeerSuggester, logger, accountingmock.NewAccounting(), pricerMock, nil)
server1 := retrieval.New(serverAddress1, serverStorer1, nil, noPeerSuggester, logger, accountingmock.NewAccounting(), pricerMock, nil, false, noopStampValidator)
server2 := retrieval.New(serverAddress2, serverStorer2, nil, noPeerSuggester, logger, accountingmock.NewAccounting(), pricerMock, nil, false, noopStampValidator)
t.Run("peer not reachable", func(t *testing.T) {
ranOnce := true
......@@ -330,7 +347,7 @@ func TestRetrievePreemptiveRetry(t *testing.T) {
streamtest.WithBaseAddr(clientAddress),
)
client := retrieval.New(clientAddress, nil, recorder, peerSuggesterFn(peers...), logger, accountingmock.NewAccounting(), pricerMock, nil)
client := retrieval.New(clientAddress, nil, recorder, peerSuggesterFn(peers...), logger, accountingmock.NewAccounting(), pricerMock, nil, false, noopStampValidator)
got, err := client.RetrieveChunk(context.Background(), chunk.Address(), true)
if err != nil {
......@@ -366,7 +383,7 @@ func TestRetrievePreemptiveRetry(t *testing.T) {
),
)
client := retrieval.New(clientAddress, nil, recorder, peerSuggesterFn(peers...), logger, accountingmock.NewAccounting(), pricerMock, nil)
client := retrieval.New(clientAddress, nil, recorder, peerSuggesterFn(peers...), logger, accountingmock.NewAccounting(), pricerMock, nil, false, noopStampValidator)
got, err := client.RetrieveChunk(context.Background(), chunk.Address(), true)
if err != nil {
......@@ -395,8 +412,8 @@ func TestRetrievePreemptiveRetry(t *testing.T) {
server1MockAccounting := accountingmock.NewAccounting()
server2MockAccounting := accountingmock.NewAccounting()
server1 := retrieval.New(serverAddress1, serverStorer1, nil, noPeerSuggester, logger, server1MockAccounting, pricerMock, nil)
server2 := retrieval.New(serverAddress2, serverStorer2, nil, noPeerSuggester, logger, server2MockAccounting, pricerMock, nil)
server1 := retrieval.New(serverAddress1, serverStorer1, nil, noPeerSuggester, logger, server1MockAccounting, pricerMock, nil, false, noopStampValidator)
server2 := retrieval.New(serverAddress2, serverStorer2, nil, noPeerSuggester, logger, server2MockAccounting, pricerMock, nil, false, noopStampValidator)
// NOTE: must be more than retry duration
// (here one second more)
......@@ -430,7 +447,7 @@ func TestRetrievePreemptiveRetry(t *testing.T) {
clientMockAccounting := accountingmock.NewAccounting()
client := retrieval.New(clientAddress, nil, recorder, peerSuggesterFn(peers...), logger, clientMockAccounting, pricerMock, nil)
client := retrieval.New(clientAddress, nil, recorder, peerSuggesterFn(peers...), logger, clientMockAccounting, pricerMock, nil, false, noopStampValidator)
got, err := client.RetrieveChunk(context.Background(), chunk.Address(), true)
if err != nil {
......@@ -468,21 +485,25 @@ func TestRetrievePreemptiveRetry(t *testing.T) {
t.Run("peer forwards request", func(t *testing.T) {
// server 2 has the chunk
server2 := retrieval.New(serverAddress2, serverStorer2, nil, noPeerSuggester, logger, accountingmock.NewAccounting(), pricerMock, nil)
server2 := retrieval.New(serverAddress2, serverStorer2, nil, noPeerSuggester, logger, accountingmock.NewAccounting(), pricerMock, nil, false, noopStampValidator)
server1Recorder := streamtest.New(
streamtest.WithProtocols(server2.Protocol()),
)
// server 1 will forward request to server 2
server1 := retrieval.New(serverAddress1, serverStorer1, server1Recorder, peerSuggesterFn(serverAddress2), logger, accountingmock.NewAccounting(), pricerMock, nil)
server1 := retrieval.New(serverAddress1, serverStorer1, server1Recorder, peerSuggesterFn(serverAddress2), logger, accountingmock.NewAccounting(), pricerMock, nil, true, noopStampValidator)
clientRecorder := streamtest.New(
streamtest.WithProtocols(server1.Protocol()),
)
// client only knows about server 1
client := retrieval.New(clientAddress, nil, clientRecorder, peerSuggesterFn(serverAddress1), logger, accountingmock.NewAccounting(), pricerMock, nil)
client := retrieval.New(clientAddress, nil, clientRecorder, peerSuggesterFn(serverAddress1), logger, accountingmock.NewAccounting(), pricerMock, nil, false, noopStampValidator)
if got, _ := serverStorer1.Has(context.Background(), chunk.Address()); got {
t.Fatalf("forwarder node already has chunk")
}
got, err := client.RetrieveChunk(context.Background(), chunk.Address(), true)
if err != nil {
......@@ -492,6 +513,18 @@ func TestRetrievePreemptiveRetry(t *testing.T) {
if !bytes.Equal(got.Data(), chunk.Data()) {
t.Fatalf("got data %x, want %x", got.Data(), chunk.Data())
}
has := false
for i := 0; i < 10; i++ {
has, _ = serverStorer1.Has(context.Background(), chunk.Address())
if has {
break
}
time.Sleep(100 * time.Millisecond)
}
if !has {
t.Fatalf("forwarder node does not have chunk")
}
})
}
......@@ -505,3 +538,7 @@ func (s mockPeerSuggester) EachPeer(topology.EachPeerFunc) error {
// EachPeerRev satisfies the topology peer-iteration interface by
// forwarding to the test-configured callback.
func (s mockPeerSuggester) EachPeerRev(f topology.EachPeerFunc) error {
	iterate := s.eachPeerRevFunc
	return iterate(f)
}
// noopStampValidator is a postage-stamp validator stub for tests: it
// accepts every chunk unchanged and never returns an error. The raw
// stamp bytes are intentionally ignored.
var noopStampValidator = func(c swarm.Chunk, _ []byte) (swarm.Chunk, error) {
	return c, nil
}
......@@ -239,7 +239,7 @@ func (s *Service) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) (e
}
// Pay initiates a payment to the given peer
func (s *Service) Pay(ctx context.Context, peer swarm.Address, amount *big.Int, checkAllowance *big.Int) (*big.Int, int64, error) {
func (s *Service) Pay(ctx context.Context, peer swarm.Address, amount, checkAllowance *big.Int) (*big.Int, int64, error) {
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
......
......@@ -41,7 +41,7 @@ type notifyPaymentSentCall struct {
err error
}
func newTestObserver(debtAmounts map[string]*big.Int, shadowBalanceAmounts map[string]*big.Int) *testObserver {
func newTestObserver(debtAmounts, shadowBalanceAmounts map[string]*big.Int) *testObserver {
return &testObserver{
receivedCalled: make(chan notifyPaymentReceivedCall, 1),
sentCalled: make(chan notifyPaymentSentCall, 1),
......
......@@ -41,7 +41,7 @@ var (
// ChequeStore handles the verification and storage of received cheques
type ChequeStore interface {
// ReceiveCheque verifies and stores a cheque. It returns the total amount earned.
ReceiveCheque(ctx context.Context, cheque *SignedCheque, exchangeRate *big.Int, deduction *big.Int) (*big.Int, error)
ReceiveCheque(ctx context.Context, cheque *SignedCheque, exchangeRate, deduction *big.Int) (*big.Int, error)
// LastCheque returns the last cheque we received from a specific chequebook.
LastCheque(chequebook common.Address) (*SignedCheque, error)
// LastCheques returns the last received cheques from every known chequebook.
......@@ -98,7 +98,7 @@ func (s *chequeStore) LastCheque(chequebook common.Address) (*SignedCheque, erro
}
// ReceiveCheque verifies and stores a cheque. It returns the total amount earned.
func (s *chequeStore) ReceiveCheque(ctx context.Context, cheque *SignedCheque, exchangeRate *big.Int, deduction *big.Int) (*big.Int, error) {
func (s *chequeStore) ReceiveCheque(ctx context.Context, cheque *SignedCheque, exchangeRate, deduction *big.Int) (*big.Int, error) {
// verify we are the beneficiary
if cheque.Beneficiary != s.beneficiary {
return nil, ErrWrongBeneficiary
......
......@@ -141,7 +141,7 @@ LOOP:
return nil
}
func (c *factory) verifyChequebookAgainstFactory(ctx context.Context, factory common.Address, chequebook common.Address) (bool, error) {
func (c *factory) verifyChequebookAgainstFactory(ctx context.Context, factory, chequebook common.Address) (bool, error) {
callData, err := factoryABI.Pack("deployedContracts", chequebook)
if err != nil {
return false, err
......@@ -227,27 +227,3 @@ func (c *factory) ERC20Address(ctx context.Context) (common.Address, error) {
}
return *erc20Address, nil
}
var (
	// GoerliChainID is the chain ID of the Goerli Ethereum testnet.
	GoerliChainID = int64(5)
	// GoerliFactoryAddress is the current chequebook factory contract on Goerli.
	GoerliFactoryAddress = common.HexToAddress("0x73c412512E1cA0be3b89b77aB3466dA6A1B9d273")
	// GoerliLegacyFactoryAddress is an older Goerli factory still recognized
	// for chequebooks deployed before the current factory was introduced.
	GoerliLegacyFactoryAddress = common.HexToAddress("0xf0277caffea72734853b834afc9892461ea18474")
	// XDaiChainID is the chain ID of the xDai chain.
	XDaiChainID = int64(100)
	// XDaiFactoryAddress is the chequebook factory contract on xDai.
	XDaiFactoryAddress = common.HexToAddress("0xc2d5a532cf69aa9a1378737d8ccdef884b6e7420")
)
// DiscoverFactoryAddress returns the canonical factory for this chainID
func DiscoverFactoryAddress(chainID int64) (currentFactory common.Address, legacyFactories []common.Address, found bool) {
switch chainID {
case GoerliChainID:
// goerli
return GoerliFactoryAddress, []common.Address{
GoerliLegacyFactoryAddress,
}, true
case XDaiChainID:
// xdai
return XDaiFactoryAddress, []common.Address{}, true
default:
return common.Address{}, nil, false
}
}
......@@ -131,11 +131,9 @@ func Init(
}
if err == storage.ErrNotFound {
logger.Info("no chequebook found, deploying new one.")
if swapInitialDeposit.Cmp(big.NewInt(0)) != 0 {
err = checkBalance(ctx, logger, swapInitialDeposit, swapBackend, chainId, overlayEthAddress, erc20Service)
if err != nil {
return nil, err
}
err = checkBalance(ctx, logger, swapInitialDeposit, swapBackend, chainId, overlayEthAddress, erc20Service)
if err != nil {
return nil, err
}
nonce := make([]byte, 32)
......
......@@ -46,7 +46,7 @@ func NewChequeStore(opts ...Option) chequebook.ChequeStore {
return mock
}
func (s *Service) ReceiveCheque(ctx context.Context, cheque *chequebook.SignedCheque, exchangeRate *big.Int, deduction *big.Int) (*big.Int, error) {
func (s *Service) ReceiveCheque(ctx context.Context, cheque *chequebook.SignedCheque, exchangeRate, deduction *big.Int) (*big.Int, error) {
return s.receiveCheque(ctx, cheque, exchangeRate, deduction)
}
......
......@@ -33,7 +33,7 @@ func MakeSettlementHeaders(exchangeRate, deduction *big.Int) p2p.Headers {
}
}
func ParseSettlementResponseHeaders(receivedHeaders p2p.Headers) (exchange *big.Int, deduction *big.Int, err error) {
func ParseSettlementResponseHeaders(receivedHeaders p2p.Headers) (exchange, deduction *big.Int, err error) {
exchangeRate, err := ParseExchangeHeader(receivedHeaders)
if err != nil {
......
......@@ -42,7 +42,7 @@ type Service struct {
cashoutStatusFunc func(ctx context.Context, peer swarm.Address) (*chequebook.CashoutStatus, error)
}
// WithsettlementFunc sets the mock settlement function
// WithSettlementSentFunc sets the mock settlement function
func WithSettlementSentFunc(f func(swarm.Address) (*big.Int, error)) Option {
return optionFunc(func(s *Service) {
s.settlementSentFunc = f
......@@ -55,7 +55,7 @@ func WithSettlementRecvFunc(f func(swarm.Address) (*big.Int, error)) Option {
})
}
// WithsettlementsFunc sets the mock settlements function
// WithSettlementsSentFunc sets the mock settlements function
func WithSettlementsSentFunc(f func() (map[string]*big.Int, error)) Option {
return optionFunc(func(s *Service) {
s.settlementsSentFunc = f
......@@ -247,7 +247,7 @@ func (s *Service) CashoutStatus(ctx context.Context, peer swarm.Address) (*chequ
return nil, nil
}
func (s *Service) ReceiveCheque(ctx context.Context, peer swarm.Address, cheque *chequebook.SignedCheque, exchangeRate *big.Int, deduction *big.Int) (err error) {
func (s *Service) ReceiveCheque(ctx context.Context, peer swarm.Address, cheque *chequebook.SignedCheque, exchangeRate, deduction *big.Int) (err error) {
defer func() {
if err == nil {
s.deductionForPeers[peer.String()] = struct{}{}
......
......@@ -16,7 +16,7 @@ type Service struct {
deduct *big.Int
}
func New(rate *big.Int, deduct *big.Int) Service {
func New(rate, deduct *big.Int) Service {
return Service{
rate: rate,
deduct: deduct,
......@@ -30,7 +30,7 @@ func (s Service) GetPrice(ctx context.Context) (*big.Int, *big.Int, error) {
return s.rate, s.deduct, nil
}
func (s Service) CurrentRates() (exchangeRate *big.Int, deduction *big.Int, err error) {
func (s Service) CurrentRates() (exchangeRate, deduction *big.Int, err error) {
return s.rate, s.deduct, nil
}
......@@ -42,7 +42,7 @@ func DiscoverPriceOracleAddress(chainID int64) (priceOracleAddress common.Addres
return common.Address{}, false
}
func (s Service) SetValues(rate *big.Int, deduct *big.Int) {
func (s Service) SetValues(rate, deduct *big.Int) {
s.rate = rate
s.deduct = deduct
}
......@@ -131,7 +131,7 @@ func (s *service) GetPrice(ctx context.Context) (*big.Int, *big.Int, error) {
return exchangeRate, deduction, nil
}
func (s *service) CurrentRates() (exchangeRate *big.Int, deduction *big.Int, err error) {
func (s *service) CurrentRates() (exchangeRate, deduction *big.Int, err error) {
if s.exchangeRate.Cmp(big.NewInt(0)) == 0 {
return nil, nil, errors.New("exchange rate not yet available")
}
......@@ -145,21 +145,3 @@ func (s *service) Close() error {
close(s.quitC)
return nil
}
var (
	// goerliChainID is the chain ID of the Goerli Ethereum testnet.
	goerliChainID = int64(5)
	// goerliContractAddress is the price oracle contract deployed on Goerli.
	goerliContractAddress = common.HexToAddress("0x0c9de531dcb38b758fe8a2c163444a5e54ee0db2")
	// xdaiChainID is the chain ID of the xDai chain.
	xdaiChainID = int64(100)
	// xdaiContractAddress is the price oracle contract deployed on xDai.
	xdaiContractAddress = common.HexToAddress("0x0FDc5429C50e2a39066D8A94F3e2D2476fcc3b85")
)
// DiscoverPriceOracleAddress returns the canonical price oracle for this chainID
func DiscoverPriceOracleAddress(chainID int64) (priceOracleAddress common.Address, found bool) {
switch chainID {
case goerliChainID:
return goerliContractAddress, true
case xdaiChainID:
return xdaiContractAddress, true
}
return common.Address{}, false
}
......@@ -75,7 +75,7 @@ func New(proto swapprotocol.Interface, logger logging.Logger, store storage.Stat
}
// ReceiveCheque is called by the swap protocol if a cheque is received.
func (s *Service) ReceiveCheque(ctx context.Context, peer swarm.Address, cheque *chequebook.SignedCheque, exchangeRate *big.Int, deduction *big.Int) (err error) {
func (s *Service) ReceiveCheque(ctx context.Context, peer swarm.Address, cheque *chequebook.SignedCheque, exchangeRate, deduction *big.Int) (err error) {
// check this is the same chequebook for this peer as previously
expectedChequebook, known, err := s.addressbook.Chequebook(peer)
if err != nil {
......
......@@ -50,7 +50,7 @@ type Interface interface {
// Swap is the interface the settlement layer should implement to receive cheques.
type Swap interface {
// ReceiveCheque is called by the swap protocol if a cheque is received.
ReceiveCheque(ctx context.Context, peer swarm.Address, cheque *chequebook.SignedCheque, exchangeRate *big.Int, deduction *big.Int) error
ReceiveCheque(ctx context.Context, peer swarm.Address, cheque *chequebook.SignedCheque, exchangeRate, deduction *big.Int) error
// Handshake is called by the swap protocol when a handshake is received.
Handshake(peer swarm.Address, beneficiary common.Address) error
GetDeductionForPeer(peer swarm.Address) (bool, error)
......
......@@ -296,7 +296,7 @@ func encodeInt64Append(buffer *[]byte, val int64) {
}
func decodeInt64Splice(buffer *[]byte) int64 {
val, n := binary.Varint((*buffer))
val, n := binary.Varint(*buffer)
*buffer = (*buffer)[n:]
return val
}
......
......@@ -66,7 +66,6 @@ var noopSanctionedPeerFn = func(_ swarm.Address) bool { return false }
type Options struct {
SaturationFunc binSaturationFunc
Bootnodes []ma.Multiaddr
StandaloneMode bool
BootnodeMode bool
BitSuffixLength int
}
......@@ -90,7 +89,6 @@ type Kad struct {
peerSig []chan struct{}
peerSigMtx sync.Mutex
logger logging.Logger // logger
standalone bool // indicates whether the node is working in standalone mode
bootnode bool // indicates whether the node is working in bootnode mode
collector *im.Collector
quit chan struct{} // quit channel
......@@ -136,7 +134,6 @@ func New(
manageC: make(chan struct{}, 1),
waitNext: waitnext.New(),
logger: logger,
standalone: o.StandaloneMode,
bootnode: o.BootnodeMode,
collector: im.NewCollector(metricsDB),
quit: make(chan struct{}),
......@@ -155,10 +152,10 @@ func New(
func (k *Kad) generateCommonBinPrefixes() {
bitCombinationsCount := int(math.Pow(2, float64(k.bitSuffixLength)))
bitSufixes := make([]uint8, bitCombinationsCount)
bitSuffixes := make([]uint8, bitCombinationsCount)
for i := 0; i < bitCombinationsCount; i++ {
bitSufixes[i] = uint8(i)
bitSuffixes[i] = uint8(i)
}
addr := swarm.MustParseHexAddress(k.base.String())
......@@ -197,7 +194,7 @@ func (k *Kad) generateCommonBinPrefixes() {
for l := i + 1; l < i+k.bitSuffixLength+1; l++ {
index, pos := l/8, l%8
if hasBit(bitSufixes[j], uint8(bitSuffixPos)) {
if hasBit(bitSuffixes[j], uint8(bitSuffixPos)) {
pseudoAddrBytes[index] = bits.Reverse8(setBit(bits.Reverse8(pseudoAddrBytes[index]), uint8(pos)))
} else {
pseudoAddrBytes[index] = bits.Reverse8(clearBit(bits.Reverse8(pseudoAddrBytes[index]), uint8(pos)))
......@@ -468,7 +465,7 @@ func (k *Kad) connectionAttemptsHandler(ctx context.Context, wg *sync.WaitGroup,
}
}
}
for i := 0; i < 64; i++ {
for i := 0; i < 32; i++ {
go connAttempt(peerConnChan)
}
for i := 0; i < 8; i++ {
......@@ -529,10 +526,6 @@ func (k *Kad) manage() {
default:
}
if k.standalone {
continue
}
oldDepth := k.NeighborhoodDepth()
k.connectNeighbours(&wg, peerConnChan, peerConnChan2)
k.connectBalanced(&wg, peerConnChan2)
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment