Commit 84f54d73 authored by acud's avatar acud Committed by GitHub

storage incentives (#1562)

Co-authored-by: default avatarzelig <viktor.tron@gmail.com>
Co-authored-by: default avatarEsad Akar <esadakar@gmail.com>
Co-authored-by: default avatarRalph Pichler <pichler.ralph@gmail.com>
parent 5bacaf55
25c25
< BucketDepth = uint8(16)
---
> BucketDepth = uint8(10)
43c43
< var DefaultDepth = uint8(12) // 12 is the testnet depth at the time of merging to master
---
> var DefaultDepth = uint8(5) // 12 is the testnet depth at the time of merging to master
48c48
< var Capacity = exp2(23)
---
> var Capacity = exp2(10)
......@@ -56,11 +56,15 @@ jobs:
mkdir -p ~/.kube
cp /etc/rancher/k3s/k3s.yaml ~/.kube/config
echo "kubeconfig: ${HOME}/.kube/config" > ~/.beekeeper.yaml
- name: Apply patches
run: |
patch pkg/postage/batchstore/reserve.go .github/patches/postagereserve.patch
patch pkg/postage/postagecontract/contract.go .github/patches/postagecontract.patch
- name: Set testing cluster (DNS discovery)
run: |
echo -e "127.0.0.10\tregistry.localhost" | sudo tee -a /etc/hosts
for ((i=0; i<REPLICA; i++)); do echo -e "127.0.1.$((i+1))\tbee-${i}.localhost bee-${i}-debug.localhost"; done | sudo tee -a /etc/hosts
timeout 30m ./beeinfra.sh install --local -r "${REPLICA}" --bootnode /dnsaddr/localhost --geth --k3s --pay-threshold 1000000000000
timeout 30m ./beeinfra.sh install --local -r "${REPLICA}" --bootnode /dnsaddr/localhost --geth --k3s --pay-threshold 1000000000000 --postage
- name: Test pingpong
id: pingpong-1
run: until ./beekeeper check pingpong --api-scheme http --debug-api-scheme http --disable-namespace --debug-api-domain localhost --api-domain localhost --node-count "${REPLICA}"; do echo "waiting for pingpong..."; sleep .3; done
......@@ -70,18 +74,12 @@ jobs:
- name: Test settlements
id: settlements-1
run: ./beekeeper check settlements --api-scheme http --debug-api-scheme http --disable-namespace --debug-api-domain localhost --api-domain localhost --node-count "${REPLICA}" --upload-node-count "${REPLICA}" -t 1000000000000
- name: Test pushsync (bytes)
id: pushsync-bytes-1
run: ./beekeeper check pushsync --api-scheme http --debug-api-scheme http --disable-namespace --debug-api-domain localhost --api-domain localhost --node-count "${REPLICA}" --upload-node-count "${REPLICA}" --chunks-per-node 3
- name: Test pushsync (chunks)
id: pushsync-chunks-1
run: ./beekeeper check pushsync --api-scheme http --debug-api-scheme http --disable-namespace --debug-api-domain localhost --api-domain localhost --node-count "${REPLICA}" --upload-node-count "${REPLICA}" --chunks-per-node 3 --upload-chunks
- name: Test retrieval
id: retrieval-1
run: ./beekeeper check retrieval --api-scheme http --debug-api-scheme http --disable-namespace --debug-api-domain localhost --api-domain localhost --node-count "${REPLICA}" --upload-node-count "${REPLICA}" --chunks-per-node 3
- name: Test gc
id: gc-chunk-1
run: ./beekeeper check gc --db-capacity 2000 --api-scheme http --debug-api-scheme http --disable-namespace --debug-api-domain localhost --api-domain localhost --node-count "${REPLICA}" --wait 15
- name: Test manifest
id: manifest-1
run: ./beekeeper check manifest --api-scheme http --debug-api-scheme http --disable-namespace --debug-api-domain localhost --api-domain localhost --node-count "${REPLICA}"
......@@ -103,7 +101,7 @@ jobs:
cp /etc/rancher/k3s/k3s.yaml ~/.kube/config
- name: Set testing cluster (Node connection and clef enabled)
run: |
timeout 30m ./beeinfra.sh install --local -r "${REPLICA}" --geth --clef --k3s --pay-threshold 1000000000000
timeout 30m ./beeinfra.sh install --local -r "${REPLICA}" --geth --clef --k3s --pay-threshold 1000000000000 --postage
- name: Test pingpong
id: pingpong-2
run: until ./beekeeper check pingpong --api-scheme http --debug-api-scheme http --disable-namespace --debug-api-domain localhost --api-domain localhost --node-count "${REPLICA}"; do echo "waiting for pingpong..."; sleep .3; done
......@@ -113,15 +111,28 @@ jobs:
- name: Test settlements
id: settlements-2
run: ./beekeeper check settlements --api-scheme http --debug-api-scheme http --disable-namespace --debug-api-domain localhost --api-domain localhost --node-count "${REPLICA}" --upload-node-count "${REPLICA}" -t 1000000000000
- name: Test pushsync (bytes)
id: pushsync-bytes-2
run: ./beekeeper check pushsync --api-scheme http --debug-api-scheme http --disable-namespace --debug-api-domain localhost --api-domain localhost --node-count "${REPLICA}" --upload-node-count "${REPLICA}" --chunks-per-node 3 --retry-delay 5s
- name: Test pushsync (chunks)
id: pushsync-chunks-2
run: ./beekeeper check pushsync --api-scheme http --debug-api-scheme http --disable-namespace --debug-api-domain localhost --api-domain localhost --node-count "${REPLICA}" --upload-node-count "${REPLICA}" --chunks-per-node 3 --upload-chunks --retry-delay 5s
- name: Test retrieval
id: retrieval-2
run: ./beekeeper check retrieval --api-scheme http --debug-api-scheme http --disable-namespace --debug-api-domain localhost --api-domain localhost --node-count "${REPLICA}" --upload-node-count "${REPLICA}" --chunks-per-node 3
- name: Destroy the cluster
run: |
./beeinfra.sh uninstall
- name: Apply patches
run: |
patch pkg/postage/batchstore/reserve.go .github/patches/postagereserve_gc.patch
- name: Prepare testing cluster (storage incentives setup)
run: |
timeout 10m ./beeinfra.sh prepare --geth --k3s
- name: Set kube config
run: |
mkdir -p ~/.kube
cp /etc/rancher/k3s/k3s.yaml ~/.kube/config
- name: Set testing cluster (storage incentives setup)
run: |
timeout 10m ./beeinfra.sh install --local -r "${REPLICA}" --geth --k3s --pay-threshold 1000000000000 --postage --db-capacity 100
- name: Test gc
id: gc-chunk-1
run: ./beekeeper check gc --db-capacity 100 --reserve --api-scheme http --debug-api-scheme http --disable-namespace --debug-api-domain localhost --api-domain localhost --node-count "${REPLICA}" --wait 15s
- name: Destroy the cluster
run: |
./beeinfra.sh uninstall
- name: Retag Docker image and push for cache
if: success()
run: |
......@@ -152,18 +163,16 @@ jobs:
if: failure()
run: |
export FAILED='no-test'
if ${{ steps.pingpong-1.outcome=='failure' }}; then FAILED=pingpong-1; fi
if ${{ steps.fullconnectivity-1.outcome=='failure' }}; then FAILED=fullconnectivity-1; fi
if ${{ steps.settlements-1.outcome=='failure' }}; then FAILED=settlements-1; fi
if ${{ steps.pushsync-bytes-1.outcome=='failure' }}; then FAILED=pushsync-bytes-1; fi
if ${{ steps.pushsync-chunks-1.outcome=='failure' }}; then FAILED=pushsync-chunks-1; fi
if ${{ steps.retrieval-1.outcome=='failure' }}; then FAILED=retrieval-1; fi
if ${{ steps.manifest-1.outcome=='failure' }}; then FAILED=manifest-1; fi
if ${{ steps.gc-chunk-1.outcome=='failure' }}; then FAILED=gc-chunk-1; fi
if ${{ steps.pingpong-2.outcome=='failure' }}; then FAILED=pingpong-2; fi
if ${{ steps.fullconnectivity-2.outcome=='failure' }}; then FAILED=fullconnectivity-2; fi
if ${{ steps.settlements-2.outcome=='failure' }}; then FAILED=settlements-2; fi
if ${{ steps.pushsync-bytes-2.outcome=='failure' }}; then FAILED=pushsync-bytes-2; fi
if ${{ steps.pushsync-chunks-2.outcome=='failure' }}; then FAILED=pushsync-chunks-2; fi
if ${{ steps.retrieval-2.outcome=='failure' }}; then FAILED=retrieval-2; fi
if ${{ steps.pss.outcome=='failure' }}; then FAILED=pss; fi
if ${{ steps.soc.outcome=='failure' }}; then FAILED=soc; fi
KEYS=$(curl -sSf -X POST https://eu.relay.tunshell.com/api/sessions)
......
......@@ -59,6 +59,8 @@ const (
optionNameSwapInitialDeposit = "swap-initial-deposit"
optionNameSwapEnable = "swap-enable"
optionNameFullNode = "full-node"
optionNamePostageContractAddress = "postage-stamp-address"
optionNamePriceOracleAddress = "price-oracle-address"
)
func init() {
......@@ -226,6 +228,8 @@ func (c *command) setAllFlags(cmd *cobra.Command) {
cmd.Flags().String(optionNameSwapInitialDeposit, "100000000000000000", "initial deposit if deploying a new chequebook")
cmd.Flags().Bool(optionNameSwapEnable, true, "enable swap")
cmd.Flags().Bool(optionNameFullNode, true, "cause the node to start in full mode")
cmd.Flags().String(optionNamePostageContractAddress, "", "postage stamp contract address")
cmd.Flags().String(optionNamePriceOracleAddress, "", "price oracle address")
}
func newLogger(cmd *cobra.Command, verbosity string) (logging.Logger, error) {
......
......@@ -148,6 +148,8 @@ Welcome to the Swarm.... Bzzz Bzzzz Bzzzz
SwapInitialDeposit: c.config.GetString(optionNameSwapInitialDeposit),
SwapEnable: c.config.GetBool(optionNameSwapEnable),
FullNodeMode: fullNode,
PostageContractAddress: c.config.GetString(optionNamePostageContractAddress),
PriceOracleAddress: c.config.GetString(optionNamePriceOracleAddress),
})
if err != nil {
return err
......
......@@ -8,6 +8,7 @@ require (
github.com/coreos/go-semver v0.3.0
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
github.com/ethereum/go-ethereum v1.9.23
github.com/ethersphere/go-storage-incentives-abi v0.1.0
github.com/ethersphere/go-sw3-abi v0.3.2
github.com/ethersphere/langos v1.0.0
github.com/foxcpp/go-mockdns v0.0.0-20201212160233-ede2f9158d15
......
......@@ -159,6 +159,8 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/ethereum/go-ethereum v1.9.23 h1:SIKhg/z4Q7AbvqcxuPYvMxf36che/Rq/Pp0IdYEkbtw=
github.com/ethereum/go-ethereum v1.9.23/go.mod h1:JIfVb6esrqALTExdz9hRYvrP0xBDf6wCncIu1hNwHpM=
github.com/ethersphere/go-storage-incentives-abi v0.1.0 h1:yxNME3q5dha/pUtIYB07DALhhQjd3+uYhGLFqKMXVyg=
github.com/ethersphere/go-storage-incentives-abi v0.1.0/go.mod h1:SXvJVtM4sEsaSKD0jc1ClpDLw8ErPoROZDme4Wrc/Nc=
github.com/ethersphere/go-sw3-abi v0.3.2 h1:BVTuSZ9Ph/JJBglU9pCRSch3gDq4g5QEto6KzMYP/08=
github.com/ethersphere/go-sw3-abi v0.3.2/go.mod h1:BmpsvJ8idQZdYEtWnvxA8POYQ8Rl/NhyCdF0zLMOOJU=
github.com/ethersphere/langos v1.0.0 h1:NBtNKzXTTRSue95uOlzPN4py7Aofs0xWPzyj4AI1Vcc=
......@@ -1320,6 +1322,7 @@ honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9
resenje.org/daemon v0.1.2/go.mod h1:mF5JRpH3EbrxI9WoeKY78e6PqSsbBtX9jAQL5vj/GBA=
resenje.org/email v0.1.3/go.mod h1:OhAVLRG3vqd9NSgayN3pAgzxTmc2B6mAefgShZvEgf0=
resenje.org/jsonhttp v0.2.0/go.mod h1:EDyeguyTWj2fU3D3SCE0qNTgthzyEkHYLM1uu0uikHU=
resenje.org/logging v0.1.5 h1:dw2TEg2kw7lhDqCCH5SqC1pFVuIFcqnTkI5PzgOhopM=
resenje.org/logging v0.1.5/go.mod h1:1IdoCm3+UwYfsplxDGV2pHCkUrLlQzlWwp4r28XfPx4=
resenje.org/marshal v0.1.1/go.mod h1:P7Cla6Ju5CFvW4Y8JbRgWX1Hcy4L1w4qcCsyadO7G94=
resenje.org/recovery v0.1.1/go.mod h1:3S6aCVKMJEWsSAb61oZTteaiqkIfQPTr1RdiWnRbhME=
......
......@@ -705,3 +705,72 @@ paths:
$ref: "SwarmCommon.yaml#/components/responses/500"
default:
description: Default response
"/stamps":
get:
summary: Get all available stamps for this node
tags:
- Get stamp batches
responses:
"200":
description: Returns an array of all available postage batches.
content:
application/json:
schema:
$ref: "SwarmCommon.yaml#/components/schemas/PostageBatchesResponse"
default:
description: Default response
"/stamps/{id}":
get:
summary: Get an individual postage batch status
tags:
- Get stamp batch
responses:
"200":
description: Returns an individual postage batch state
content:
application/json:
schema:
$ref: "SwarmCommon.yaml#/components/schemas/PostageBatchResponse"
"400":
$ref: "SwarmCommon.yaml#/components/responses/400"
default:
description: Default response
"/stamps/{amount}/{depth}":
get:
summary: Buy a new postage batch
tags:
- Buy stamp batch
parameters:
- in: path
name: amount
type: integer
required: true
description: Amount added to the balance
- in: path
name: depth
type: integer
required: true
description: Batch depth. Must be higher than default bucket depth (16)
- in: query
name: label
type: string
required: false
description: An optional label for this batch
responses:
"200":
description: Returns the newly created postage batch ID
content:
application/json:
schema:
$ref: "SwarmCommon.yaml#/components/schemas/BatchIDResponse"
"400":
$ref: "SwarmCommon.yaml#/components/responses/400"
"500":
$ref: "SwarmCommon.yaml#/components/responses/500"
default:
description: Default response
......@@ -263,6 +263,23 @@ components:
reference:
$ref: "#/components/schemas/SwarmReference"
PostageBatchResponse:
type: object
properties:
$ref: "#/components/schemas/PostageBatch"
PostageBatchesResponse:
type: object
properties:
stamps:
$ref: "#/components/schemas/PostageBatches"
BatchIDResponse:
type: object
properties:
batchID:
$ref: "#/components/schemas/SwarmAddress"
Response:
type: object
properties:
......@@ -283,6 +300,20 @@ components:
status:
type: string
PostageBatch:
type: object
properties:
batchID:
$ref: "#/components/schemas/SwarmAddress"
utilization:
type: integer
PostageBatches:
type: array
items:
$ref: "#/components/schemas/PostageBatch"
Settlement:
type: object
properties:
......
......@@ -8,6 +8,7 @@ package api
import (
"context"
"encoding/hex"
"errors"
"fmt"
"io"
......@@ -19,11 +20,14 @@ import (
"time"
"unicode/utf8"
"github.com/ethersphere/bee/pkg/crypto"
"github.com/ethersphere/bee/pkg/feeds"
"github.com/ethersphere/bee/pkg/file/pipeline/builder"
"github.com/ethersphere/bee/pkg/logging"
m "github.com/ethersphere/bee/pkg/metrics"
"github.com/ethersphere/bee/pkg/pinning"
"github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/postage/postagecontract"
"github.com/ethersphere/bee/pkg/pss"
"github.com/ethersphere/bee/pkg/resolver"
"github.com/ethersphere/bee/pkg/storage"
......@@ -34,14 +38,15 @@ import (
)
const (
SwarmPinHeader = "Swarm-Pin"
SwarmTagHeader = "Swarm-Tag"
SwarmEncryptHeader = "Swarm-Encrypt"
SwarmIndexDocumentHeader = "Swarm-Index-Document"
SwarmErrorDocumentHeader = "Swarm-Error-Document"
SwarmFeedIndexHeader = "Swarm-Feed-Index"
SwarmFeedIndexNextHeader = "Swarm-Feed-Index-Next"
SwarmCollectionHeader = "Swarm-Collection"
SwarmPinHeader = "Swarm-Pin"
SwarmTagHeader = "Swarm-Tag"
SwarmEncryptHeader = "Swarm-Encrypt"
SwarmIndexDocumentHeader = "Swarm-Index-Document"
SwarmErrorDocumentHeader = "Swarm-Error-Document"
SwarmFeedIndexHeader = "Swarm-Feed-Index"
SwarmFeedIndexNextHeader = "Swarm-Feed-Index-Next"
SwarmCollectionHeader = "Swarm-Collection"
SwarmPostageBatchIdHeader = "Swarm-Postage-Batch-Id"
)
// The size of buffer used for prefetching content with Langos.
......@@ -65,11 +70,12 @@ const (
var (
errInvalidNameOrAddress = errors.New("invalid name or bzz address")
errNoResolver = errors.New("no resolver connected")
invalidRequest = errors.New("could not validate request")
invalidContentType = errors.New("invalid content-type")
invalidContentLength = errors.New("invalid content-length")
directoryStoreError = errors.New("could not store directory")
fileStoreError = errors.New("could not store file")
errInvalidRequest = errors.New("could not validate request")
errInvalidContentType = errors.New("invalid content-type")
errInvalidContentLength = errors.New("invalid content-length")
errDirectoryStore = errors.New("could not store directory")
errFileStore = errors.New("could not store file")
errInvalidPostageBatch = errors.New("invalid postage batch id")
)
// Service is the API service interface.
......@@ -80,15 +86,18 @@ type Service interface {
}
type server struct {
tags *tags.Tags
storer storage.Storer
resolver resolver.Interface
pss pss.Interface
traversal traversal.Traverser
pinning pinning.Interface
logger logging.Logger
tracer *tracing.Tracer
feedFactory feeds.Factory
tags *tags.Tags
storer storage.Storer
resolver resolver.Interface
pss pss.Interface
traversal traversal.Traverser
pinning pinning.Interface
logger logging.Logger
tracer *tracing.Tracer
feedFactory feeds.Factory
signer crypto.Signer
post postage.Service
postageContract postagecontract.Interface
Options
http.Handler
metrics metrics
......@@ -109,20 +118,23 @@ const (
)
// New will create a and initialize a new API service.
func New(tags *tags.Tags, storer storage.Storer, resolver resolver.Interface, pss pss.Interface, traversalService traversal.Traverser, pinning pinning.Interface, feedFactory feeds.Factory, logger logging.Logger, tracer *tracing.Tracer, o Options) Service {
func New(tags *tags.Tags, storer storage.Storer, resolver resolver.Interface, pss pss.Interface, traversalService traversal.Traverser, pinning pinning.Interface, feedFactory feeds.Factory, post postage.Service, postageContract postagecontract.Interface, signer crypto.Signer, logger logging.Logger, tracer *tracing.Tracer, o Options) Service {
s := &server{
tags: tags,
storer: storer,
resolver: resolver,
pss: pss,
traversal: traversalService,
pinning: pinning,
feedFactory: feedFactory,
Options: o,
logger: logger,
tracer: tracer,
metrics: newMetrics(),
quit: make(chan struct{}),
tags: tags,
storer: storer,
resolver: resolver,
pss: pss,
traversal: traversalService,
pinning: pinning,
feedFactory: feedFactory,
post: post,
postageContract: postageContract,
signer: signer,
Options: o,
logger: logger,
tracer: tracer,
metrics: newMetrics(),
quit: make(chan struct{}),
}
s.setupRouting()
......@@ -211,6 +223,21 @@ func requestEncrypt(r *http.Request) bool {
return strings.ToLower(r.Header.Get(SwarmEncryptHeader)) == "true"
}
func requestPostageBatchId(r *http.Request) ([]byte, error) {
if h := strings.ToLower(r.Header.Get(SwarmPostageBatchIdHeader)); h != "" {
if len(h) != 64 {
return nil, errInvalidPostageBatch
}
b, err := hex.DecodeString(h)
if err != nil {
return nil, errInvalidPostageBatch
}
return b, nil
}
return nil, errInvalidPostageBatch
}
func (s *server) newTracingHandler(spanName string) func(h http.Handler) http.Handler {
return func(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
......@@ -285,9 +312,36 @@ func equalASCIIFold(s, t string) bool {
return s == t
}
type stamperPutter struct {
storage.Storer
stamper postage.Stamper
}
func newStamperPutter(s storage.Storer, post postage.Service, signer crypto.Signer, batch []byte) (storage.Storer, error) {
i, err := post.GetStampIssuer(batch)
if err != nil {
return nil, fmt.Errorf("stamp issuer: %w", err)
}
stamper := postage.NewStamper(i, signer)
return &stamperPutter{Storer: s, stamper: stamper}, nil
}
func (p *stamperPutter) Put(ctx context.Context, mode storage.ModePut, chs ...swarm.Chunk) (exist []bool, err error) {
for i, c := range chs {
stamp, err := p.stamper.Stamp(c.Address())
if err != nil {
return nil, err
}
chs[i] = c.WithStamp(stamp)
}
return p.Storer.Put(ctx, mode, chs...)
}
type pipelineFunc func(context.Context, io.Reader, int64) (swarm.Address, error)
func requestPipelineFn(s storage.Storer, r *http.Request) pipelineFunc {
func requestPipelineFn(s storage.Putter, r *http.Request) pipelineFunc {
mode, encrypt := requestModePut(r), requestEncrypt(r)
return func(ctx context.Context, r io.Reader, l int64) (swarm.Address, error) {
pipe := builder.NewPipelineBuilder(ctx, s, mode, encrypt)
......
......@@ -5,6 +5,9 @@
package api_test
import (
"bytes"
"crypto/rand"
"encoding/hex"
"errors"
"io"
"io/ioutil"
......@@ -15,13 +18,20 @@ import (
"time"
"github.com/ethersphere/bee/pkg/api"
"github.com/ethersphere/bee/pkg/crypto"
"github.com/ethersphere/bee/pkg/feeds"
"github.com/ethersphere/bee/pkg/jsonhttp/jsonhttptest"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/pinning"
"github.com/ethersphere/bee/pkg/postage"
mockpost "github.com/ethersphere/bee/pkg/postage/mock"
"github.com/ethersphere/bee/pkg/postage/postagecontract"
"github.com/ethersphere/bee/pkg/pss"
"github.com/ethersphere/bee/pkg/resolver"
resolverMock "github.com/ethersphere/bee/pkg/resolver/mock"
statestore "github.com/ethersphere/bee/pkg/statestore/mock"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/storage/mock"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/tags"
"github.com/ethersphere/bee/pkg/traversal"
......@@ -29,6 +39,19 @@ import (
"resenje.org/web"
)
var (
batchInvalid = []byte{0}
batchOk = make([]byte, 32)
batchOkStr string
batchEmpty = []byte{}
)
func init() {
_, _ = rand.Read(batchOk)
batchOkStr = hex.EncodeToString(batchOk)
}
type testServerOptions struct {
Storer storage.Storer
Resolver resolver.Interface
......@@ -43,10 +66,14 @@ type testServerOptions struct {
PreventRedirect bool
Feeds feeds.Factory
CORSAllowedOrigins []string
PostageContract postagecontract.Interface
Post postage.Service
}
func newTestServer(t *testing.T, o testServerOptions) (*http.Client, *websocket.Conn, string) {
t.Helper()
pk, _ := crypto.GenerateSecp256k1Key()
signer := crypto.NewDefaultSigner(pk)
if o.Logger == nil {
o.Logger = logging.New(ioutil.Discard, 0)
......@@ -57,7 +84,10 @@ func newTestServer(t *testing.T, o testServerOptions) (*http.Client, *websocket.
if o.WsPingPeriod == 0 {
o.WsPingPeriod = 60 * time.Second
}
s := api.New(o.Tags, o.Storer, o.Resolver, o.Pss, o.Traversal, o.Pinning, o.Feeds, o.Logger, nil, api.Options{
if o.Post == nil {
o.Post = mockpost.New()
}
s := api.New(o.Tags, o.Storer, o.Resolver, o.Pss, o.Traversal, o.Pinning, o.Feeds, o.Post, o.PostageContract, signer, o.Logger, nil, api.Options{
CORSAllowedOrigins: o.CORSAllowedOrigins,
GatewayMode: o.GatewayMode,
WsPingPeriod: o.WsPingPeriod,
......@@ -116,11 +146,11 @@ func request(t *testing.T, client *http.Client, method, resource string, body io
func TestParseName(t *testing.T) {
const bzzHash = "89c17d0d8018a19057314aa035e61c9d23c47581a61dd3a79a7839692c617e4d"
log := logging.New(ioutil.Discard, 0)
testCases := []struct {
desc string
name string
log logging.Logger
res resolver.Interface
noResolver bool
wantAdr swarm.Address
......@@ -165,9 +195,6 @@ func TestParseName(t *testing.T) {
},
}
for _, tC := range testCases {
if tC.log == nil {
tC.log = logging.New(ioutil.Discard, 0)
}
if tC.res == nil && !tC.noResolver {
tC.res = resolverMock.NewResolver(
resolverMock.WithResolveFunc(func(string) (swarm.Address, error) {
......@@ -175,7 +202,11 @@ func TestParseName(t *testing.T) {
}))
}
s := api.New(nil, nil, tC.res, nil, nil, nil, nil, tC.log, nil, api.Options{}).(*api.Server)
pk, _ := crypto.GenerateSecp256k1Key()
signer := crypto.NewDefaultSigner(pk)
mockPostage := mockpost.New()
s := api.New(nil, nil, tC.res, nil, nil, nil, nil, mockPostage, nil, signer, log, nil, api.Options{}).(*api.Server)
t.Run(tC.desc, func(t *testing.T) {
got, err := s.ResolveNameOrAddress(tC.name)
......@@ -226,3 +257,54 @@ func TestCalculateNumberOfChunksEncrypted(t *testing.T) {
}
}
}
// TestPostageHeaderError tests that incorrect postage batch ids
// provided to the api correct the appropriate error code.
func TestPostageHeaderError(t *testing.T) {
var (
mockStorer = mock.NewStorer()
mockStatestore = statestore.NewStateStore()
logger = logging.New(ioutil.Discard, 5)
mp = mockpost.New(mockpost.WithIssuer(postage.NewStampIssuer("", "", batchOk, 11, 10)))
client, _, _ = newTestServer(t, testServerOptions{
Storer: mockStorer,
Tags: tags.NewTags(mockStatestore, logger),
Logger: logger,
Post: mp,
})
endpoints = []string{
"bytes", "bzz", "chunks",
}
)
content := []byte{7: 0} // 8 zeros
for _, endpoint := range endpoints {
t.Run(endpoint+": empty batch", func(t *testing.T) {
hexbatch := hex.EncodeToString(batchEmpty)
expCode := http.StatusBadRequest
jsonhttptest.Request(t, client, http.MethodPost, "/"+endpoint, expCode,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, hexbatch),
jsonhttptest.WithRequestHeader(api.ContentTypeHeader, "application/octet-stream"),
jsonhttptest.WithRequestBody(bytes.NewReader(content)),
)
})
t.Run(endpoint+": ok batch", func(t *testing.T) {
hexbatch := hex.EncodeToString(batchOk)
expCode := http.StatusOK
jsonhttptest.Request(t, client, http.MethodPost, "/"+endpoint, expCode,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, hexbatch),
jsonhttptest.WithRequestHeader(api.ContentTypeHeader, "application/octet-stream"),
jsonhttptest.WithRequestBody(bytes.NewReader(content)),
)
})
t.Run(endpoint+": bad batch", func(t *testing.T) {
hexbatch := hex.EncodeToString(batchInvalid)
expCode := http.StatusBadRequest
jsonhttptest.Request(t, client, http.MethodPost, "/"+endpoint, expCode,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, hexbatch),
jsonhttptest.WithRequestHeader(api.ContentTypeHeader, "application/octet-stream"),
jsonhttptest.WithRequestBody(bytes.NewReader(content)),
)
})
}
}
......@@ -9,7 +9,6 @@ import (
"net/http"
"strings"
"github.com/ethersphere/bee/pkg/file/pipeline/builder"
"github.com/ethersphere/bee/pkg/jsonhttp"
"github.com/ethersphere/bee/pkg/sctx"
"github.com/ethersphere/bee/pkg/swarm"
......@@ -50,14 +49,31 @@ func (s *server) bytesUploadHandler(w http.ResponseWriter, r *http.Request) {
// Add the tag to the context
ctx := sctx.SetTag(r.Context(), tag)
pipe := builder.NewPipelineBuilder(ctx, s.storer, requestModePut(r), requestEncrypt(r))
address, err := builder.FeedPipeline(ctx, pipe, r.Body, r.ContentLength)
batch, err := requestPostageBatchId(r)
if err != nil {
logger.Debugf("bytes upload: postage batch id:%v", err)
logger.Error("bytes upload: postage batch id")
jsonhttp.BadRequest(w, nil)
return
}
putter, err := newStamperPutter(s.storer, s.post, s.signer, batch)
if err != nil {
logger.Debugf("bytes upload: get putter:%v", err)
logger.Error("bytes upload: putter")
jsonhttp.BadRequest(w, nil)
return
}
p := requestPipelineFn(putter, r)
address, err := p(ctx, r.Body, r.ContentLength)
if err != nil {
logger.Debugf("bytes upload: split write all: %v", err)
logger.Error("bytes upload: split write all")
jsonhttp.InternalServerError(w, nil)
return
}
if created {
_, err = tag.DoneSplit(address)
if err != nil {
......
......@@ -16,6 +16,7 @@ import (
"github.com/ethersphere/bee/pkg/jsonhttp/jsonhttptest"
"github.com/ethersphere/bee/pkg/logging"
pinning "github.com/ethersphere/bee/pkg/pinning/mock"
mockpost "github.com/ethersphere/bee/pkg/postage/mock"
statestore "github.com/ethersphere/bee/pkg/statestore/mock"
"github.com/ethersphere/bee/pkg/storage/mock"
"github.com/ethersphere/bee/pkg/swarm"
......@@ -35,11 +36,13 @@ func TestBytes(t *testing.T) {
var (
storerMock = mock.NewStorer()
pinningMock = pinning.NewServiceMock()
logger = logging.New(ioutil.Discard, 0)
client, _, _ = newTestServer(t, testServerOptions{
Storer: storerMock,
Tags: tags.NewTags(statestore.NewStateStore(), logging.New(ioutil.Discard, 0)),
Pinning: pinningMock,
Logger: logging.New(ioutil.Discard, 5),
Logger: logger,
Post: mockpost.New(mockpost.WithAcceptAll()),
})
)
......@@ -52,6 +55,7 @@ func TestBytes(t *testing.T) {
t.Run("upload", func(t *testing.T) {
chunkAddr := swarm.MustParseHexAddress(expHash)
jsonhttptest.Request(t, client, http.MethodPost, resource, http.StatusOK,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(bytes.NewReader(content)),
jsonhttptest.WithExpectedJSONResponse(api.BytesPostResponse{
Reference: chunkAddr,
......@@ -74,6 +78,7 @@ func TestBytes(t *testing.T) {
t.Run("upload-with-pinning", func(t *testing.T) {
var res api.BytesPostResponse
jsonhttptest.Request(t, client, http.MethodPost, resource, http.StatusOK,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(bytes.NewReader(content)),
jsonhttptest.WithRequestHeader(api.SwarmPinHeader, "true"),
jsonhttptest.WithUnmarshalJSONResponse(&res),
......
......@@ -43,15 +43,32 @@ func (s *server) bzzUploadHandler(w http.ResponseWriter, r *http.Request) {
if err != nil {
logger.Debugf("bzz upload: parse content type header %q: %v", contentType, err)
logger.Errorf("bzz upload: parse content type header %q", contentType)
jsonhttp.BadRequest(w, invalidContentType)
jsonhttp.BadRequest(w, errInvalidContentType)
return
}
batch, err := requestPostageBatchId(r)
if err != nil {
logger.Debugf("bzz upload: postage batch id: %v", err)
logger.Error("bzz upload: postage batch id")
jsonhttp.BadRequest(w, "invalid postage batch id")
return
}
putter, err := newStamperPutter(s.storer, s.post, s.signer, batch)
if err != nil {
logger.Debugf("bzz upload: putter: %v", err)
logger.Error("bzz upload: putter")
jsonhttp.BadRequest(w, nil)
return
}
isDir := r.Header.Get(SwarmCollectionHeader)
if strings.ToLower(isDir) == "true" || mediaType == multiPartFormData {
s.dirUploadHandler(w, r)
s.dirUploadHandler(w, r, putter)
return
}
s.fileUploadHandler(w, r)
s.fileUploadHandler(w, r, putter)
}
// fileUploadResponse is returned when an HTTP request to upload a file is successful
......@@ -61,7 +78,7 @@ type bzzUploadResponse struct {
// fileUploadHandler uploads the file and its metadata supplied in the file body and
// the headers
func (s *server) fileUploadHandler(w http.ResponseWriter, r *http.Request) {
func (s *server) fileUploadHandler(w http.ResponseWriter, r *http.Request, storer storage.Storer) {
logger := tracing.NewLoggerWithTraceID(r.Context(), s.logger)
var (
reader io.Reader
......@@ -105,7 +122,7 @@ func (s *server) fileUploadHandler(w http.ResponseWriter, r *http.Request) {
if err != nil {
logger.Debugf("bzz upload file: content length, file %q: %v", fileName, err)
logger.Errorf("bzz upload file: content length, file %q", fileName)
jsonhttp.BadRequest(w, invalidContentLength)
jsonhttp.BadRequest(w, errInvalidContentLength)
return
}
} else {
......@@ -136,14 +153,14 @@ func (s *server) fileUploadHandler(w http.ResponseWriter, r *http.Request) {
reader = tmp
}
p := requestPipelineFn(s.storer, r)
p := requestPipelineFn(storer, r)
// first store the file and get its reference
fr, err := p(ctx, reader, int64(fileSize))
if err != nil {
logger.Debugf("bzz upload file: file store, file %q: %v", fileName, err)
logger.Errorf("bzz upload file: file store, file %q", fileName)
jsonhttp.InternalServerError(w, fileStoreError)
jsonhttp.InternalServerError(w, errFileStore)
return
}
......@@ -153,7 +170,7 @@ func (s *server) fileUploadHandler(w http.ResponseWriter, r *http.Request) {
}
encrypt := requestEncrypt(r)
l := loadsave.New(s.storer, requestModePut(r), encrypt)
l := loadsave.New(storer, requestModePut(r), encrypt)
m, err := manifest.NewDefaultManifest(l, encrypt)
if err != nil {
......
......@@ -24,6 +24,7 @@ import (
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/manifest"
pinning "github.com/ethersphere/bee/pkg/pinning/mock"
mockpost "github.com/ethersphere/bee/pkg/postage/mock"
statestore "github.com/ethersphere/bee/pkg/statestore/mock"
"github.com/ethersphere/bee/pkg/storage"
smock "github.com/ethersphere/bee/pkg/storage/mock"
......@@ -46,12 +47,14 @@ func TestBzzFiles(t *testing.T) {
Pinning: pinningMock,
Tags: tags.NewTags(statestoreMock, logger),
Logger: logger,
Post: mockpost.New(mockpost.WithAcceptAll()),
})
)
t.Run("invalid-content-type", func(t *testing.T) {
jsonhttptest.Request(t, client, http.MethodPost, fileUploadResource,
http.StatusBadRequest,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(bytes.NewReader(simpleData)),
jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{
Message: api.InvalidContentType.Error(),
......@@ -89,6 +92,7 @@ func TestBzzFiles(t *testing.T) {
})
address := swarm.MustParseHexAddress("f30c0aa7e9e2a0ef4c9b1b750ebfeaeb7c7c24da700bb089da19a46e3677824b")
jsonhttptest.Request(t, client, http.MethodPost, fileUploadResource, http.StatusOK,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(tr),
jsonhttptest.WithRequestHeader("Content-Type", api.ContentTypeTar),
jsonhttptest.WithExpectedJSONResponse(api.BzzUploadResponse{
......@@ -138,6 +142,7 @@ func TestBzzFiles(t *testing.T) {
})
address := swarm.MustParseHexAddress("f30c0aa7e9e2a0ef4c9b1b750ebfeaeb7c7c24da700bb089da19a46e3677824b")
jsonhttptest.Request(t, client, http.MethodPost, fileUploadResource, http.StatusOK,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestHeader(api.SwarmPinHeader, "true"),
jsonhttptest.WithRequestBody(tr),
jsonhttptest.WithRequestHeader("Content-Type", api.ContentTypeTar),
......@@ -172,6 +177,7 @@ func TestBzzFiles(t *testing.T) {
var resp api.BzzUploadResponse
jsonhttptest.Request(t, client, http.MethodPost,
fileUploadResource+"?name="+fileName, http.StatusOK,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(bytes.NewReader(simpleData)),
jsonhttptest.WithRequestHeader(api.SwarmEncryptHeader, "True"),
jsonhttptest.WithRequestHeader("Content-Type", "image/jpeg; charset=utf-8"),
......@@ -202,6 +208,7 @@ func TestBzzFiles(t *testing.T) {
jsonhttptest.Request(t, client, http.MethodPost,
fileUploadResource+"?name="+fileName, http.StatusOK,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(bytes.NewReader(simpleData)),
jsonhttptest.WithExpectedJSONResponse(api.BzzUploadResponse{
Reference: swarm.MustParseHexAddress(rootHash),
......@@ -242,6 +249,7 @@ func TestBzzFiles(t *testing.T) {
rcvdHeader := jsonhttptest.Request(t, client, http.MethodPost,
fileUploadResource+"?name="+fileName, http.StatusOK,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(strings.NewReader(sampleHtml)),
jsonhttptest.WithExpectedJSONResponse(api.BzzUploadResponse{
Reference: swarm.MustParseHexAddress(rootHash),
......@@ -280,6 +288,7 @@ func TestBzzFiles(t *testing.T) {
jsonhttptest.Request(t, client, http.MethodPost,
fileUploadResource+"?name="+fileName, http.StatusOK,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(bytes.NewReader(simpleData)),
jsonhttptest.WithExpectedJSONResponse(api.BzzUploadResponse{
Reference: swarm.MustParseHexAddress(rootHash),
......@@ -392,11 +401,13 @@ func TestBzzFilesRangeRequests(t *testing.T) {
Storer: smock.NewStorer(),
Tags: tags.NewTags(mockStatestore, logger),
Logger: logger,
Post: mockpost.New(mockpost.WithAcceptAll()),
})
var resp api.BzzUploadResponse
testOpts := []jsonhttptest.Option{
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(upload.reader),
jsonhttptest.WithRequestHeader("Content-Type", upload.contentType),
jsonhttptest.WithUnmarshalJSONResponse(&resp),
......@@ -505,6 +516,7 @@ func TestFeedIndirection(t *testing.T) {
Storer: storer,
Tags: tags.NewTags(mockStatestore, logger),
Logger: logger,
Post: mockpost.New(mockpost.WithAcceptAll()),
})
)
// tar all the test case files
......@@ -520,6 +532,7 @@ func TestFeedIndirection(t *testing.T) {
var resp api.BzzUploadResponse
options := []jsonhttptest.Option{
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(tarReader),
jsonhttptest.WithRequestHeader("Content-Type", api.ContentTypeTar),
jsonhttptest.WithRequestHeader(api.SwarmCollectionHeader, "True"),
......
......@@ -84,7 +84,23 @@ func (s *server) chunkUploadHandler(w http.ResponseWriter, r *http.Request) {
return
}
seen, err := s.storer.Put(ctx, requestModePut(r), chunk)
batch, err := requestPostageBatchId(r)
if err != nil {
s.logger.Debugf("chunk upload: postage batch id: %v", err)
s.logger.Error("chunk upload: postage batch id")
jsonhttp.BadRequest(w, "invalid postage batch id")
return
}
putter, err := newStamperPutter(s.storer, s.post, s.signer, batch)
if err != nil {
s.logger.Debugf("chunk upload: putter:%v", err)
s.logger.Error("chunk upload: putter")
jsonhttp.BadRequest(w, nil)
return
}
seen, err := putter.Put(ctx, requestModePut(r), chunk)
if err != nil {
s.logger.Debugf("chunk upload: chunk write error: %v, addr %s", err, chunk.Address())
s.logger.Error("chunk upload: chunk write error")
......
......@@ -13,6 +13,7 @@ import (
"github.com/ethersphere/bee/pkg/logging"
pinning "github.com/ethersphere/bee/pkg/pinning/mock"
mockpost "github.com/ethersphere/bee/pkg/postage/mock"
statestore "github.com/ethersphere/bee/pkg/statestore/mock"
"github.com/ethersphere/bee/pkg/tags"
......@@ -45,11 +46,13 @@ func TestChunkUploadDownload(t *testing.T) {
Storer: storerMock,
Pinning: pinningMock,
Tags: tag,
Post: mockpost.New(mockpost.WithAcceptAll()),
})
)
t.Run("empty chunk", func(t *testing.T) {
jsonhttptest.Request(t, client, http.MethodPost, chunksEndpoint, http.StatusBadRequest,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{
Message: "data length",
Code: http.StatusBadRequest,
......@@ -59,6 +62,7 @@ func TestChunkUploadDownload(t *testing.T) {
t.Run("ok", func(t *testing.T) {
jsonhttptest.Request(t, client, http.MethodPost, chunksEndpoint, http.StatusOK,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(bytes.NewReader(chunk.Data())),
jsonhttptest.WithExpectedJSONResponse(api.ChunkAddressResponse{Reference: chunk.Address()}),
)
......@@ -77,6 +81,7 @@ func TestChunkUploadDownload(t *testing.T) {
t.Run("pin-invalid-value", func(t *testing.T) {
jsonhttptest.Request(t, client, http.MethodPost, chunksEndpoint, http.StatusOK,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(bytes.NewReader(chunk.Data())),
jsonhttptest.WithExpectedJSONResponse(api.ChunkAddressResponse{Reference: chunk.Address()}),
jsonhttptest.WithRequestHeader(api.SwarmPinHeader, "invalid-pin"),
......@@ -89,6 +94,7 @@ func TestChunkUploadDownload(t *testing.T) {
})
t.Run("pin-header-missing", func(t *testing.T) {
jsonhttptest.Request(t, client, http.MethodPost, chunksEndpoint, http.StatusOK,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(bytes.NewReader(chunk.Data())),
jsonhttptest.WithExpectedJSONResponse(api.ChunkAddressResponse{Reference: chunk.Address()}),
)
......@@ -101,6 +107,7 @@ func TestChunkUploadDownload(t *testing.T) {
t.Run("pin-ok", func(t *testing.T) {
address := chunk.Address()
jsonhttptest.Request(t, client, http.MethodPost, chunksEndpoint, http.StatusOK,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(bytes.NewReader(chunk.Data())),
jsonhttptest.WithExpectedJSONResponse(api.ChunkAddressResponse{Reference: address}),
jsonhttptest.WithRequestHeader(api.SwarmPinHeader, "True"),
......
......@@ -24,17 +24,18 @@ import (
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/manifest"
"github.com/ethersphere/bee/pkg/sctx"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/tags"
"github.com/ethersphere/bee/pkg/tracing"
)
// dirUploadHandler uploads a directory supplied as a tar in an HTTP request
func (s *server) dirUploadHandler(w http.ResponseWriter, r *http.Request) {
func (s *server) dirUploadHandler(w http.ResponseWriter, r *http.Request, storer storage.Storer) {
logger := tracing.NewLoggerWithTraceID(r.Context(), s.logger)
if r.Body == http.NoBody {
logger.Error("bzz upload dir: request has no body")
jsonhttp.BadRequest(w, invalidRequest)
jsonhttp.BadRequest(w, errInvalidRequest)
return
}
contentType := r.Header.Get(contentTypeHeader)
......@@ -42,7 +43,7 @@ func (s *server) dirUploadHandler(w http.ResponseWriter, r *http.Request) {
if err != nil {
logger.Errorf("bzz upload dir: invalid content-type")
logger.Debugf("bzz upload dir: invalid content-type err: %v", err)
jsonhttp.BadRequest(w, invalidContentType)
jsonhttp.BadRequest(w, errInvalidContentType)
return
}
......@@ -54,7 +55,7 @@ func (s *server) dirUploadHandler(w http.ResponseWriter, r *http.Request) {
dReader = &multipartReader{r: multipart.NewReader(r.Body, params["boundary"])}
default:
logger.Error("bzz upload dir: invalid content-type for directory upload")
jsonhttp.BadRequest(w, invalidContentType)
jsonhttp.BadRequest(w, errInvalidContentType)
return
}
defer r.Body.Close()
......@@ -68,13 +69,15 @@ func (s *server) dirUploadHandler(w http.ResponseWriter, r *http.Request) {
}
// Add the tag to the context
ctx := sctx.SetTag(r.Context(), tag)
reference, err := storeDir(
sctx.SetTag(r.Context(), tag),
ctx,
requestEncrypt(r),
dReader,
s.logger,
requestPipelineFn(s.storer, r),
loadsave.New(s.storer, requestModePut(r), requestEncrypt(r)),
requestPipelineFn(storer, r),
loadsave.New(storer, requestModePut(r), requestEncrypt(r)),
r.Header.Get(SwarmIndexDocumentHeader),
r.Header.Get(SwarmErrorDocumentHeader),
tag,
......@@ -83,7 +86,7 @@ func (s *server) dirUploadHandler(w http.ResponseWriter, r *http.Request) {
if err != nil {
logger.Debugf("bzz upload dir: store dir err: %v", err)
logger.Errorf("bzz upload dir: store dir")
jsonhttp.InternalServerError(w, directoryStoreError)
jsonhttp.InternalServerError(w, errDirectoryStore)
return
}
if created {
......
......@@ -24,6 +24,7 @@ import (
"github.com/ethersphere/bee/pkg/jsonhttp/jsonhttptest"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/manifest"
mockpost "github.com/ethersphere/bee/pkg/postage/mock"
statestore "github.com/ethersphere/bee/pkg/statestore/mock"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/storage/mock"
......@@ -44,12 +45,14 @@ func TestDirs(t *testing.T) {
Tags: tags.NewTags(mockStatestore, logger),
Logger: logger,
PreventRedirect: true,
Post: mockpost.New(mockpost.WithAcceptAll()),
})
)
t.Run("empty request body", func(t *testing.T) {
jsonhttptest.Request(t, client, http.MethodPost, dirUploadResource,
http.StatusBadRequest,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(bytes.NewReader(nil)),
jsonhttptest.WithRequestHeader(api.SwarmCollectionHeader, "True"),
jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{
......@@ -65,6 +68,7 @@ func TestDirs(t *testing.T) {
jsonhttptest.Request(t, client, http.MethodPost, dirUploadResource,
http.StatusInternalServerError,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(file),
jsonhttptest.WithRequestHeader(api.SwarmCollectionHeader, "True"),
jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{
......@@ -84,6 +88,7 @@ func TestDirs(t *testing.T) {
// submit valid tar, but with wrong content-type
jsonhttptest.Request(t, client, http.MethodPost, dirUploadResource,
http.StatusBadRequest,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(tarReader),
jsonhttptest.WithRequestHeader(api.SwarmCollectionHeader, "True"),
jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{
......@@ -382,6 +387,7 @@ func TestDirs(t *testing.T) {
var resp api.BzzUploadResponse
options := []jsonhttptest.Option{
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(tarReader),
jsonhttptest.WithRequestHeader(api.SwarmCollectionHeader, "True"),
jsonhttptest.WithRequestHeader("Content-Type", api.ContentTypeTar),
......@@ -414,6 +420,7 @@ func TestDirs(t *testing.T) {
var resp api.BzzUploadResponse
options := []jsonhttptest.Option{
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(mwReader),
jsonhttptest.WithRequestHeader(api.SwarmCollectionHeader, "True"),
jsonhttptest.WithRequestHeader("Content-Type", fmt.Sprintf("multipart/form-data; boundary=%q", mwBoundary)),
......
......@@ -17,16 +17,20 @@ type (
TagResponse = tagResponse
TagRequest = tagRequest
ListTagsResponse = listTagsResponse
PostageCreateResponse = postageCreateResponse
PostageStampResponse = postageStampResponse
PostageStampsResponse = postageStampsResponse
)
var (
InvalidContentType = invalidContentType
InvalidRequest = invalidRequest
DirectoryStoreError = directoryStoreError
InvalidContentType = errInvalidContentType
InvalidRequest = errInvalidRequest
DirectoryStoreError = errDirectoryStore
)
var (
ContentTypeTar = contentTypeTar
ContentTypeTar = contentTypeTar
ContentTypeHeader = contentTypeHeader
)
var (
......
......@@ -139,7 +139,24 @@ func (s *server) feedPostHandler(w http.ResponseWriter, r *http.Request) {
jsonhttp.BadRequest(w, "bad topic")
return
}
l := loadsave.New(s.storer, requestModePut(r), false)
batch, err := requestPostageBatchId(r)
if err != nil {
s.logger.Debugf("feed put: postage batch id: %v", err)
s.logger.Error("feed put: postage batch id")
jsonhttp.BadRequest(w, "invalid postage batch id")
return
}
putter, err := newStamperPutter(s.storer, s.post, s.signer, batch)
if err != nil {
s.logger.Debugf("feed put: putter: %v", err)
s.logger.Error("feed put: putter")
jsonhttp.BadRequest(w, nil)
return
}
l := loadsave.New(putter, requestModePut(r), false)
feedManifest, err := manifest.NewDefaultManifest(l, false)
if err != nil {
s.logger.Debugf("feed put: new manifest: %v", err)
......
......@@ -22,6 +22,8 @@ import (
"github.com/ethersphere/bee/pkg/jsonhttp/jsonhttptest"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/manifest"
"github.com/ethersphere/bee/pkg/postage"
mockpost "github.com/ethersphere/bee/pkg/postage/mock"
testingsoc "github.com/ethersphere/bee/pkg/soc/testing"
statestore "github.com/ethersphere/bee/pkg/statestore/mock"
"github.com/ethersphere/bee/pkg/storage"
......@@ -152,17 +154,20 @@ func TestFeed_Post(t *testing.T) {
logger = logging.New(ioutil.Discard, 0)
tag = tags.NewTags(mockStatestore, logger)
topic = "aabbcc"
mp = mockpost.New(mockpost.WithIssuer(postage.NewStampIssuer("", "", batchOk, 11, 10)))
mockStorer = mock.NewStorer()
client, _, _ = newTestServer(t, testServerOptions{
Storer: mockStorer,
Tags: tag,
Logger: logger,
Post: mp,
})
url = fmt.Sprintf("/feeds/%s/%s?type=%s", ownerString, topic, "sequence")
)
t.Run("ok", func(t *testing.T) {
url := fmt.Sprintf("/feeds/%s/%s?type=%s", ownerString, topic, "sequence")
jsonhttptest.Request(t, client, http.MethodPost, url, http.StatusCreated,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithExpectedJSONResponse(api.FeedReferenceResponse{
Reference: expReference,
}),
......@@ -189,6 +194,31 @@ func TestFeed_Post(t *testing.T) {
t.Fatalf("type mismatch. got %s want %s", e, "Sequence")
}
})
t.Run("postage", func(t *testing.T) {
t.Run("err - bad batch", func(t *testing.T) {
hexbatch := hex.EncodeToString(batchInvalid)
jsonhttptest.Request(t, client, http.MethodPost, url, http.StatusBadRequest,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, hexbatch),
jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{
Message: "invalid postage batch id",
Code: http.StatusBadRequest,
}))
})
t.Run("ok - batch zeros", func(t *testing.T) {
hexbatch := hex.EncodeToString(batchOk)
jsonhttptest.Request(t, client, http.MethodPost, url, http.StatusCreated,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, hexbatch),
)
})
t.Run("bad request - batch empty", func(t *testing.T) {
hexbatch := hex.EncodeToString(batchEmpty)
jsonhttptest.Request(t, client, http.MethodPost, url, http.StatusBadRequest,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, hexbatch),
)
})
})
}
type factoryMock struct {
......
......@@ -14,6 +14,7 @@ import (
"github.com/ethersphere/bee/pkg/jsonhttp"
"github.com/ethersphere/bee/pkg/jsonhttp/jsonhttptest"
"github.com/ethersphere/bee/pkg/logging"
mockpost "github.com/ethersphere/bee/pkg/postage/mock"
statestore "github.com/ethersphere/bee/pkg/statestore/mock"
"github.com/ethersphere/bee/pkg/storage/mock"
testingc "github.com/ethersphere/bee/pkg/storage/testing"
......@@ -28,6 +29,7 @@ func TestGatewayMode(t *testing.T) {
Tags: tags.NewTags(statestore.NewStateStore(), logger),
Logger: logger,
GatewayMode: true,
Post: mockpost.New(mockpost.WithAcceptAll()),
})
forbiddenResponseOption := jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{
......@@ -66,12 +68,14 @@ func TestGatewayMode(t *testing.T) {
// should work without pinning
jsonhttptest.Request(t, client, http.MethodPost, "/chunks", http.StatusOK,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(bytes.NewReader(chunk.Data())),
)
jsonhttptest.Request(t, client, http.MethodPost, "/chunks/0773a91efd6547c754fc1d95fb1c62c7d1b47f959c2caa685dfec8736da95c1c", http.StatusForbidden, forbiddenResponseOption, headerOption)
jsonhttptest.Request(t, client, http.MethodPost, "/bytes", http.StatusOK,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(bytes.NewReader(chunk.Data())),
) // should work without pinning
jsonhttptest.Request(t, client, http.MethodPost, "/bytes", http.StatusForbidden, forbiddenResponseOption, headerOption)
......@@ -88,6 +92,7 @@ func TestGatewayMode(t *testing.T) {
})
jsonhttptest.Request(t, client, http.MethodPost, "/bytes", http.StatusOK,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(bytes.NewReader(chunk.Data())),
) // should work without pinning
jsonhttptest.Request(t, client, http.MethodPost, "/bytes", http.StatusForbidden, forbiddenResponseOption, headerOption)
......
......@@ -16,6 +16,7 @@ import (
"github.com/ethersphere/bee/pkg/jsonhttp/jsonhttptest"
"github.com/ethersphere/bee/pkg/logging"
pinning "github.com/ethersphere/bee/pkg/pinning/mock"
mockpost "github.com/ethersphere/bee/pkg/postage/mock"
statestore "github.com/ethersphere/bee/pkg/statestore/mock"
"github.com/ethersphere/bee/pkg/storage/mock"
testingc "github.com/ethersphere/bee/pkg/storage/testing"
......@@ -86,12 +87,14 @@ func TestPinHandlers(t *testing.T) {
Tags: tags.NewTags(statestore.NewStateStore(), logging.New(ioutil.Discard, 0)),
Pinning: pinning.NewServiceMock(),
Logger: logging.New(ioutil.Discard, 5),
Post: mockpost.New(mockpost.WithAcceptAll()),
})
)
t.Run("bytes", func(t *testing.T) {
const rootHash = "838d0a193ecd1152d1bb1432d5ecc02398533b2494889e23b8bd5ace30ac2aeb"
jsonhttptest.Request(t, client, http.MethodPost, "/bytes", http.StatusOK,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(strings.NewReader("this is a simple text")),
jsonhttptest.WithExpectedJSONResponse(api.BzzUploadResponse{
Reference: swarm.MustParseHexAddress(rootHash),
......@@ -108,6 +111,7 @@ func TestPinHandlers(t *testing.T) {
}})
rootHash := "9e178dbd1ed4b748379e25144e28dfb29c07a4b5114896ef454480115a56b237"
jsonhttptest.Request(t, client, http.MethodPost, "/bzz", http.StatusOK,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(tarReader),
jsonhttptest.WithRequestHeader("Content-Type", api.ContentTypeTar),
jsonhttptest.WithRequestHeader(api.SwarmCollectionHeader, "True"),
......@@ -119,6 +123,7 @@ func TestPinHandlers(t *testing.T) {
rootHash = "dd13a5a6cc9db3ef514d645e6719178dbfb1a90b49b9262cafce35b0d27cf245"
jsonhttptest.Request(t, client, http.MethodPost, "/bzz?name=somefile.txt", http.StatusOK,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestHeader("Content-Type", "text/plain"),
jsonhttptest.WithRequestBody(strings.NewReader("this is a simple text")),
jsonhttptest.WithExpectedJSONResponse(api.BzzUploadResponse{
......@@ -134,6 +139,7 @@ func TestPinHandlers(t *testing.T) {
rootHash = chunk.Address().String()
)
jsonhttptest.Request(t, client, http.MethodPost, "/chunks", http.StatusOK,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(bytes.NewReader(chunk.Data())),
jsonhttptest.WithExpectedJSONResponse(api.ChunkAddressResponse{
Reference: chunk.Address(),
......
// Copyright 2021 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package api
import (
"encoding/hex"
"encoding/json"
"errors"
"math/big"
"net/http"
"strconv"
"github.com/ethersphere/bee/pkg/jsonhttp"
"github.com/ethersphere/bee/pkg/postage/postagecontract"
"github.com/gorilla/mux"
)
// batchID is a postage batch identifier. It exists as a named []byte so
// that it serializes to JSON as a hex string instead of the default
// base64 encoding for byte slices.
type batchID []byte

// MarshalJSON implements json.Marshaler, rendering the ID as a quoted
// hex-encoded string.
func (b batchID) MarshalJSON() ([]byte, error) {
	encoded := hex.EncodeToString(b)
	return json.Marshal(encoded)
}
// postageCreateResponse is the JSON body returned by postageCreateHandler
// after a successful batch purchase.
type postageCreateResponse struct {
	BatchID batchID `json:"batchID"`
}
// postageCreateHandler buys a new postage batch via the postage contract.
// The batch amount and bucket depth come from the URL path, an optional
// label from the query string. On success it responds with the new batch ID.
func (s *server) postageCreateHandler(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)

	// amount is an arbitrary-precision decimal integer.
	amount, ok := new(big.Int).SetString(vars["amount"], 10)
	if !ok {
		s.logger.Error("create batch: invalid amount")
		jsonhttp.BadRequest(w, "invalid postage amount")
		return
	}

	// depth must fit in a uint8.
	depth, err := strconv.ParseUint(vars["depth"], 10, 8)
	if err != nil {
		s.logger.Debugf("create batch: invalid depth: %v", err)
		s.logger.Error("create batch: invalid depth")
		jsonhttp.BadRequest(w, "invalid depth")
		return
	}

	label := r.URL.Query().Get("label")

	batchID, err := s.postageContract.CreateBatch(r.Context(), amount, uint8(depth), label)
	switch {
	case errors.Is(err, postagecontract.ErrInsufficientFunds):
		s.logger.Debugf("create batch: out of funds: %v", err)
		s.logger.Error("create batch: out of funds")
		jsonhttp.BadRequest(w, "out of funds")
		return
	case errors.Is(err, postagecontract.ErrInvalidDepth):
		s.logger.Debugf("create batch: invalid depth: %v", err)
		s.logger.Error("create batch: invalid depth")
		jsonhttp.BadRequest(w, "invalid depth")
		return
	case err != nil:
		s.logger.Debugf("create batch: failed to create: %v", err)
		s.logger.Error("create batch: failed to create")
		jsonhttp.InternalServerError(w, "cannot create batch")
		return
	}

	jsonhttp.OK(w, &postageCreateResponse{
		BatchID: batchID,
	})
}
// postageStampResponse describes one stamp issuer: its batch ID and the
// utilization value the issuer reports (how used-up the batch is —
// presumably bucket fill; confirm in the postage package).
type postageStampResponse struct {
	BatchID batchID `json:"batchID"`
	Utilization uint32 `json:"utilization"`
}
// postageStampsResponse is the JSON body returned by postageGetStampsHandler,
// wrapping the list of all locally known stamp issuers.
type postageStampsResponse struct {
	Stamps []postageStampResponse `json:"stamps"`
}
// postageGetStampsHandler lists all stamp issuers known to the postage
// service, reporting each one's batch ID and utilization.
func (s *server) postageGetStampsHandler(w http.ResponseWriter, r *http.Request) {
	issuers := s.post.StampIssuers()
	resp := postageStampsResponse{}
	if len(issuers) > 0 {
		// pre-size to avoid repeated growth; keep the slice nil when there
		// are no issuers so the JSON output ("stamps":null) is unchanged
		resp.Stamps = make([]postageStampResponse, 0, len(issuers))
	}
	for _, v := range issuers {
		resp.Stamps = append(resp.Stamps, postageStampResponse{
			BatchID:     v.ID(),
			Utilization: v.Utilization(),
		})
	}
	jsonhttp.OK(w, resp)
}
// postageGetStampHandler returns the stamp issuer for the batch ID in the
// URL, reporting its batch ID and utilization. Malformed IDs and unknown
// batches yield 400 Bad Request.
func (s *server) postageGetStampHandler(w http.ResponseWriter, r *http.Request) {
	idStr := mux.Vars(r)["id"]
	// a batch ID is 32 bytes, i.e. exactly 64 hex characters; this also
	// rejects the empty string, so no separate emptiness check is needed
	if len(idStr) != 64 {
		s.logger.Error("get stamp issuer: invalid batchID")
		jsonhttp.BadRequest(w, "invalid batchID")
		return
	}
	id, err := hex.DecodeString(idStr)
	if err != nil {
		// Debugf carries the verbose error; the plain Error line matches the
		// logging convention used by the other handlers in this file
		s.logger.Debugf("get stamp issuer: invalid batchID: %v", err)
		s.logger.Error("get stamp issuer: invalid batchID")
		jsonhttp.BadRequest(w, "invalid batchID")
		return
	}
	issuer, err := s.post.GetStampIssuer(id)
	if err != nil {
		s.logger.Debugf("get stamp issuer: get issuer: %v", err)
		s.logger.Error("get stamp issuer: get issuer")
		jsonhttp.BadRequest(w, "cannot get issuer")
		return
	}
	resp := postageStampResponse{
		BatchID:     id,
		Utilization: issuer.Utilization(),
	}
	jsonhttp.OK(w, &resp)
}
// Copyright 2021 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package api_test
import (
"context"
"encoding/hex"
"errors"
"fmt"
"math/big"
"net/http"
"testing"
"github.com/ethersphere/bee/pkg/api"
"github.com/ethersphere/bee/pkg/jsonhttp"
"github.com/ethersphere/bee/pkg/jsonhttp/jsonhttptest"
"github.com/ethersphere/bee/pkg/postage"
mockpost "github.com/ethersphere/bee/pkg/postage/mock"
"github.com/ethersphere/bee/pkg/postage/postagecontract"
contractMock "github.com/ethersphere/bee/pkg/postage/postagecontract/mock"
)
// TestPostageCreateStamp exercises POST /stamps/{amount}/{depth} against a
// mocked postage contract: the happy path, contract-level failures (generic
// error, insufficient funds, invalid depth), and malformed URL parameters.
func TestPostageCreateStamp(t *testing.T) {
	batchID := []byte{1, 2, 3, 4}
	initialBalance := int64(1000)
	depth := uint8(1)
	label := "label"
	// createBatch builds the endpoint URL for a given amount/depth/label.
	createBatch := func(amount int64, depth uint8, label string) string {
		return fmt.Sprintf("/stamps/%d/%d?label=%s", amount, depth, label)
	}
	// happy path: contract receives exactly the parsed parameters and the
	// handler echoes the batch ID it returns
	t.Run("ok", func(t *testing.T) {
		contract := contractMock.New(
			contractMock.WithCreateBatchFunc(func(ctx context.Context, ib *big.Int, d uint8, l string) ([]byte, error) {
				if ib.Cmp(big.NewInt(initialBalance)) != 0 {
					return nil, fmt.Errorf("called with wrong initial balance. wanted %d, got %d", initialBalance, ib)
				}
				if d != depth {
					return nil, fmt.Errorf("called with wrong depth. wanted %d, got %d", depth, d)
				}
				if l != label {
					return nil, fmt.Errorf("called with wrong label. wanted %s, got %s", label, l)
				}
				return batchID, nil
			}),
		)
		client, _, _ := newTestServer(t, testServerOptions{
			PostageContract: contract,
		})
		jsonhttptest.Request(t, client, http.MethodPost, createBatch(initialBalance, depth, label), http.StatusOK,
			jsonhttptest.WithExpectedJSONResponse(&api.PostageCreateResponse{
				BatchID: batchID,
			}),
		)
	})
	// generic contract failure maps to 500
	t.Run("with-error", func(t *testing.T) {
		contract := contractMock.New(
			contractMock.WithCreateBatchFunc(func(ctx context.Context, ib *big.Int, d uint8, l string) ([]byte, error) {
				return nil, errors.New("err")
			}),
		)
		client, _, _ := newTestServer(t, testServerOptions{
			PostageContract: contract,
		})
		jsonhttptest.Request(t, client, http.MethodPost, createBatch(initialBalance, depth, label), http.StatusInternalServerError,
			jsonhttptest.WithExpectedJSONResponse(&jsonhttp.StatusResponse{
				Code:    http.StatusInternalServerError,
				Message: "cannot create batch",
			}),
		)
	})
	// ErrInsufficientFunds maps to 400 "out of funds"
	t.Run("out-of-funds", func(t *testing.T) {
		contract := contractMock.New(
			contractMock.WithCreateBatchFunc(func(ctx context.Context, ib *big.Int, d uint8, l string) ([]byte, error) {
				return nil, postagecontract.ErrInsufficientFunds
			}),
		)
		client, _, _ := newTestServer(t, testServerOptions{
			PostageContract: contract,
		})
		jsonhttptest.Request(t, client, http.MethodPost, createBatch(initialBalance, depth, label), http.StatusBadRequest,
			jsonhttptest.WithExpectedJSONResponse(&jsonhttp.StatusResponse{
				Code:    http.StatusBadRequest,
				Message: "out of funds",
			}),
		)
	})
	// non-numeric depth is rejected before the contract is reached
	t.Run("invalid depth", func(t *testing.T) {
		client, _, _ := newTestServer(t, testServerOptions{})
		jsonhttptest.Request(t, client, http.MethodPost, "/stamps/1000/ab", http.StatusBadRequest,
			jsonhttptest.WithExpectedJSONResponse(&jsonhttp.StatusResponse{
				Code:    http.StatusBadRequest,
				Message: "invalid depth",
			}),
		)
	})
	// ErrInvalidDepth from the contract also maps to 400 "invalid depth"
	t.Run("depth less than bucket depth", func(t *testing.T) {
		contract := contractMock.New(
			contractMock.WithCreateBatchFunc(func(ctx context.Context, ib *big.Int, d uint8, l string) ([]byte, error) {
				return nil, postagecontract.ErrInvalidDepth
			}),
		)
		client, _, _ := newTestServer(t, testServerOptions{
			PostageContract: contract,
		})
		jsonhttptest.Request(t, client, http.MethodPost, "/stamps/1000/9", http.StatusBadRequest,
			jsonhttptest.WithExpectedJSONResponse(&jsonhttp.StatusResponse{
				Code:    http.StatusBadRequest,
				Message: "invalid depth",
			}),
		)
	})
	// non-numeric amount is rejected before the contract is reached
	t.Run("invalid balance", func(t *testing.T) {
		client, _, _ := newTestServer(t, testServerOptions{})
		jsonhttptest.Request(t, client, http.MethodPost, "/stamps/abcd/2", http.StatusBadRequest,
			jsonhttptest.WithExpectedJSONResponse(&jsonhttp.StatusResponse{
				Code:    http.StatusBadRequest,
				Message: "invalid postage amount",
			}),
		)
	})
}
// TestPostageGetStamps verifies that GET /stamps lists the stamp issuers
// known to the postage service.
func TestPostageGetStamps(t *testing.T) {
	// a single mock issuer backed by the shared batchOk ID
	mp := mockpost.New(mockpost.WithIssuer(postage.NewStampIssuer("", "", batchOk, 11, 10)))
	client, _, _ := newTestServer(t, testServerOptions{Post: mp})
	jsonhttptest.Request(t, client, http.MethodGet, "/stamps", http.StatusOK,
		jsonhttptest.WithExpectedJSONResponse(&api.PostageStampsResponse{
			Stamps: []api.PostageStampResponse{
				{
					BatchID: batchOk,
					Utilization: 0,
				},
			},
		}),
	)
}
// TestPostageGetStamp verifies GET /stamps/{id}: a known issuer is
// returned, and batch IDs that are not exactly 32 bytes (64 hex chars)
// are rejected with 400 Bad Request.
func TestPostageGetStamp(t *testing.T) {
	mp := mockpost.New(mockpost.WithIssuer(postage.NewStampIssuer("", "", batchOk, 11, 10)))
	client, _, _ := newTestServer(t, testServerOptions{Post: mp})
	t.Run("ok", func(t *testing.T) {
		jsonhttptest.Request(t, client, http.MethodGet, "/stamps/"+batchOkStr, http.StatusOK,
			jsonhttptest.WithExpectedJSONResponse(&api.PostageStampResponse{
				BatchID:     batchOk,
				Utilization: 0,
			}),
		)
	})
	// the next two subtests were both named "ok" although they assert
	// rejections; give each a distinct, descriptive name
	t.Run("bad request - batch id too short", func(t *testing.T) {
		badBatch := []byte{0, 1, 2}
		jsonhttptest.Request(t, client, http.MethodGet, "/stamps/"+hex.EncodeToString(badBatch), http.StatusBadRequest,
			jsonhttptest.WithExpectedJSONResponse(&jsonhttp.StatusResponse{
				Code:    http.StatusBadRequest,
				Message: "invalid batchID",
			}),
		)
	})
	t.Run("bad request - batch id wrong length", func(t *testing.T) {
		badBatch := []byte{0, 1, 2, 4}
		jsonhttptest.Request(t, client, http.MethodGet, "/stamps/"+hex.EncodeToString(badBatch), http.StatusBadRequest,
			jsonhttptest.WithExpectedJSONResponse(&jsonhttp.StatusResponse{
				Code:    http.StatusBadRequest,
				Message: "invalid batchID",
			}),
		)
	})
}
......@@ -15,6 +15,7 @@ import (
"github.com/ethersphere/bee/pkg/crypto"
"github.com/ethersphere/bee/pkg/jsonhttp"
"github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/pss"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/gorilla/mux"
......@@ -69,8 +70,23 @@ func (s *server) pssPostHandler(w http.ResponseWriter, r *http.Request) {
jsonhttp.InternalServerError(w, nil)
return
}
batch, err := requestPostageBatchId(r)
if err != nil {
s.logger.Debugf("pss: postage batch id: %v", err)
s.logger.Error("pss: postage batch id")
jsonhttp.BadRequest(w, "invalid postage batch id")
return
}
i, err := s.post.GetStampIssuer(batch)
if err != nil {
s.logger.Debugf("pss: postage batch issuer: %v", err)
s.logger.Error("pss: postage batch issue")
jsonhttp.BadRequest(w, "postage stamp issuer")
return
}
stamper := postage.NewStamper(i, s.signer)
err = s.pss.Send(r.Context(), topic, payload, recipient, targets)
err = s.pss.Send(r.Context(), topic, payload, stamper, recipient, targets)
if err != nil {
s.logger.Debugf("pss send payload: %v. topic: %s", err, topicVar)
s.logger.Error("pss send payload")
......
......@@ -18,10 +18,13 @@ import (
"time"
"github.com/btcsuite/btcd/btcec"
"github.com/ethersphere/bee/pkg/api"
"github.com/ethersphere/bee/pkg/crypto"
"github.com/ethersphere/bee/pkg/jsonhttp"
"github.com/ethersphere/bee/pkg/jsonhttp/jsonhttptest"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/postage"
mockpost "github.com/ethersphere/bee/pkg/postage/mock"
"github.com/ethersphere/bee/pkg/pss"
"github.com/ethersphere/bee/pkg/pushsync"
"github.com/ethersphere/bee/pkg/storage/mock"
......@@ -182,12 +185,13 @@ func TestPssSend(t *testing.T) {
mtx.Unlock()
return err
}
mp = mockpost.New(mockpost.WithIssuer(postage.NewStampIssuer("", "", batchOk, 11, 10)))
p = newMockPss(sendFn)
client, _, _ = newTestServer(t, testServerOptions{
Pss: p,
Storer: mock.NewStorer(),
Logger: logger,
Post: mp,
})
recipient = hex.EncodeToString(publicKeyBytes)
......@@ -211,8 +215,36 @@ func TestPssSend(t *testing.T) {
)
})
t.Run("err - bad batch", func(t *testing.T) {
hexbatch := hex.EncodeToString(batchInvalid)
jsonhttptest.Request(t, client, http.MethodPost, "/pss/send/to/12", http.StatusBadRequest,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, hexbatch),
jsonhttptest.WithRequestBody(bytes.NewReader(payload)),
jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{
Message: "invalid postage batch id",
Code: http.StatusBadRequest,
}),
)
})
t.Run("ok batch", func(t *testing.T) {
hexbatch := hex.EncodeToString(batchOk)
jsonhttptest.Request(t, client, http.MethodPost, "/pss/send/to/12", http.StatusOK,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, hexbatch),
jsonhttptest.WithRequestBody(bytes.NewReader(payload)),
)
})
t.Run("bad request - batch empty", func(t *testing.T) {
hexbatch := hex.EncodeToString(batchEmpty)
jsonhttptest.Request(t, client, http.MethodPost, "/pss/send/to/12", http.StatusBadRequest,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, hexbatch),
jsonhttptest.WithRequestBody(bytes.NewReader(payload)),
)
})
t.Run("ok", func(t *testing.T) {
jsonhttptest.Request(t, client, http.MethodPost, "/pss/send/testtopic/12?recipient="+recipient, http.StatusOK,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(bytes.NewReader(payload)),
jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{
Message: "OK",
......@@ -233,6 +265,7 @@ func TestPssSend(t *testing.T) {
t.Run("without recipient", func(t *testing.T) {
jsonhttptest.Request(t, client, http.MethodPost, "/pss/send/testtopic/12", http.StatusOK,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(bytes.NewReader(payload)),
jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{
Message: "OK",
......@@ -391,7 +424,7 @@ func newMockPss(f pssSendFn) *mpss {
}
// Send arbitrary byte slice with the given topic to Targets.
func (m *mpss) Send(ctx context.Context, topic pss.Topic, payload []byte, recipient *ecdsa.PublicKey, targets pss.Targets) error {
func (m *mpss) Send(ctx context.Context, topic pss.Topic, payload []byte, _ postage.Stamper, recipient *ecdsa.PublicKey, targets pss.Targets) error {
chunk, err := pss.Wrap(ctx, topic, payload, recipient, targets)
if err != nil {
return err
......
......@@ -152,6 +152,27 @@ func (s *server) setupRouting() {
})),
)
handle("/stamps", web.ChainHandlers(
s.gatewayModeForbidEndpointHandler,
web.FinalHandler(jsonhttp.MethodHandler{
"GET": http.HandlerFunc(s.postageGetStampsHandler),
})),
)
handle("/stamps/{id}", web.ChainHandlers(
s.gatewayModeForbidEndpointHandler,
web.FinalHandler(jsonhttp.MethodHandler{
"GET": http.HandlerFunc(s.postageGetStampHandler),
})),
)
handle("/stamps/{amount}/{depth}", web.ChainHandlers(
s.gatewayModeForbidEndpointHandler,
web.FinalHandler(jsonhttp.MethodHandler{
"POST": http.HandlerFunc(s.postageCreateHandler),
})),
)
s.Handler = web.ChainHandlers(
httpaccess.NewHTTPAccessLogHandler(s.logger, logrus.InfoLevel, s.tracer, "api access"),
handlers.CompressHandler,
......
......@@ -13,6 +13,7 @@ import (
"github.com/ethersphere/bee/pkg/cac"
"github.com/ethersphere/bee/pkg/jsonhttp"
"github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/soc"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/gorilla/mux"
......@@ -110,7 +111,6 @@ func (s *server) socUploadHandler(w http.ResponseWriter, r *http.Request) {
s.logger.Error("soc upload: invalid chunk")
jsonhttp.Unauthorized(w, "invalid chunk")
return
}
ctx := r.Context()
......@@ -127,7 +127,30 @@ func (s *server) socUploadHandler(w http.ResponseWriter, r *http.Request) {
jsonhttp.Conflict(w, "chunk already exists")
return
}
batch, err := requestPostageBatchId(r)
if err != nil {
s.logger.Debugf("soc upload: postage batch id: %v", err)
s.logger.Error("soc upload: postage batch id")
jsonhttp.BadRequest(w, "invalid postage batch id")
return
}
i, err := s.post.GetStampIssuer(batch)
if err != nil {
s.logger.Debugf("soc upload: postage batch issuer: %v", err)
s.logger.Error("soc upload: postage batch issue")
jsonhttp.BadRequest(w, "postage stamp issuer")
return
}
stamper := postage.NewStamper(i, s.signer)
stamp, err := stamper.Stamp(sch.Address())
if err != nil {
s.logger.Debugf("soc upload: stamp: %v", err)
s.logger.Error("soc upload: stamp error")
jsonhttp.InternalServerError(w, "stamp error")
return
}
sch = sch.WithStamp(stamp)
_, err = s.storer.Put(ctx, requestModePut(r), sch)
if err != nil {
s.logger.Debugf("soc upload: chunk write error: %v", err)
......
......@@ -16,6 +16,8 @@ import (
"github.com/ethersphere/bee/pkg/jsonhttp"
"github.com/ethersphere/bee/pkg/jsonhttp/jsonhttptest"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/postage"
mockpost "github.com/ethersphere/bee/pkg/postage/mock"
"github.com/ethersphere/bee/pkg/soc"
testingsoc "github.com/ethersphere/bee/pkg/soc/testing"
statestore "github.com/ethersphere/bee/pkg/statestore/mock"
......@@ -30,10 +32,12 @@ func TestSOC(t *testing.T) {
mockStatestore = statestore.NewStateStore()
logger = logging.New(ioutil.Discard, 0)
tag = tags.NewTags(mockStatestore, logger)
mp = mockpost.New(mockpost.WithIssuer(postage.NewStampIssuer("", "", batchOk, 11, 10)))
mockStorer = mock.NewStorer()
client, _, _ = newTestServer(t, testServerOptions{
Storer: mockStorer,
Tags: tag,
Post: mp,
})
)
t.Run("cmpty data", func(t *testing.T) {
......@@ -94,6 +98,7 @@ func TestSOC(t *testing.T) {
s := testingsoc.GenerateMockSOC(t, testData)
jsonhttptest.Request(t, client, http.MethodPost, socResource(hex.EncodeToString(s.Owner), hex.EncodeToString(s.ID), hex.EncodeToString(s.Signature)), http.StatusCreated,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(bytes.NewReader(s.WrappedChunk.Data())),
jsonhttptest.WithExpectedJSONResponse(api.SocPostResponse{
Reference: s.Address(),
......@@ -117,12 +122,14 @@ func TestSOC(t *testing.T) {
s := testingsoc.GenerateMockSOC(t, testData)
jsonhttptest.Request(t, client, http.MethodPost, socResource(hex.EncodeToString(s.Owner), hex.EncodeToString(s.ID), hex.EncodeToString(s.Signature)), http.StatusCreated,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(bytes.NewReader(s.WrappedChunk.Data())),
jsonhttptest.WithExpectedJSONResponse(api.SocPostResponse{
Reference: s.Address(),
}),
)
jsonhttptest.Request(t, client, http.MethodPost, socResource(hex.EncodeToString(s.Owner), hex.EncodeToString(s.ID), hex.EncodeToString(s.Signature)), http.StatusConflict,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(bytes.NewReader(s.WrappedChunk.Data())),
jsonhttptest.WithExpectedJSONResponse(
jsonhttp.StatusResponse{
......@@ -131,4 +138,35 @@ func TestSOC(t *testing.T) {
}),
)
})
t.Run("postage", func(t *testing.T) {
s := testingsoc.GenerateMockSOC(t, testData)
t.Run("err - bad batch", func(t *testing.T) {
hexbatch := hex.EncodeToString(batchInvalid)
jsonhttptest.Request(t, client, http.MethodPost, socResource(hex.EncodeToString(s.Owner), hex.EncodeToString(s.ID), hex.EncodeToString(s.Signature)), http.StatusBadRequest,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, hexbatch),
jsonhttptest.WithRequestBody(bytes.NewReader(s.WrappedChunk.Data())),
jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{
Message: "invalid postage batch id",
Code: http.StatusBadRequest,
}))
})
t.Run("ok batch", func(t *testing.T) {
s := testingsoc.GenerateMockSOC(t, testData)
hexbatch := hex.EncodeToString(batchOk)
jsonhttptest.Request(t, client, http.MethodPost, socResource(hex.EncodeToString(s.Owner), hex.EncodeToString(s.ID), hex.EncodeToString(s.Signature)), http.StatusCreated,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, hexbatch),
jsonhttptest.WithRequestBody(bytes.NewReader(s.WrappedChunk.Data())),
)
})
t.Run("err - batch empty", func(t *testing.T) {
s := testingsoc.GenerateMockSOC(t, testData)
hexbatch := hex.EncodeToString(batchEmpty)
jsonhttptest.Request(t, client, http.MethodPost, socResource(hex.EncodeToString(s.Owner), hex.EncodeToString(s.ID), hex.EncodeToString(s.Signature)), http.StatusBadRequest,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, hexbatch),
jsonhttptest.WithRequestBody(bytes.NewReader(s.WrappedChunk.Data())),
)
})
})
}
......@@ -14,6 +14,7 @@ import (
"testing"
"github.com/ethersphere/bee/pkg/logging"
mockpost "github.com/ethersphere/bee/pkg/postage/mock"
statestore "github.com/ethersphere/bee/pkg/statestore/mock"
"github.com/ethersphere/bee/pkg/api"
......@@ -47,6 +48,7 @@ func TestTags(t *testing.T) {
Storer: mock.NewStorer(),
Tags: tag,
Logger: logger,
Post: mockpost.New(mockpost.WithAcceptAll()),
})
)
......@@ -105,11 +107,13 @@ func TestTags(t *testing.T) {
)
_ = jsonhttptest.Request(t, client, http.MethodPost, chunksResource, http.StatusOK,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(bytes.NewReader(chunk.Data())),
jsonhttptest.WithExpectedJSONResponse(api.ChunkAddressResponse{Reference: chunk.Address()}),
)
rcvdHeaders := jsonhttptest.Request(t, client, http.MethodPost, chunksResource, http.StatusOK,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(bytes.NewReader(chunk.Data())),
jsonhttptest.WithExpectedJSONResponse(api.ChunkAddressResponse{Reference: chunk.Address()}),
jsonhttptest.WithRequestHeader(api.SwarmTagHeader, strconv.FormatUint(uint64(tr.Uid), 10)),
......@@ -229,6 +233,7 @@ func TestTags(t *testing.T) {
// upload content with tag
jsonhttptest.Request(t, client, http.MethodPost, chunksResource, http.StatusOK,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(bytes.NewReader(chunk.Data())),
jsonhttptest.WithRequestHeader(api.SwarmTagHeader, fmt.Sprint(tagId)),
)
......@@ -268,6 +273,7 @@ func TestTags(t *testing.T) {
respHeaders := jsonhttptest.Request(t, client, http.MethodPost,
bzzResource+"?name=somefile", http.StatusOK,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(bytes.NewReader([]byte("some data"))),
jsonhttptest.WithExpectedJSONResponse(expectedResponse),
jsonhttptest.WithRequestHeader("Content-Type", "application/octet-stream"),
......@@ -290,6 +296,7 @@ func TestTags(t *testing.T) {
expectedResponse := api.BzzUploadResponse{Reference: expectedHash}
respHeaders := jsonhttptest.Request(t, client, http.MethodPost, bzzResource, http.StatusOK,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(tarReader),
jsonhttptest.WithRequestHeader(api.SwarmCollectionHeader, "True"),
jsonhttptest.WithExpectedJSONResponse(expectedResponse),
......@@ -307,6 +314,7 @@ func TestTags(t *testing.T) {
// create a tag using the API
tr := api.TagResponse{}
jsonhttptest.Request(t, client, http.MethodPost, tagsResource, http.StatusCreated,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithJSONRequestBody(api.TagRequest{}),
jsonhttptest.WithUnmarshalJSONResponse(&tr),
)
......@@ -327,6 +335,7 @@ func TestTags(t *testing.T) {
copy(content[:swarm.ChunkSize], dataChunk)
rcvdHeaders := jsonhttptest.Request(t, client, http.MethodPost, bytesResource, http.StatusOK,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(bytes.NewReader(content)),
jsonhttptest.WithExpectedJSONResponse(fileUploadResponse{
Reference: rootAddress,
......
......@@ -17,6 +17,7 @@ import (
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/p2p"
"github.com/ethersphere/bee/pkg/pingpong"
"github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/settlement"
"github.com/ethersphere/bee/pkg/settlement/swap"
"github.com/ethersphere/bee/pkg/settlement/swap/chequebook"
......@@ -47,6 +48,7 @@ type Service struct {
chequebookEnabled bool
chequebook chequebook.Service
swap swap.ApiInterface
batchStore postage.Storer
corsAllowedOrigins []string
metricsRegistry *prometheus.Registry
lightNodes *lightnode.Container
......@@ -78,7 +80,7 @@ func New(overlay swarm.Address, publicKey, pssPublicKey ecdsa.PublicKey, ethereu
// Configure injects required dependencies and configuration parameters and
// constructs HTTP routes that depend on them. It is intended and safe to call
// this method only once.
func (s *Service) Configure(p2p p2p.DebugService, pingpong pingpong.Interface, topologyDriver topology.Driver, lightNodes *lightnode.Container, storer storage.Storer, tags *tags.Tags, accounting accounting.Interface, settlement settlement.Interface, chequebookEnabled bool, swap swap.ApiInterface, chequebook chequebook.Service) {
func (s *Service) Configure(p2p p2p.DebugService, pingpong pingpong.Interface, topologyDriver topology.Driver, lightNodes *lightnode.Container, storer storage.Storer, tags *tags.Tags, accounting accounting.Interface, settlement settlement.Interface, chequebookEnabled bool, swap swap.ApiInterface, chequebook chequebook.Service, batchStore postage.Storer) {
s.p2p = p2p
s.pingpong = pingpong
s.topologyDriver = topologyDriver
......@@ -90,6 +92,7 @@ func (s *Service) Configure(p2p p2p.DebugService, pingpong pingpong.Interface, t
s.chequebook = chequebook
s.swap = swap
s.lightNodes = lightNodes
s.batchStore = batchStore
s.setRouter(s.newRouter())
}
......
......@@ -23,6 +23,7 @@ import (
"github.com/ethersphere/bee/pkg/logging"
p2pmock "github.com/ethersphere/bee/pkg/p2p/mock"
"github.com/ethersphere/bee/pkg/pingpong"
"github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/resolver"
chequebookmock "github.com/ethersphere/bee/pkg/settlement/swap/chequebook/mock"
swapmock "github.com/ethersphere/bee/pkg/settlement/swap/mock"
......@@ -51,6 +52,7 @@ type testServerOptions struct {
SettlementOpts []swapmock.Option
ChequebookOpts []chequebookmock.Option
SwapOpts []swapmock.Option
BatchStore postage.Storer
}
type testServer struct {
......@@ -66,7 +68,7 @@ func newTestServer(t *testing.T, o testServerOptions) *testServer {
swapserv := swapmock.NewApiInterface(o.SwapOpts...)
ln := lightnode.NewContainer()
s := debugapi.New(o.Overlay, o.PublicKey, o.PSSPublicKey, o.EthereumAddress, logging.New(ioutil.Discard, 0), nil, o.CORSAllowedOrigins)
s.Configure(o.P2P, o.Pingpong, topologyDriver, ln, o.Storer, o.Tags, acc, settlement, true, swapserv, chequebook)
s.Configure(o.P2P, o.Pingpong, topologyDriver, ln, o.Storer, o.Tags, acc, settlement, true, swapserv, chequebook, o.BatchStore)
ts := httptest.NewServer(s)
t.Cleanup(ts.Close)
......@@ -164,7 +166,7 @@ func TestServer_Configure(t *testing.T) {
}),
)
s.Configure(o.P2P, o.Pingpong, topologyDriver, ln, o.Storer, o.Tags, acc, settlement, true, swapserv, chequebook)
s.Configure(o.P2P, o.Pingpong, topologyDriver, ln, o.Storer, o.Tags, acc, settlement, true, swapserv, chequebook, nil)
testBasicRouter(t, client)
jsonhttptest.Request(t, client, http.MethodGet, "/readiness", http.StatusOK,
......
// Copyright 2021 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package debugapi
import (
"net/http"
"github.com/ethersphere/bee/pkg/jsonhttp"
)
// reserveStateHandler serves the batch store's current reserve state as a
// JSON response.
func (s *Service) reserveStateHandler(w http.ResponseWriter, r *http.Request) {
	state := s.batchStore.GetReserveState()
	jsonhttp.OK(w, state)
}
// Copyright 2021 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package debugapi_test
import (
"net/http"
"testing"
"github.com/ethersphere/bee/pkg/jsonhttp/jsonhttptest"
"github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/postage/batchstore/mock"
)
func TestReservestate(t *testing.T) {
ts := newTestServer(t, testServerOptions{
BatchStore: mock.New(mock.WithReserveState(&postage.Reservestate{
Radius: 5,
})),
})
t.Run("ok", func(t *testing.T) {
jsonhttptest.Request(t, ts.Client, http.MethodGet, "/reservestate", http.StatusOK,
jsonhttptest.WithExpectedJSONResponse(&postage.Reservestate{
Radius: 5,
}),
)
})
}
......@@ -77,6 +77,10 @@ func (s *Service) newRouter() *mux.Router {
"POST": http.HandlerFunc(s.pingpongHandler),
})
router.Handle("/reservestate", jsonhttp.MethodHandler{
"GET": http.HandlerFunc(s.reserveStateHandler),
})
router.Handle("/connect/{multi-address:.+}", jsonhttp.MethodHandler{
"POST": http.HandlerFunc(s.peerConnectHandler),
})
......
......@@ -102,7 +102,7 @@ func (f *Feed) Update(index Index) *Update {
}
// NewUpdate creates an update from an index, timestamp, payload and signature
func NewUpdate(f *Feed, idx Index, timestamp int64, payload []byte, sig []byte) (swarm.Chunk, error) {
func NewUpdate(f *Feed, idx Index, timestamp int64, payload, sig []byte) (swarm.Chunk, error) {
id, err := f.Update(idx).Id()
if err != nil {
return nil, fmt.Errorf("update: %w", err)
......
......@@ -37,7 +37,7 @@ func (t *Timeout) Get(ctx context.Context, mode storage.ModeGet, addr swarm.Addr
}
return ch, err
}
time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond)
time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond) // skipcq: GSC-G404
return ch, nil
}
......@@ -142,7 +142,7 @@ func TestFinderIntervals(t *testing.T, nextf func() (bool, int64), finderf func(
for j := 0; j < len(ats)-1; j++ {
at := ats[j]
diff := ats[j+1] - at
for now := at; now < ats[j+1]; now += int64(rand.Intn(int(diff)) + 1) {
for now := at; now < ats[j+1]; now += int64(rand.Intn(int(diff)) + 1) { // skipcq: GSC-G404
after := int64(0)
ch, current, next, err := finder.At(ctx, now, after)
if err != nil {
......@@ -198,7 +198,7 @@ func TestFinderRandomIntervals(t *testing.T, finderf func(storage.Getter, *feeds
var i int64
var n int
nextf := func() (bool, int64) {
i += int64(rand.Intn(1<<10) + 1)
i += int64(rand.Intn(1<<10) + 1) // skipcq: GSC-G404
n++
return n == 40, i
}
......
......@@ -25,6 +25,7 @@ import (
"io/ioutil"
"sync"
"github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/shed"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
......@@ -35,7 +36,7 @@ const (
// about exported data format version
exportVersionFilename = ".swarm-export-version"
// current export format version
currentExportVersion = "1"
currentExportVersion = "2"
)
// Export writes a tar structured data to the writer of
......@@ -61,12 +62,18 @@ func (db *DB) Export(w io.Writer) (count int64, err error) {
hdr := &tar.Header{
Name: hex.EncodeToString(item.Address),
Mode: 0644,
Size: int64(len(item.Data)),
Size: int64(postage.StampSize + len(item.Data)),
}
if err := tw.WriteHeader(hdr); err != nil {
return false, err
}
if _, err := tw.Write(item.BatchID); err != nil {
return false, err
}
if _, err := tw.Write(item.Sig); err != nil {
return false, err
}
if _, err := tw.Write(item.Data); err != nil {
return false, err
}
......@@ -132,19 +139,28 @@ func (db *DB) Import(ctx context.Context, r io.Reader) (count int64, err error)
continue
}
data, err := ioutil.ReadAll(tr)
rawdata, err := ioutil.ReadAll(tr)
if err != nil {
select {
case errC <- err:
case <-ctx.Done():
}
}
stamp := new(postage.Stamp)
err = stamp.UnmarshalBinary(rawdata[:postage.StampSize])
if err != nil {
select {
case errC <- err:
case <-ctx.Done():
}
}
data := rawdata[postage.StampSize:]
key := swarm.NewAddress(keybytes)
var ch swarm.Chunk
switch version {
case currentExportVersion:
ch = swarm.NewChunk(key, data)
ch = swarm.NewChunk(key, data).WithStamp(stamp)
default:
select {
case errC <- fmt.Errorf("unsupported export data version %q", version):
......
......@@ -41,7 +41,11 @@ func TestExportImport(t *testing.T) {
if err != nil {
t.Fatal(err)
}
chunks[ch.Address().String()] = ch.Data()
stamp, err := ch.Stamp().MarshalBinary()
if err != nil {
t.Fatal(err)
}
chunks[ch.Address().String()] = append(stamp, ch.Data()...)
}
var buf bytes.Buffer
......@@ -71,9 +75,13 @@ func TestExportImport(t *testing.T) {
if err != nil {
t.Fatal(err)
}
got := ch.Data()
stamp, err := ch.Stamp().MarshalBinary()
if err != nil {
t.Fatal(err)
}
got := append(stamp, ch.Data()...)
if !bytes.Equal(got, want) {
t.Fatalf("chunk %s: got data %x, want %x", addr, got, want)
t.Fatalf("chunk %s: got stamp+data %x, want %x", addr, got, want)
}
}
}
......@@ -18,7 +18,6 @@ package localstore
import (
"errors"
"fmt"
"time"
"github.com/ethersphere/bee/pkg/shed"
......@@ -101,13 +100,6 @@ func (db *DB) collectGarbage() (collectedCount uint64, done bool, err error) {
db.batchMu.Unlock()
}()
// run through the recently pinned chunks and
// remove them from the gcIndex before iterating through gcIndex
err = db.removeChunksInExcludeIndexFromGC()
if err != nil {
return 0, true, fmt.Errorf("remove chunks in exclude index: %v", err)
}
gcSize, err := db.gcSize.Get()
if err != nil {
return 0, true, err
......@@ -185,6 +177,10 @@ func (db *DB) collectGarbage() (collectedCount uint64, done bool, err error) {
if err != nil {
return 0, false, err
}
err = db.postageChunksIndex.DeleteInBatch(batch, item)
if err != nil {
return 0, false, err
}
}
if gcSize-collectedCount > target {
done = false
......@@ -201,76 +197,6 @@ func (db *DB) collectGarbage() (collectedCount uint64, done bool, err error) {
return collectedCount, done, nil
}
// removeChunksInExcludeIndexFromGC removes any recently excluded chunks
// (e.g. pinned content) found in gcExcludeIndex from the gcIndex, so they
// are not eligible for garbage collection, and adjusts the stored gc size
// accordingly. All index mutations are staged in a single leveldb batch and
// written atomically at the end.
func (db *DB) removeChunksInExcludeIndexFromGC() (err error) {
	db.metrics.GCExcludeCounter.Inc()
	defer totalTimeMetric(db.metrics.TotalTimeGCExclude, time.Now())
	defer func() {
		if err != nil {
			db.metrics.GCExcludeError.Inc()
		}
	}()

	batch := new(leveldb.Batch)
	excludedCount := 0
	var gcSizeChange int64
	err = db.gcExcludeIndex.Iterate(func(item shed.Item) (stop bool, err error) {
		// Fill in the access timestamp, needed to address the gcIndex entry.
		retrievalAccessIndexItem, err := db.retrievalAccessIndex.Get(item)
		if err != nil {
			return false, err
		}
		item.AccessTimestamp = retrievalAccessIndexItem.AccessTimestamp

		// Fill in the bin id, also part of the gcIndex key.
		retrievalDataIndexItem, err := db.retrievalDataIndex.Get(item)
		if err != nil {
			return false, err
		}
		item.BinID = retrievalDataIndexItem.BinID

		// Check if this item is in gcIndex and remove it.
		ok, err := db.gcIndex.Has(item)
		if err != nil {
			// Fix: previously returned (false, nil), silently skipping the
			// item on lookup failure; propagate the error instead.
			return false, err
		}
		if ok {
			err = db.gcIndex.DeleteInBatch(batch, item)
			if err != nil {
				// Fix: previously swallowed the error (returned nil).
				return false, err
			}
			// The delete is only staged in the batch, so Get still succeeds
			// here for items currently present in gcIndex; count them as
			// removed for the gc size adjustment.
			if _, err := db.gcIndex.Get(item); err == nil {
				gcSizeChange--
			}
			excludedCount++
			err = db.gcExcludeIndex.DeleteInBatch(batch, item)
			if err != nil {
				// Fix: previously swallowed the error (returned nil).
				return false, err
			}
		}

		return false, nil
	}, nil)
	if err != nil {
		return err
	}

	// Update the gc size based on the number of entries deleted from gcIndex.
	err = db.incGCSizeInBatch(batch, gcSizeChange)
	if err != nil {
		return err
	}

	db.metrics.GCExcludeCounter.Add(float64(excludedCount))
	err = db.shed.WriteBatch(batch)
	if err != nil {
		db.metrics.GCExcludeWriteBatchError.Inc()
		return err
	}

	return nil
}
// gcTrigger retruns the absolute value for garbage collection
// target value, calculated from db.capacity and gcTargetRatio.
func (db *DB) gcTarget() (target uint64) {
......@@ -331,3 +257,5 @@ var testHookCollectGarbage func(collectedCount uint64)
// when the GC is done collecting candidate items for
// eviction.
var testHookGCIteratorDone func()
var withinRadiusFn func(*DB, shed.Item) bool
......@@ -73,29 +73,33 @@ func testDBCollectGarbageWorker(t *testing.T) {
case <-closed:
}
}))
t.Cleanup(setWithinRadiusFunc(func(_ *DB, _ shed.Item) bool { return false }))
db := newTestDB(t, &Options{
Capacity: 100,
})
closed = db.close
addrs := make([]swarm.Address, 0)
addrs := make([]swarm.Address, chunkCount)
ctx := context.Background()
// upload random chunks
for i := 0; i < chunkCount; i++ {
ch := generateTestRandomChunk()
_, err := db.Put(context.Background(), storage.ModePutUpload, ch)
// call unreserve on the batch with radius 0 so that
// localstore is aware of the batch and the chunk can
// be inserted into the database
unreserveChunkBatch(t, db, 0, ch)
_, err := db.Put(ctx, storage.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
err = db.Set(context.Background(), storage.ModeSetSync, ch.Address())
err = db.Set(ctx, storage.ModeSetSync, ch.Address())
if err != nil {
t.Fatal(err)
}
addrs = append(addrs, ch.Address())
addrs[i] = ch.Address()
}
gcTarget := db.gcTarget()
......@@ -150,7 +154,6 @@ func testDBCollectGarbageWorker(t *testing.T) {
// Pin a file, upload chunks to go past the gc limit to trigger GC,
// check if the pinned files are still around and removed from gcIndex
func TestPinGC(t *testing.T) {
chunkCount := 150
pinChunksCount := 50
dbCapacity := uint64(100)
......@@ -171,6 +174,7 @@ func TestPinGC(t *testing.T) {
case <-closed:
}
}))
t.Cleanup(setWithinRadiusFunc(func(_ *DB, _ shed.Item) bool { return false }))
db := newTestDB(t, &Options{
Capacity: dbCapacity,
......@@ -183,6 +187,10 @@ func TestPinGC(t *testing.T) {
// upload random chunks
for i := 0; i < chunkCount; i++ {
ch := generateTestRandomChunk()
// call unreserve on the batch with radius 0 so that
// localstore is aware of the batch and the chunk can
// be inserted into the database
unreserveChunkBatch(t, db, 0, ch)
mode := storage.ModePutUpload
if i < pinChunksCount {
......@@ -221,8 +229,6 @@ func TestPinGC(t *testing.T) {
t.Run("pin Index count", newItemsCountTest(db.pinIndex, pinChunksCount))
t.Run("gc exclude index count", newItemsCountTest(db.gcExcludeIndex, pinChunksCount))
t.Run("pull index count", newItemsCountTest(db.pullIndex, int(gcTarget)+pinChunksCount))
t.Run("gc index count", newItemsCountTest(db.gcIndex, int(gcTarget)))
......@@ -277,6 +283,10 @@ func TestGCAfterPin(t *testing.T) {
// upload random chunks
for i := 0; i < chunkCount; i++ {
ch := generateTestRandomChunk()
// call unreserve on the batch with radius 0 so that
// localstore is aware of the batch and the chunk can
// be inserted into the database
unreserveChunkBatch(t, db, 0, ch)
_, err := db.Put(context.Background(), storage.ModePutUpload, ch)
if err != nil {
......@@ -298,8 +308,6 @@ func TestGCAfterPin(t *testing.T) {
t.Run("pin Index count", newItemsCountTest(db.pinIndex, chunkCount))
t.Run("gc exclude index count", newItemsCountTest(db.gcExcludeIndex, chunkCount))
t.Run("gc index count", newItemsCountTest(db.gcIndex, int(0)))
for _, hash := range pinAddrs {
......@@ -314,10 +322,6 @@ func TestGCAfterPin(t *testing.T) {
// to test garbage collection runs by uploading, syncing and
// requesting a number of chunks.
func TestDB_collectGarbageWorker_withRequests(t *testing.T) {
db := newTestDB(t, &Options{
Capacity: 100,
})
testHookCollectGarbageChan := make(chan uint64)
defer setTestHookCollectGarbage(func(collectedCount uint64) {
// don't trigger if we haven't collected anything - this may
......@@ -330,11 +334,21 @@ func TestDB_collectGarbageWorker_withRequests(t *testing.T) {
testHookCollectGarbageChan <- collectedCount
})()
t.Cleanup(setWithinRadiusFunc(func(_ *DB, _ shed.Item) bool { return false }))
db := newTestDB(t, &Options{
Capacity: 100,
})
addrs := make([]swarm.Address, 0)
// upload random chunks just up to the capacity
for i := 0; i < int(db.capacity)-1; i++ {
ch := generateTestRandomChunk()
// call unreserve on the batch with radius 0 so that
// localstore is aware of the batch and the chunk can
// be inserted into the database
unreserveChunkBatch(t, db, 0, ch)
_, err := db.Put(context.Background(), storage.ModePutUpload, ch)
if err != nil {
......@@ -357,7 +371,7 @@ func TestDB_collectGarbageWorker_withRequests(t *testing.T) {
close(testHookUpdateGCChan)
})
// request the latest synced chunk
// request the oldest synced chunk
// to prioritize it in the gc index
// not to be collected
_, err := db.Get(context.Background(), storage.ModeGetRequest, addrs[0])
......@@ -379,6 +393,11 @@ func TestDB_collectGarbageWorker_withRequests(t *testing.T) {
// upload and sync another chunk to trigger
// garbage collection
ch := generateTestRandomChunk()
// call unreserve on the batch with radius 0 so that
// localstore is aware of the batch and the chunk can
// be inserted into the database
unreserveChunkBatch(t, db, 0, ch)
_, err = db.Put(context.Background(), storage.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
......@@ -468,6 +487,10 @@ func TestDB_gcSize(t *testing.T) {
for i := 0; i < count; i++ {
ch := generateTestRandomChunk()
// call unreserve on the batch with radius 0 so that
// localstore is aware of the batch and the chunk can
// be inserted into the database
unreserveChunkBatch(t, db, 0, ch)
_, err := db.Put(context.Background(), storage.ModePutUpload, ch)
if err != nil {
......@@ -502,6 +525,13 @@ func setTestHookCollectGarbage(h func(collectedCount uint64)) (reset func()) {
return reset
}
// setWithinRadiusFunc replaces the package-level withinRadiusFn hook with h
// and returns a function that restores the previous hook. Intended for tests.
func setWithinRadiusFunc(h func(*DB, shed.Item) bool) (reset func()) {
	previous := withinRadiusFn
	withinRadiusFn = h
	return func() { withinRadiusFn = previous }
}
// TestSetTestHookCollectGarbage tests if setTestHookCollectGarbage changes
// testHookCollectGarbage function correctly and if its reset function
// resets the original function.
......@@ -556,6 +586,7 @@ func TestSetTestHookCollectGarbage(t *testing.T) {
}
func TestPinAfterMultiGC(t *testing.T) {
t.Cleanup(setWithinRadiusFunc(func(_ *DB, _ shed.Item) bool { return false }))
db := newTestDB(t, &Options{
Capacity: 10,
})
......@@ -565,6 +596,11 @@ func TestPinAfterMultiGC(t *testing.T) {
// upload random chunks above db capacity to see if chunks are still pinned
for i := 0; i < 20; i++ {
ch := generateTestRandomChunk()
// call unreserve on the batch with radius 0 so that
// localstore is aware of the batch and the chunk can
// be inserted into the database
unreserveChunkBatch(t, db, 0, ch)
_, err := db.Put(context.Background(), storage.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
......@@ -581,6 +617,11 @@ func TestPinAfterMultiGC(t *testing.T) {
}
for i := 0; i < 20; i++ {
ch := generateTestRandomChunk()
// call unreserve on the batch with radius 0 so that
// localstore is aware of the batch and the chunk can
// be inserted into the database
unreserveChunkBatch(t, db, 0, ch)
_, err := db.Put(context.Background(), storage.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
......@@ -592,6 +633,11 @@ func TestPinAfterMultiGC(t *testing.T) {
}
for i := 0; i < 20; i++ {
ch := generateTestRandomChunk()
// call unreserve on the batch with radius 0 so that
// localstore is aware of the batch and the chunk can
// be inserted into the database
unreserveChunkBatch(t, db, 0, ch)
_, err := db.Put(context.Background(), storage.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
......@@ -622,21 +668,26 @@ func TestPinAfterMultiGC(t *testing.T) {
func generateAndPinAChunk(t *testing.T, db *DB) swarm.Chunk {
// Create a chunk and pin it
pinnedChunk := generateTestRandomChunk()
ch := generateTestRandomChunk()
_, err := db.Put(context.Background(), storage.ModePutUpload, pinnedChunk)
_, err := db.Put(context.Background(), storage.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
err = db.Set(context.Background(), storage.ModeSetPin, pinnedChunk.Address())
// call unreserve on the batch with radius 0 so that
// localstore is aware of the batch and the chunk can
// be inserted into the database
unreserveChunkBatch(t, db, 0, ch)
err = db.Set(context.Background(), storage.ModeSetPin, ch.Address())
if err != nil {
t.Fatal(err)
}
err = db.Set(context.Background(), storage.ModeSetSync, pinnedChunk.Address())
err = db.Set(context.Background(), storage.ModeSetSync, ch.Address())
if err != nil {
t.Fatal(err)
}
return pinnedChunk
return ch
}
func TestPinSyncAndAccessPutSetChunkMultipleTimes(t *testing.T) {
......@@ -716,6 +767,8 @@ func addRandomChunks(t *testing.T, count int, db *DB, pin bool) []swarm.Chunk {
var chunks []swarm.Chunk
for i := 0; i < count; i++ {
ch := generateTestRandomChunk()
unreserveChunkBatch(t, db, 0, ch)
_, err := db.Put(context.Background(), storage.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
......@@ -750,6 +803,7 @@ func addRandomChunks(t *testing.T, count int, db *DB, pin bool) []swarm.Chunk {
func TestGC_NoEvictDirty(t *testing.T) {
// lower the maximal number of chunks in a single
// gc batch to ensure multiple batches.
t.Cleanup(setWithinRadiusFunc(func(_ *DB, _ shed.Item) bool { return false }))
defer func(s uint64) { gcBatchSize = s }(gcBatchSize)
gcBatchSize = 1
......@@ -810,6 +864,7 @@ func TestGC_NoEvictDirty(t *testing.T) {
// upload random chunks
for i := 0; i < chunkCount; i++ {
ch := generateTestRandomChunk()
unreserveChunkBatch(t, db, 0, ch)
_, err := db.Put(context.Background(), storage.ModePutUpload, ch)
if err != nil {
......@@ -879,7 +934,6 @@ func TestGC_NoEvictDirty(t *testing.T) {
t.Fatal(err)
}
})
}
// setTestHookGCIteratorDone sets testHookGCIteratorDone and
......@@ -891,3 +945,13 @@ func setTestHookGCIteratorDone(h func()) (reset func()) {
testHookGCIteratorDone = h
return reset
}
// unreserveChunkBatch calls UnreserveBatch at the given radius for the
// postage batch of every supplied chunk, failing the test on any error.
func unreserveChunkBatch(t *testing.T, db *DB, radius uint8, chs ...swarm.Chunk) {
	t.Helper()
	for _, c := range chs {
		if err := db.UnreserveBatch(c.Stamp().BatchID(), radius); err != nil {
			t.Fatal(err)
		}
	}
}
......@@ -22,6 +22,7 @@ import (
"math/rand"
"testing"
"github.com/ethersphere/bee/pkg/shed"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
)
......@@ -78,6 +79,7 @@ func TestDB_pullIndex(t *testing.T) {
// a chunk with and performing operations using synced, access and
// request modes.
func TestDB_gcIndex(t *testing.T) {
t.Cleanup(setWithinRadiusFunc(func(_ *DB, _ shed.Item) bool { return false }))
db := newTestDB(t, nil)
chunkCount := 50
......@@ -87,6 +89,10 @@ func TestDB_gcIndex(t *testing.T) {
// upload random chunks
for i := 0; i < chunkCount; i++ {
ch := generateTestRandomChunk()
// call unreserve on the batch with radius 0 so that
// localstore is aware of the batch and the chunk can
// be inserted into the database
unreserveChunkBatch(t, db, 0, ch)
_, err := db.Put(context.Background(), storage.ModePutUpload, ch)
if err != nil {
......
......@@ -25,6 +25,7 @@ import (
"time"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/shed"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
......@@ -85,12 +86,15 @@ type DB struct {
// garbage collection index
gcIndex shed.Index
// garbage collection exclude index for pinned contents
gcExcludeIndex shed.Index
// pin files Index
pinIndex shed.Index
// postage chunks index
postageChunksIndex shed.Index
// postage radius index
postageRadiusIndex shed.Index
// field that stores number of items in gc index
gcSize shed.Uint64Field
......@@ -213,6 +217,10 @@ func New(path string, baseKey []byte, o *Options, logger logging.Logger) (db *DB
DisableSeeksCompaction: o.DisableSeeksCompaction,
}
if withinRadiusFn == nil {
withinRadiusFn = withinRadius
}
db.shed, err = shed.NewDB(path, shedOpts)
if err != nil {
return nil, err
......@@ -248,7 +256,8 @@ func New(path string, baseKey []byte, o *Options, logger logging.Logger) (db *DB
}
// Index storing actual chunk address, data and bin id.
db.retrievalDataIndex, err = db.shed.NewIndex("Address->StoreTimestamp|BinID|Data", shed.IndexFuncs{
headerSize := 16 + postage.StampSize
db.retrievalDataIndex, err = db.shed.NewIndex("Address->StoreTimestamp|BinID|BatchID|Sig|Data", shed.IndexFuncs{
EncodeKey: func(fields shed.Item) (key []byte, err error) {
return fields.Address, nil
},
......@@ -257,16 +266,27 @@ func New(path string, baseKey []byte, o *Options, logger logging.Logger) (db *DB
return e, nil
},
EncodeValue: func(fields shed.Item) (value []byte, err error) {
b := make([]byte, 16)
b := make([]byte, headerSize)
binary.BigEndian.PutUint64(b[:8], fields.BinID)
binary.BigEndian.PutUint64(b[8:16], uint64(fields.StoreTimestamp))
stamp, err := postage.NewStamp(fields.BatchID, fields.Sig).MarshalBinary()
if err != nil {
return nil, err
}
copy(b[16:], stamp)
value = append(b, fields.Data...)
return value, nil
},
DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
e.StoreTimestamp = int64(binary.BigEndian.Uint64(value[8:16]))
e.BinID = binary.BigEndian.Uint64(value[:8])
e.Data = value[16:]
stamp := new(postage.Stamp)
if err = stamp.UnmarshalBinary(value[16:headerSize]); err != nil {
return e, err
}
e.BatchID = stamp.BatchID()
e.Sig = stamp.Sig()
e.Data = value[headerSize:]
return e, nil
},
})
......@@ -297,9 +317,9 @@ func New(path string, baseKey []byte, o *Options, logger logging.Logger) (db *DB
return nil, err
}
// pull index allows history and live syncing per po bin
db.pullIndex, err = db.shed.NewIndex("PO|BinID->Hash|Tag", shed.IndexFuncs{
db.pullIndex, err = db.shed.NewIndex("PO|BinID->Hash", shed.IndexFuncs{
EncodeKey: func(fields shed.Item) (key []byte, err error) {
key = make([]byte, 41)
key = make([]byte, 9)
key[0] = db.po(swarm.NewAddress(fields.Address))
binary.BigEndian.PutUint64(key[1:9], fields.BinID)
return key, nil
......@@ -309,20 +329,14 @@ func New(path string, baseKey []byte, o *Options, logger logging.Logger) (db *DB
return e, nil
},
EncodeValue: func(fields shed.Item) (value []byte, err error) {
value = make([]byte, 36) // 32 bytes address, 4 bytes tag
value = make([]byte, 64) // 32 bytes address, 32 bytes batch id
copy(value, fields.Address)
if fields.Tag != 0 {
binary.BigEndian.PutUint32(value[32:], fields.Tag)
}
copy(value[32:], fields.BatchID)
return value, nil
},
DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
e.Address = value[:32]
if len(value) > 32 {
e.Tag = binary.BigEndian.Uint32(value[32:])
}
e.BatchID = value[32:64]
return e, nil
},
})
......@@ -367,7 +381,7 @@ func New(path string, baseKey []byte, o *Options, logger logging.Logger) (db *DB
// create a push syncing triggers used by SubscribePush function
db.pushTriggers = make([]chan<- struct{}, 0)
// gc index for removable chunk ordered by ascending last access time
db.gcIndex, err = db.shed.NewIndex("AccessTimestamp|BinID|Hash->nil", shed.IndexFuncs{
db.gcIndex, err = db.shed.NewIndex("AccessTimestamp|BinID|Hash->BatchID", shed.IndexFuncs{
EncodeKey: func(fields shed.Item) (key []byte, err error) {
b := make([]byte, 16, 16+len(fields.Address))
binary.BigEndian.PutUint64(b[:8], uint64(fields.AccessTimestamp))
......@@ -382,9 +396,14 @@ func New(path string, baseKey []byte, o *Options, logger logging.Logger) (db *DB
return e, nil
},
EncodeValue: func(fields shed.Item) (value []byte, err error) {
return nil, nil
value = make([]byte, 32)
copy(value, fields.BatchID)
return value, nil
},
DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
e = keyItem
e.BatchID = make([]byte, 32)
copy(e.BatchID, value)
return e, nil
},
})
......@@ -415,13 +434,17 @@ func New(path string, baseKey []byte, o *Options, logger logging.Logger) (db *DB
return nil, err
}
// create an index tracking chunks per postage batch (BatchID|PO|Hash->nil)
db.gcExcludeIndex, err = db.shed.NewIndex("Hash->nil", shed.IndexFuncs{
db.postageChunksIndex, err = db.shed.NewIndex("BatchID|PO|Hash->nil", shed.IndexFuncs{
EncodeKey: func(fields shed.Item) (key []byte, err error) {
return fields.Address, nil
key = make([]byte, 65)
copy(key[:32], fields.BatchID)
key[32] = db.po(swarm.NewAddress(fields.Address))
copy(key[33:], fields.Address)
return key, nil
},
DecodeKey: func(key []byte) (e shed.Item, err error) {
e.Address = key
e.BatchID = key[:32]
e.Address = key[33:65]
return e, nil
},
EncodeValue: func(fields shed.Item) (value []byte, err error) {
......@@ -435,6 +458,28 @@ func New(path string, baseKey []byte, o *Options, logger logging.Logger) (db *DB
return nil, err
}
db.postageRadiusIndex, err = db.shed.NewIndex("BatchID->Radius", shed.IndexFuncs{
EncodeKey: func(fields shed.Item) (key []byte, err error) {
key = make([]byte, 32)
copy(key[:32], fields.BatchID)
return key, nil
},
DecodeKey: func(key []byte) (e shed.Item, err error) {
e.BatchID = key[:32]
return e, nil
},
EncodeValue: func(fields shed.Item) (value []byte, err error) {
return []byte{fields.Radius}, nil
},
DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
e.Radius = value[0]
return e, nil
},
})
if err != nil {
return nil, err
}
// start garbage collection worker
go db.collectGarbageWorker()
return db, nil
......@@ -485,8 +530,9 @@ func (db *DB) DebugIndices() (indexInfo map[string]int, err error) {
"pushIndex": db.pushIndex,
"pullIndex": db.pullIndex,
"gcIndex": db.gcIndex,
"gcExcludeIndex": db.gcExcludeIndex,
"pinIndex": db.pinIndex,
"postageChunksIndex": db.postageChunksIndex,
"postageRadiusIndex": db.postageRadiusIndex,
} {
indexSize, err := v.Count()
if err != nil {
......@@ -509,6 +555,10 @@ func chunkToItem(ch swarm.Chunk) shed.Item {
Address: ch.Address().Bytes(),
Data: ch.Data(),
Tag: ch.TagID(),
BatchID: ch.Stamp().BatchID(),
Sig: ch.Stamp().Sig(),
Depth: ch.Depth(),
Radius: ch.Radius(),
}
}
......
......@@ -30,6 +30,7 @@ import (
"time"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/shed"
"github.com/ethersphere/bee/pkg/storage"
chunktesting "github.com/ethersphere/bee/pkg/storage/testing"
......@@ -171,8 +172,9 @@ func newTestDB(t testing.TB, o *Options) *DB {
}
var (
generateTestRandomChunk = chunktesting.GenerateTestRandomChunk
generateTestRandomChunks = chunktesting.GenerateTestRandomChunks
generateTestRandomChunk = chunktesting.GenerateTestRandomChunk
generateTestRandomChunks = chunktesting.GenerateTestRandomChunks
generateTestRandomChunkAt = chunktesting.GenerateTestRandomChunkAt
)
// chunkAddresses return chunk addresses of provided chunks.
......@@ -251,7 +253,7 @@ func newRetrieveIndexesTest(db *DB, chunk swarm.Chunk, storeTimestamp, accessTim
if err != nil {
t.Fatal(err)
}
validateItem(t, item, chunk.Address().Bytes(), chunk.Data(), storeTimestamp, 0)
validateItem(t, item, chunk.Address().Bytes(), chunk.Data(), storeTimestamp, 0, chunk.Stamp())
// access index should not be set
wantErr := leveldb.ErrNotFound
......@@ -272,15 +274,14 @@ func newRetrieveIndexesTestWithAccess(db *DB, ch swarm.Chunk, storeTimestamp, ac
if err != nil {
t.Fatal(err)
}
validateItem(t, item, ch.Address().Bytes(), ch.Data(), storeTimestamp, 0)
if accessTimestamp > 0 {
item, err = db.retrievalAccessIndex.Get(addressToItem(ch.Address()))
item, err = db.retrievalAccessIndex.Get(item)
if err != nil {
t.Fatal(err)
}
validateItem(t, item, ch.Address().Bytes(), nil, 0, accessTimestamp)
}
validateItem(t, item, ch.Address().Bytes(), ch.Data(), storeTimestamp, accessTimestamp, ch.Stamp())
}
}
......@@ -298,7 +299,7 @@ func newPullIndexTest(db *DB, ch swarm.Chunk, binID uint64, wantError error) fun
t.Errorf("got error %v, want %v", err, wantError)
}
if err == nil {
validateItem(t, item, ch.Address().Bytes(), nil, 0, 0)
validateItem(t, item, ch.Address().Bytes(), nil, 0, 0, postage.NewStamp(ch.Stamp().BatchID(), nil))
}
}
}
......@@ -317,14 +318,14 @@ func newPushIndexTest(db *DB, ch swarm.Chunk, storeTimestamp int64, wantError er
t.Errorf("got error %v, want %v", err, wantError)
}
if err == nil {
validateItem(t, item, ch.Address().Bytes(), nil, storeTimestamp, 0)
validateItem(t, item, ch.Address().Bytes(), nil, storeTimestamp, 0, postage.NewStamp(nil, nil))
}
}
}
// newGCIndexTest returns a test function that validates if the right
// chunk values are in the GC index.
func newGCIndexTest(db *DB, chunk swarm.Chunk, storeTimestamp, accessTimestamp int64, binID uint64, wantError error) func(t *testing.T) {
func newGCIndexTest(db *DB, chunk swarm.Chunk, storeTimestamp, accessTimestamp int64, binID uint64, wantError error, stamp *postage.Stamp) func(t *testing.T) {
return func(t *testing.T) {
t.Helper()
......@@ -337,7 +338,7 @@ func newGCIndexTest(db *DB, chunk swarm.Chunk, storeTimestamp, accessTimestamp i
t.Errorf("got error %v, want %v", err, wantError)
}
if err == nil {
validateItem(t, item, chunk.Address().Bytes(), nil, 0, accessTimestamp)
validateItem(t, item, chunk.Address().Bytes(), nil, 0, accessTimestamp, stamp)
}
}
}
......@@ -355,7 +356,7 @@ func newPinIndexTest(db *DB, chunk swarm.Chunk, wantError error) func(t *testing
t.Errorf("got error %v, want %v", err, wantError)
}
if err == nil {
validateItem(t, item, chunk.Address().Bytes(), nil, 0, 0)
validateItem(t, item, chunk.Address().Bytes(), nil, 0, 0, postage.NewStamp(nil, nil))
}
}
}
......@@ -438,7 +439,7 @@ func testItemsOrder(t *testing.T, i shed.Index, chunks []testIndexChunk, sortFun
}
// validateItem is a helper function that checks Item values.
func validateItem(t *testing.T, item shed.Item, address, data []byte, storeTimestamp, accessTimestamp int64) {
func validateItem(t *testing.T, item shed.Item, address, data []byte, storeTimestamp, accessTimestamp int64, stamp swarm.Stamp) {
t.Helper()
if !bytes.Equal(item.Address, address) {
......@@ -453,6 +454,12 @@ func validateItem(t *testing.T, item shed.Item, address, data []byte, storeTimes
if item.AccessTimestamp != accessTimestamp {
t.Errorf("got item access timestamp %v, want %v", item.AccessTimestamp, accessTimestamp)
}
if !bytes.Equal(item.BatchID, stamp.BatchID()) {
t.Errorf("got batch ID %x, want %x", item.BatchID, stamp.BatchID())
}
if !bytes.Equal(item.Sig, stamp.Sig()) {
t.Errorf("got signature %x, want %x", item.Sig, stamp.Sig())
}
}
// setNow replaces now function and
......@@ -514,7 +521,7 @@ func TestSetNow(t *testing.T) {
}
}
func testIndexCounts(t *testing.T, pushIndex, pullIndex, gcIndex, gcExcludeIndex, pinIndex, retrievalDataIndex, retrievalAccessIndex int, indexInfo map[string]int) {
func testIndexCounts(t *testing.T, pushIndex, pullIndex, gcIndex, pinIndex, retrievalDataIndex, retrievalAccessIndex int, indexInfo map[string]int) {
t.Helper()
if indexInfo["pushIndex"] != pushIndex {
t.Fatalf("pushIndex count mismatch. got %d want %d", indexInfo["pushIndex"], pushIndex)
......@@ -528,10 +535,6 @@ func testIndexCounts(t *testing.T, pushIndex, pullIndex, gcIndex, gcExcludeIndex
t.Fatalf("gcIndex count mismatch. got %d want %d", indexInfo["gcIndex"], gcIndex)
}
if indexInfo["gcExcludeIndex"] != gcExcludeIndex {
t.Fatalf("gcExcludeIndex count mismatch. got %d want %d", indexInfo["gcExcludeIndex"], gcExcludeIndex)
}
if indexInfo["pinIndex"] != pinIndex {
t.Fatalf("pinIndex count mismatch. got %d want %d", indexInfo["pinIndex"], pinIndex)
}
......@@ -568,7 +571,7 @@ func TestDBDebugIndexes(t *testing.T) {
t.Fatal(err)
}
testIndexCounts(t, 1, 1, 0, 0, 0, 1, 0, indexCounts)
testIndexCounts(t, 1, 1, 0, 0, 1, 0, indexCounts)
// set the chunk for pinning and expect the index count to grow
err = db.Set(ctx, storage.ModeSetPin, ch.Address())
......@@ -582,5 +585,5 @@ func TestDBDebugIndexes(t *testing.T) {
}
// assert that there's a pin and gc exclude entry now
testIndexCounts(t, 1, 1, 0, 1, 1, 1, 0, indexCounts)
testIndexCounts(t, 1, 1, 0, 1, 1, 0, indexCounts)
}
......@@ -21,11 +21,11 @@ import (
"errors"
"time"
"github.com/syndtr/goleveldb/leveldb"
"github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/shed"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/syndtr/goleveldb/leveldb"
)
// Get returns a chunk from the database. If the chunk is
......@@ -50,7 +50,8 @@ func (db *DB) Get(ctx context.Context, mode storage.ModeGet, addr swarm.Address)
}
return nil, err
}
return swarm.NewChunk(swarm.NewAddress(out.Address), out.Data), nil
return swarm.NewChunk(swarm.NewAddress(out.Address), out.Data).
WithStamp(postage.NewStamp(out.BatchID, out.Sig)), nil
}
// get returns Item from the retrieval index
......@@ -152,25 +153,28 @@ func (db *DB) updateGC(item shed.Item) (err error) {
if err != nil {
return err
}
// update access timestamp
// update the gc item timestamp in case
// it exists
_, err = db.gcIndex.Get(item)
item.AccessTimestamp = now()
// update retrieve access index
err = db.retrievalAccessIndex.PutInBatch(batch, item)
if err != nil {
if err == nil {
err = db.gcIndex.PutInBatch(batch, item)
if err != nil {
return err
}
} else if !errors.Is(err, leveldb.ErrNotFound) {
return err
}
// if the item is not in the gc we don't
// update the gc index, since the item is
// in the reserve.
// add new entry to gc index ONLY if it is not present in pinIndex
ok, err := db.pinIndex.Has(item)
// update retrieve access index
err = db.retrievalAccessIndex.PutInBatch(batch, item)
if err != nil {
return err
}
if !ok {
err = db.gcIndex.PutInBatch(batch, item)
if err != nil {
return err
}
}
return db.shed.WriteBatch(batch)
}
......
......@@ -21,6 +21,7 @@ import (
"errors"
"time"
"github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/shed"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
......@@ -50,7 +51,8 @@ func (db *DB) GetMulti(ctx context.Context, mode storage.ModeGet, addrs ...swarm
}
chunks = make([]swarm.Chunk, len(out))
for i, ch := range out {
chunks[i] = swarm.NewChunk(swarm.NewAddress(ch.Address), ch.Data)
chunks[i] = swarm.NewChunk(swarm.NewAddress(ch.Address), ch.Data).
WithStamp(postage.NewStamp(ch.BatchID, ch.Sig))
}
return chunks, nil
}
......
......@@ -22,11 +22,14 @@ import (
"testing"
"time"
"github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/shed"
"github.com/ethersphere/bee/pkg/storage"
)
// TestModeGetRequest validates ModeGetRequest index values on the provided DB.
func TestModeGetRequest(t *testing.T) {
t.Cleanup(setWithinRadiusFunc(func(_ *DB, _ shed.Item) bool { return false }))
db := newTestDB(t, nil)
uploadTimestamp := time.Now().UTC().UnixNano()
......@@ -35,6 +38,10 @@ func TestModeGetRequest(t *testing.T) {
})()
ch := generateTestRandomChunk()
// call unreserve on the batch with radius 0 so that
// localstore is aware of the batch and the chunk can
// be inserted into the database
unreserveChunkBatch(t, db, 0, ch)
_, err := db.Put(context.Background(), storage.ModePutUpload, ch)
if err != nil {
......@@ -98,8 +105,9 @@ func TestModeGetRequest(t *testing.T) {
t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, ch, uploadTimestamp, uploadTimestamp))
t.Run("gc index", newGCIndexTest(db, ch, uploadTimestamp, uploadTimestamp, 1, nil))
t.Run("gc index", newGCIndexTest(db, ch, uploadTimestamp, uploadTimestamp, 1, nil, postage.NewStamp(ch.Stamp().BatchID(), nil)))
t.Run("access count", newItemsCountTest(db.retrievalAccessIndex, 1))
t.Run("gc index count", newItemsCountTest(db.gcIndex, 1))
t.Run("gc size", newIndexGCSizeTest(db))
......@@ -128,8 +136,9 @@ func TestModeGetRequest(t *testing.T) {
t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, ch, uploadTimestamp, accessTimestamp))
t.Run("gc index", newGCIndexTest(db, ch, uploadTimestamp, accessTimestamp, 1, nil))
t.Run("gc index", newGCIndexTest(db, ch, uploadTimestamp, accessTimestamp, 1, nil, postage.NewStamp(ch.Stamp().BatchID(), nil)))
t.Run("access count", newItemsCountTest(db.retrievalAccessIndex, 1))
t.Run("gc index count", newItemsCountTest(db.gcIndex, 1))
t.Run("gc size", newIndexGCSizeTest(db))
......@@ -153,7 +162,7 @@ func TestModeGetRequest(t *testing.T) {
t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, ch, uploadTimestamp, uploadTimestamp))
t.Run("gc index", newGCIndexTest(db, ch, uploadTimestamp, uploadTimestamp, 1, nil))
t.Run("gc index", newGCIndexTest(db, ch, uploadTimestamp, uploadTimestamp, 1, nil, postage.NewStamp(ch.Stamp().BatchID(), nil)))
t.Run("gc index count", newItemsCountTest(db.gcIndex, 1))
......
......@@ -91,25 +91,21 @@ func (db *DB) put(mode storage.ModePut, chs ...swarm.Chunk) (exist []bool, err e
binIDs := make(map[uint8]uint64)
switch mode {
case storage.ModePutRequest, storage.ModePutRequestPin:
case storage.ModePutRequest, storage.ModePutRequestPin, storage.ModePutRequestCache:
for i, ch := range chs {
if containsChunk(ch.Address(), chs[:i]...) {
exist[i] = true
continue
}
exists, c, err := db.putRequest(batch, binIDs, chunkToItem(ch))
item := chunkToItem(ch)
pin := mode == storage.ModePutRequestPin // force pin in this mode
cache := mode == storage.ModePutRequestCache // force cache
exists, c, err := db.putRequest(batch, binIDs, item, pin, cache)
if err != nil {
return nil, err
}
exist[i] = exists
gcSizeChange += c
if mode == storage.ModePutRequestPin {
err = db.setPin(batch, ch.Address())
if err != nil {
return nil, err
}
}
}
case storage.ModePutUpload, storage.ModePutUploadPin:
......@@ -118,7 +114,8 @@ func (db *DB) put(mode storage.ModePut, chs ...swarm.Chunk) (exist []bool, err e
exist[i] = true
continue
}
exists, c, err := db.putUpload(batch, binIDs, chunkToItem(ch))
item := chunkToItem(ch)
exists, c, err := db.putUpload(batch, binIDs, item)
if err != nil {
return nil, err
}
......@@ -131,11 +128,12 @@ func (db *DB) put(mode storage.ModePut, chs ...swarm.Chunk) (exist []bool, err e
}
gcSizeChange += c
if mode == storage.ModePutUploadPin {
err = db.setPin(batch, ch.Address())
c, err = db.setPin(batch, item)
if err != nil {
return nil, err
}
}
gcSizeChange += c
}
case storage.ModePutSync:
......@@ -189,12 +187,12 @@ func (db *DB) put(mode storage.ModePut, chs ...swarm.Chunk) (exist []bool, err e
// - it does not enter the syncpool
// The batch can be written to the database.
// Provided batch and binID map are updated.
func (db *DB) putRequest(batch *leveldb.Batch, binIDs map[uint8]uint64, item shed.Item) (exists bool, gcSizeChange int64, err error) {
has, err := db.retrievalDataIndex.Has(item)
func (db *DB) putRequest(batch *leveldb.Batch, binIDs map[uint8]uint64, item shed.Item, forcePin, forceCache bool) (exists bool, gcSizeChange int64, err error) {
exists, err = db.retrievalDataIndex.Has(item)
if err != nil {
return false, 0, err
}
if has {
if exists {
return true, 0, nil
}
......@@ -203,17 +201,35 @@ func (db *DB) putRequest(batch *leveldb.Batch, binIDs map[uint8]uint64, item she
if err != nil {
return false, 0, err
}
err = db.retrievalDataIndex.PutInBatch(batch, item)
if err != nil {
return false, 0, err
}
err = db.postageChunksIndex.PutInBatch(batch, item)
if err != nil {
return false, 0, err
}
gcSizeChange, err = db.setGC(batch, item)
item.AccessTimestamp = now()
err = db.retrievalAccessIndex.PutInBatch(batch, item)
if err != nil {
return false, 0, err
}
err = db.retrievalDataIndex.PutInBatch(batch, item)
gcSizeChange, err = db.preserveOrCache(batch, item, forcePin, forceCache)
if err != nil {
return false, 0, err
}
if !forceCache {
// if we are here it means the chunk has a valid stamp
// therefore we'd like to be able to pullsync it
err = db.pullIndex.PutInBatch(batch, item)
if err != nil {
return false, 0, err
}
}
return false, gcSizeChange, nil
}
......@@ -248,6 +264,10 @@ func (db *DB) putUpload(batch *leveldb.Batch, binIDs map[uint8]uint64, item shed
return false, 0, err
}
err = db.postageChunksIndex.PutInBatch(batch, item)
if err != nil {
return false, 0, err
}
return false, 0, nil
}
......@@ -277,7 +297,18 @@ func (db *DB) putSync(batch *leveldb.Batch, binIDs map[uint8]uint64, item shed.I
if err != nil {
return false, 0, err
}
gcSizeChange, err = db.setGC(batch, item)
err = db.postageChunksIndex.PutInBatch(batch, item)
if err != nil {
return false, 0, err
}
item.AccessTimestamp = now()
err = db.retrievalAccessIndex.PutInBatch(batch, item)
if err != nil {
return false, 0, err
}
gcSizeChange, err = db.preserveOrCache(batch, item, false, false)
if err != nil {
return false, 0, err
}
......@@ -285,38 +316,20 @@ func (db *DB) putSync(batch *leveldb.Batch, binIDs map[uint8]uint64, item shed.I
return false, gcSizeChange, nil
}
// setGC is a helper function used to add chunks to the retrieval access
// index and the gc index in the cases that the putToGCCheck condition
// warrants a gc set. this is to mitigate index leakage in edge cases where
// a chunk is added to a node's localstore and given that the chunk is
// already within that node's NN (thus, it can be added to the gc index
// safely)
func (db *DB) setGC(batch *leveldb.Batch, item shed.Item) (gcSizeChange int64, err error) {
if item.BinID == 0 {
i, err := db.retrievalDataIndex.Get(item)
if err != nil {
return 0, err
}
item.BinID = i.BinID
}
i, err := db.retrievalAccessIndex.Get(item)
switch {
case err == nil:
item.AccessTimestamp = i.AccessTimestamp
err = db.gcIndex.DeleteInBatch(batch, item)
if err != nil {
return 0, err
}
gcSizeChange--
case errors.Is(err, leveldb.ErrNotFound):
// the chunk is not accessed before
default:
return 0, err
}
item.AccessTimestamp = now()
err = db.retrievalAccessIndex.PutInBatch(batch, item)
// preserveOrCache is a helper function used to add chunks to either a pinned reserve or gc cache
// (the retrieval access index and the gc index)
func (db *DB) preserveOrCache(batch *leveldb.Batch, item shed.Item, forcePin, forceCache bool) (gcSizeChange int64, err error) {
// item needs to be populated with Radius
item2, err := db.postageRadiusIndex.Get(item)
if err != nil {
return 0, err
// if there's an error, assume the chunk needs to be GCd
forceCache = true
} else {
item.Radius = item2.Radius
}
if !forceCache && (withinRadiusFn(db, item) || forcePin) {
return db.setPin(batch, item)
}
// add new entry to gc index ONLY if it is not present in pinIndex
......@@ -324,13 +337,21 @@ func (db *DB) setGC(batch *leveldb.Batch, item shed.Item) (gcSizeChange int64, e
if err != nil {
return 0, err
}
if !ok {
err = db.gcIndex.PutInBatch(batch, item)
if err != nil {
return 0, err
}
gcSizeChange++
if ok {
return gcSizeChange, nil
}
exists, err := db.gcIndex.Has(item)
if err != nil && !errors.Is(err, leveldb.ErrNotFound) {
return 0, err
}
if exists {
return 0, nil
}
err = db.gcIndex.PutInBatch(batch, item)
if err != nil {
return 0, err
}
gcSizeChange++
return gcSizeChange, nil
}
......
......@@ -24,6 +24,7 @@ import (
"testing"
"time"
"github.com/ethersphere/bee/pkg/shed"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/syndtr/goleveldb/leveldb"
......@@ -31,11 +32,16 @@ import (
// TestModePutRequest validates ModePutRequest index values on the provided DB.
func TestModePutRequest(t *testing.T) {
t.Cleanup(setWithinRadiusFunc(func(_ *DB, _ shed.Item) bool { return false }))
for _, tc := range multiChunkTestCases {
t.Run(tc.name, func(t *testing.T) {
db := newTestDB(t, nil)
chunks := generateTestRandomChunks(tc.count)
// call unreserve on the batch with radius 0 so that
// localstore is aware of the batch and the chunk can
// be inserted into the database
unreserveChunkBatch(t, db, 0, chunks...)
// keep the record when the chunk is stored
var storeTimestamp int64
......@@ -58,6 +64,7 @@ func TestModePutRequest(t *testing.T) {
}
newItemsCountTest(db.gcIndex, tc.count)(t)
newItemsCountTest(db.pullIndex, tc.count)(t)
newIndexGCSizeTest(db)(t)
})
......@@ -77,6 +84,7 @@ func TestModePutRequest(t *testing.T) {
}
newItemsCountTest(db.gcIndex, tc.count)(t)
newItemsCountTest(db.pullIndex, tc.count)(t)
newIndexGCSizeTest(db)(t)
})
})
......@@ -85,17 +93,21 @@ func TestModePutRequest(t *testing.T) {
// TestModePutRequestPin validates ModePutRequestPin index values on the provided DB.
func TestModePutRequestPin(t *testing.T) {
t.Cleanup(setWithinRadiusFunc(func(_ *DB, _ shed.Item) bool { return false }))
for _, tc := range multiChunkTestCases {
t.Run(tc.name, func(t *testing.T) {
db := newTestDB(t, nil)
chunks := generateTestRandomChunks(tc.count)
// call unreserve on the batch with radius 0 so that
// localstore is aware of the batch and the chunk can
// be inserted into the database
unreserveChunkBatch(t, db, 0, chunks...)
wantTimestamp := time.Now().UTC().UnixNano()
defer setNow(func() (t int64) {
return wantTimestamp
})()
_, err := db.Put(context.Background(), storage.ModePutRequestPin, chunks...)
if err != nil {
t.Fatal(err)
......@@ -106,6 +118,46 @@ func TestModePutRequestPin(t *testing.T) {
newPinIndexTest(db, ch, nil)(t)
}
// gc index should be always 0 since we're pinning
newItemsCountTest(db.gcIndex, 0)(t)
})
}
}
// TestModePutRequestCache validates ModePutRequestCache index values on the provided DB.
func TestModePutRequestCache(t *testing.T) {
// note: we set WithinRadius to be true, and verify that nevertheless
// the chunk lands in the cache
t.Cleanup(setWithinRadiusFunc(func(_ *DB, _ shed.Item) bool { return true }))
for _, tc := range multiChunkTestCases {
t.Run(tc.name, func(t *testing.T) {
db := newTestDB(t, nil)
var chunks []swarm.Chunk
for i := 0; i < tc.count; i++ {
chunk := generateTestRandomChunkAt(swarm.NewAddress(db.baseKey), 2)
chunks = append(chunks, chunk)
}
// call unreserve on the batch with radius 0 so that
// localstore is aware of the batch and the chunk can
// be inserted into the database. in the following case
// the radius is 2, and since chunk PO is 2, it falls within
// radius.
unreserveChunkBatch(t, db, 2, chunks...)
wantTimestamp := time.Now().UTC().UnixNano()
defer setNow(func() (t int64) {
return wantTimestamp
})()
_, err := db.Put(context.Background(), storage.ModePutRequestCache, chunks...)
if err != nil {
t.Fatal(err)
}
for _, ch := range chunks {
newRetrieveIndexesTestWithAccess(db, ch, wantTimestamp, wantTimestamp)(t)
newPinIndexTest(db, ch, leveldb.ErrNotFound)(t)
}
newItemsCountTest(db.gcIndex, tc.count)(t)
})
}
......@@ -113,6 +165,7 @@ func TestModePutRequestPin(t *testing.T) {
// TestModePutSync validates ModePutSync index values on the provided DB.
func TestModePutSync(t *testing.T) {
t.Cleanup(setWithinRadiusFunc(func(_ *DB, _ shed.Item) bool { return false }))
for _, tc := range multiChunkTestCases {
t.Run(tc.name, func(t *testing.T) {
db := newTestDB(t, nil)
......@@ -123,6 +176,10 @@ func TestModePutSync(t *testing.T) {
})()
chunks := generateTestRandomChunks(tc.count)
// call unreserve on the batch with radius 0 so that
// localstore is aware of the batch and the chunk can
// be inserted into the database
unreserveChunkBatch(t, db, 0, chunks...)
_, err := db.Put(context.Background(), storage.ModePutSync, chunks...)
if err != nil {
......@@ -141,6 +198,8 @@ func TestModePutSync(t *testing.T) {
newItemsCountTest(db.gcIndex, tc.count)(t)
newIndexGCSizeTest(db)(t)
}
newItemsCountTest(db.gcIndex, tc.count)(t)
newIndexGCSizeTest(db)(t)
})
}
}
......@@ -157,6 +216,10 @@ func TestModePutUpload(t *testing.T) {
})()
chunks := generateTestRandomChunks(tc.count)
// call unreserve on the batch with radius 0 so that
// localstore is aware of the batch and the chunk can
// be inserted into the database
unreserveChunkBatch(t, db, 0, chunks...)
_, err := db.Put(context.Background(), storage.ModePutUpload, chunks...)
if err != nil {
......@@ -190,6 +253,10 @@ func TestModePutUploadPin(t *testing.T) {
})()
chunks := generateTestRandomChunks(tc.count)
// call unreserve on the batch with radius 0 so that
// localstore is aware of the batch and the chunk can
// be inserted into the database
unreserveChunkBatch(t, db, 0, chunks...)
_, err := db.Put(context.Background(), storage.ModePutUploadPin, chunks...)
if err != nil {
......@@ -270,6 +337,11 @@ func TestModePutUpload_parallel(t *testing.T) {
go func() {
for i := 0; i < uploadsCount; i++ {
chs := generateTestRandomChunks(tc.count)
// call unreserve on the batch with radius 0 so that
// localstore is aware of the batch and the chunk can
// be inserted into the database
unreserveChunkBatch(t, db, 0, chunks...)
select {
case chunksChan <- chs:
case <-doneChan:
......@@ -308,8 +380,9 @@ func TestModePutUpload_parallel(t *testing.T) {
}
// TestModePut_sameChunk puts the same chunk multiple times
// and validates that all relevant indexes have only one item
// in them.
// and validates that all relevant indexes have the correct counts.
// The test assumes that chunk fall into the reserve part of
// the store.
func TestModePut_sameChunk(t *testing.T) {
for _, tc := range multiChunkTestCases {
t.Run(tc.name, func(t *testing.T) {
......@@ -324,6 +397,18 @@ func TestModePut_sameChunk(t *testing.T) {
{
name: "ModePutRequest",
mode: storage.ModePutRequest,
pullIndex: true,
pushIndex: false,
},
{
name: "ModePutRequestPin",
mode: storage.ModePutRequest,
pullIndex: true,
pushIndex: false,
},
{
name: "ModePutRequestCache",
mode: storage.ModePutRequestCache,
pullIndex: false,
pushIndex: false,
},
......@@ -342,6 +427,10 @@ func TestModePut_sameChunk(t *testing.T) {
} {
t.Run(tcn.name, func(t *testing.T) {
db := newTestDB(t, nil)
// call unreserve on the batch with radius 0 so that
// localstore is aware of the batch and the chunk can
// be inserted into the database
unreserveChunkBatch(t, db, 0, chunks...)
for i := 0; i < 10; i++ {
exist, err := db.Put(context.Background(), tcn.mode, chunks...)
......@@ -390,6 +479,7 @@ func TestPutDuplicateChunks(t *testing.T) {
db := newTestDB(t, nil)
ch := generateTestRandomChunk()
unreserveChunkBatch(t, db, 0, ch)
exist, err := db.Put(context.Background(), mode, ch, ch)
if err != nil {
......
This diff is collapsed.
......@@ -19,78 +19,12 @@ package localstore
import (
"context"
"errors"
"io/ioutil"
"testing"
"github.com/ethersphere/bee/pkg/logging"
statestore "github.com/ethersphere/bee/pkg/statestore/mock"
"github.com/ethersphere/bee/pkg/shed"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/tags"
tagtesting "github.com/ethersphere/bee/pkg/tags/testing"
"github.com/syndtr/goleveldb/leveldb"
)
// TestModeSetSyncNormalTag sets a normal tag (one that should be handled by
// pushsync) on an uploaded chunk and verifies that the tag Uid recorded in
// the pull index stays untouched — i.e. the tag is not incremented by the
// pull-sync set operation.
func TestModeSetSyncNormalTag(t *testing.T) {
	stateStore := statestore.NewStateStore()
	log := logging.New(ioutil.Discard, 0)
	db := newTestDB(t, &Options{Tags: tags.NewTags(stateStore, log)})

	tg, err := db.tags.Create(1)
	if err != nil {
		t.Fatal(err)
	}

	ch := generateTestRandomChunk().WithTagID(tg.Uid)
	if _, err = db.Put(context.Background(), storage.ModePutUpload, ch); err != nil {
		t.Fatal(err)
	}

	// increment manually so we don't get an error on tag.Status later on
	if err = tg.Inc(tags.StateStored); err != nil {
		t.Fatal(err)
	}

	// pullTag fetches the chunk's pull index entry and returns the tag Uid
	// stored alongside it.
	pullTag := func() uint32 {
		t.Helper()
		it, err := db.pullIndex.Get(shed.Item{
			Address: ch.Address().Bytes(),
			BinID:   1,
		})
		if err != nil {
			t.Fatal(err)
		}
		return it.Tag
	}

	if got := pullTag(); got != tg.Uid {
		t.Fatalf("unexpected tag id value got %d want %d", got, tg.Uid)
	}

	if err = db.Set(context.Background(), storage.ModeSetSync, ch.Address()); err != nil {
		t.Fatal(err)
	}

	// expect the same tag Uid because when we set pull sync on a normal tag
	// the tag Uid should remain untouched in pull index
	if got := pullTag(); got != tg.Uid {
		t.Fatalf("unexpected tag id value got %d want %d", got, tg.Uid)
	}

	// 1 stored (because incremented manually in test), 1 sent, 1 synced, 1 total
	tagtesting.CheckTag(t, tg, 0, 1, 0, 1, 1, 1)
}
// TestModeSetRemove validates ModeSetRemove index values on the provided DB.
func TestModeSetRemove(t *testing.T) {
for _, tc := range multiChunkTestCases {
......
package localstore
import (
"errors"
"github.com/ethersphere/bee/pkg/shed"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/syndtr/goleveldb/leveldb"
)
// pinCounter returns the pin counter for a given swarm address, provided that
// the address has been pinned. If the address is not present in the pin
// index, storage.ErrNotFound is returned.
func (db *DB) pinCounter(address swarm.Address) (uint64, error) {
	entry, err := db.pinIndex.Get(shed.Item{Address: address.Bytes()})
	switch {
	case err == nil:
		return entry.PinCounter, nil
	case errors.Is(err, leveldb.ErrNotFound):
		// translate the leveldb sentinel into the storage-level one
		return 0, storage.ErrNotFound
	default:
		return 0, err
	}
}
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package localstore
import (
"context"
"errors"
"testing"
"github.com/ethersphere/bee/pkg/shed"
"github.com/ethersphere/bee/pkg/storage"
)
// TestPinCounter validates that the pin counter of a chunk is incremented on
// each ModeSetPin, decremented on each ModeSetUnpin, and that the pin index
// entry disappears (storage.ErrNotFound) once the counter reaches zero.
func TestPinCounter(t *testing.T) {
	chunk := generateTestRandomChunk()
	db := newTestDB(t, nil)
	addr := chunk.Address()
	ctx := context.Background()
	_, err := db.Put(ctx, storage.ModePutUpload, chunk)
	if err != nil {
		t.Fatal(err)
	}
	var pinCounter uint64
	t.Run("+1 after first pin", func(t *testing.T) {
		err := db.Set(ctx, storage.ModeSetPin, addr)
		if err != nil {
			t.Fatal(err)
		}
		pinCounter, err = db.pinCounter(addr)
		if err != nil {
			t.Fatal(err)
		}
		if pinCounter != 1 {
			t.Fatalf("want pin counter %d but got %d", 1, pinCounter)
		}
	})
	t.Run("2 after second pin", func(t *testing.T) {
		err = db.Set(ctx, storage.ModeSetPin, addr)
		if err != nil {
			t.Fatal(err)
		}
		pinCounter, err = db.pinCounter(addr)
		if err != nil {
			t.Fatal(err)
		}
		if pinCounter != 2 {
			t.Fatalf("want pin counter %d but got %d", 2, pinCounter)
		}
	})
	t.Run("1 after first unpin", func(t *testing.T) {
		err = db.Set(ctx, storage.ModeSetUnpin, addr)
		if err != nil {
			t.Fatal(err)
		}
		pinCounter, err = db.pinCounter(addr)
		if err != nil {
			t.Fatal(err)
		}
		if pinCounter != 1 {
			t.Fatalf("want pin counter %d but got %d", 1, pinCounter)
		}
	})
	t.Run("not found after second unpin", func(t *testing.T) {
		err = db.Set(ctx, storage.ModeSetUnpin, addr)
		if err != nil {
			t.Fatal(err)
		}
		// counter dropped to zero, so the pin index entry must be gone
		_, err = db.pinCounter(addr)
		if !errors.Is(err, storage.ErrNotFound) {
			t.Fatal(err)
		}
	})
}
// TestPinIndexes validates the counts of all localstore indexes after each
// mode-set operation on a single chunk that is synced before being pinned:
// upload, sync, pin (twice), unpin (twice).
func TestPinIndexes(t *testing.T) {
	ctx := context.Background()

	// keep chunks out of the reserve so regular gc/pin accounting applies
	t.Cleanup(setWithinRadiusFunc(func(_ *DB, _ shed.Item) bool { return false }))

	db := newTestDB(t, &Options{
		Capacity: 150,
	})

	ch := generateTestRandomChunk()
	// call unreserve on the batch with radius 0 so that
	// localstore is aware of the batch and the chunk can
	// be inserted into the database
	unreserveChunkBatch(t, db, 0, ch)

	addr := ch.Address()
	_, err := db.Put(ctx, storage.ModePutUpload, ch)
	if err != nil {
		t.Fatal(err)
	}
	// freshly uploaded: present in retrieval, push and pull indexes only
	runCountsTest(t, "putUpload", db, 1, 0, 1, 1, 0, 0)

	err = db.Set(ctx, storage.ModeSetSync, addr)
	if err != nil {
		t.Fatal(err)
	}
	// synced: leaves the push index, gains an access entry and becomes gc-able
	runCountsTest(t, "setSync", db, 1, 1, 0, 1, 0, 1)

	err = db.Set(ctx, storage.ModeSetPin, addr)
	if err != nil {
		t.Fatal(err)
	}
	// pinned: enters the pin index and is removed from the gc index
	runCountsTest(t, "setPin", db, 1, 1, 0, 1, 1, 0)

	err = db.Set(ctx, storage.ModeSetPin, addr)
	if err != nil {
		t.Fatal(err)
	}
	// second pin only bumps the counter; index counts are unchanged
	runCountsTest(t, "setPin 2", db, 1, 1, 0, 1, 1, 0)

	err = db.Set(ctx, storage.ModeSetUnpin, addr)
	if err != nil {
		t.Fatal(err)
	}
	// still pinned once, so not yet back in the gc index
	runCountsTest(t, "setUnPin", db, 1, 1, 0, 1, 1, 0)

	err = db.Set(ctx, storage.ModeSetUnpin, addr)
	if err != nil {
		t.Fatal(err)
	}
	// fully unpinned: leaves the pin index and re-enters the gc index
	runCountsTest(t, "setUnPin 2", db, 1, 1, 0, 1, 0, 1)
}
// TestPinIndexesSync validates the counts of all localstore indexes when a
// chunk is pinned and unpinned before it is synced, and again after syncing.
func TestPinIndexesSync(t *testing.T) {
	ctx := context.Background()

	// keep chunks out of the reserve so regular gc/pin accounting applies
	t.Cleanup(setWithinRadiusFunc(func(_ *DB, _ shed.Item) bool { return false }))

	db := newTestDB(t, &Options{
		Capacity: 150,
	})

	ch := generateTestRandomChunk()
	// call unreserve on the batch with radius 0 so that
	// localstore is aware of the batch and the chunk can
	// be inserted into the database
	unreserveChunkBatch(t, db, 0, ch)

	addr := ch.Address()
	_, err := db.Put(ctx, storage.ModePutUpload, ch)
	if err != nil {
		t.Fatal(err)
	}
	// freshly uploaded: present in retrieval, push and pull indexes only
	runCountsTest(t, "putUpload", db, 1, 0, 1, 1, 0, 0)

	err = db.Set(ctx, storage.ModeSetPin, addr)
	if err != nil {
		t.Fatal(err)
	}
	// pinned before sync: still in the push index, never gc-able
	runCountsTest(t, "setPin", db, 1, 0, 1, 1, 1, 0)

	err = db.Set(ctx, storage.ModeSetPin, addr)
	if err != nil {
		t.Fatal(err)
	}
	runCountsTest(t, "setPin 2", db, 1, 0, 1, 1, 1, 0)

	err = db.Set(ctx, storage.ModeSetUnpin, addr)
	if err != nil {
		t.Fatal(err)
	}
	// one pin remains
	runCountsTest(t, "setUnPin", db, 1, 0, 1, 1, 1, 0)

	err = db.Set(ctx, storage.ModeSetUnpin, addr)
	if err != nil {
		t.Fatal(err)
	}
	// fully unpinned but not synced: no gc entry yet
	runCountsTest(t, "setUnPin 2", db, 1, 0, 1, 1, 0, 0)

	err = db.Set(ctx, storage.ModeSetPin, addr)
	if err != nil {
		t.Fatal(err)
	}
	runCountsTest(t, "setPin 3", db, 1, 0, 1, 1, 1, 0)

	err = db.Set(ctx, storage.ModeSetSync, addr)
	if err != nil {
		t.Fatal(err)
	}
	// synced while pinned: leaves push index, gains access entry, stays out of gc
	runCountsTest(t, "setSync", db, 1, 1, 0, 1, 1, 0)

	err = db.Set(ctx, storage.ModeSetUnpin, addr)
	if err != nil {
		t.Fatal(err)
	}
	// unpinned after sync: becomes gc-able
	runCountsTest(t, "setUnPin", db, 1, 1, 0, 1, 0, 1)
}
// runCountsTest runs a named group of subtests asserting the expected number
// of entries in each localstore index, plus the gc-size invariant.
func runCountsTest(t *testing.T, name string, db *DB, r, a, push, pull, pin, gc int) {
	t.Helper()
	t.Run(name, func(t *testing.T) {
		t.Helper()
		for _, sub := range []struct {
			name  string
			check func(*testing.T)
		}{
			{"retrieval data Index count", newItemsCountTest(db.retrievalDataIndex, r)},
			{"retrieval access Index count", newItemsCountTest(db.retrievalAccessIndex, a)},
			{"push Index count", newItemsCountTest(db.pushIndex, push)},
			{"pull Index count", newItemsCountTest(db.pullIndex, pull)},
			{"pin Index count", newItemsCountTest(db.pinIndex, pin)},
			{"gc index count", newItemsCountTest(db.gcIndex, gc)},
			{"gc size", newIndexGCSizeTest(db)},
		} {
			t.Run(sub.name, sub.check)
		}
	})
}
// Copyright 2021 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package localstore
import (
"errors"
"fmt"
"github.com/ethersphere/bee/pkg/shed"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/syndtr/goleveldb/leveldb"
)
// UnreserveBatch unpins chunks of a batch, bin by bin, for all proximity-order
// bins from the batch's previously stored radius up to (but not including) the
// given radius. Each bin is committed in its own leveldb write batch.
// Unpinning will result in all chunks with pincounter 0 to be put in the gc index
// so if a chunk was only pinned by the reserve, unreserving it will make it gc-able.
func (db *DB) UnreserveBatch(id []byte, radius uint8) error {
	db.batchMu.Lock()
	defer db.batchMu.Unlock()
	var (
		item = shed.Item{
			BatchID: id,
		}
		batch = new(leveldb.Batch)
	)

	// a missing radius index entry means this batch is new to localstore:
	// just record the given radius and return without unpinning anything.
	i, err := db.postageRadiusIndex.Get(item)
	if err != nil {
		if !errors.Is(err, leveldb.ErrNotFound) {
			return err
		}
		item.Radius = radius
		if err := db.postageRadiusIndex.PutInBatch(batch, item); err != nil {
			return err
		}
		return db.shed.WriteBatch(batch)
	}
	oldRadius := i.Radius
	var gcSizeChange int64 // number to add or subtract from gcSize

	// unpin accumulates, per bin, the gc-size delta produced by unpinning
	// each chunk of the batch; it never stops the iteration early.
	unpin := func(item shed.Item) (stop bool, err error) {
		c, err := db.setUnpin(batch, swarm.NewAddress(item.Address))
		if err != nil {
			return false, fmt.Errorf("unpin: %w", err)
		}
		gcSizeChange += c
		return false, err
	}

	// iterate over chunk in bins
	for bin := oldRadius; bin < radius; bin++ {
		err := db.postageChunksIndex.Iterate(unpin, &shed.IterateOptions{Prefix: append(id, bin)})
		if err != nil {
			return err
		}
		// adjust gcSize
		if err := db.incGCSizeInBatch(batch, gcSizeChange); err != nil {
			return err
		}
		// NOTE(review): the stored radius is the bin just processed, not
		// bin+1, so an interrupted run would re-process this bin on the
		// next call — presumably unpinning is tolerant of that; verify.
		item.Radius = bin
		if err := db.postageRadiusIndex.PutInBatch(batch, item); err != nil {
			return err
		}
		if bin == swarm.MaxPO {
			if err := db.postageRadiusIndex.DeleteInBatch(batch, item); err != nil {
				return err
			}
		}
		// commit this bin's unpins and radius update atomically, then
		// start a fresh write batch and gc-size accumulator for the next bin
		if err := db.shed.WriteBatch(batch); err != nil {
			return err
		}
		batch = new(leveldb.Batch)
		gcSizeChange = 0
	}

	gcSize, err := db.gcSize.Get()
	if err != nil && !errors.Is(err, leveldb.ErrNotFound) {
		return err
	}
	// trigger garbage collection if we reached the capacity
	if gcSize >= db.capacity {
		db.triggerGarbageCollection()
	}
	return nil
}
// withinRadius reports whether the item's proximity order, measured against
// the node's base address, is at least the item's radius.
func withinRadius(db *DB, item shed.Item) bool {
	return db.po(swarm.NewAddress(item.Address)) >= item.Radius
}
This diff is collapsed.
......@@ -22,6 +22,7 @@ import (
"time"
"github.com/ethersphere/bee/pkg/flipflop"
"github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/shed"
"github.com/ethersphere/bee/pkg/swarm"
)
......@@ -75,8 +76,9 @@ func (db *DB) SubscribePush(ctx context.Context) (c <-chan swarm.Chunk, stop fun
return true, err
}
stamp := postage.NewStamp(dataItem.BatchID, dataItem.Sig)
select {
case chunks <- swarm.NewChunk(swarm.NewAddress(dataItem.Address), dataItem.Data).WithTagID(item.Tag):
case chunks <- swarm.NewChunk(swarm.NewAddress(dataItem.Address), dataItem.Data).WithTagID(item.Tag).WithStamp(stamp):
count++
// set next iteration start item
// when its chunk is successfully sent to channel
......
......@@ -19,6 +19,7 @@ package localstore
import (
"bytes"
"context"
"errors"
"fmt"
"sync"
"testing"
......@@ -74,8 +75,11 @@ func TestDB_SubscribePush(t *testing.T) {
// receive and validate addresses from the subscription
go func() {
var err error
var i int // address index
var (
err, ierr error
i int // address index
gotStamp, wantStamp []byte
)
for {
select {
case got, ok := <-ch:
......@@ -93,6 +97,16 @@ func TestDB_SubscribePush(t *testing.T) {
if !got.Address().Equal(want.Address()) {
err = fmt.Errorf("got chunk %v address %s, want %s", i, got.Address(), want.Address())
}
if gotStamp, ierr = got.Stamp().MarshalBinary(); ierr != nil {
err = ierr
}
if wantStamp, ierr = want.Stamp().MarshalBinary(); ierr != nil {
err = ierr
}
if !bytes.Equal(gotStamp, wantStamp) {
err = errors.New("stamps don't match")
}
i++
// send one and only one error per received address
select {
......
......@@ -25,6 +25,7 @@ type store struct {
storage.Storer
retrieval retrieval.Interface
logger logging.Logger
validStamp func(swarm.Chunk, []byte) (swarm.Chunk, error)
recoveryCallback recovery.Callback // this is the callback to be executed when a chunk fails to be retrieved
}
......@@ -33,8 +34,8 @@ var (
)
// New returns a new NetStore that wraps a given Storer.
func New(s storage.Storer, rcb recovery.Callback, r retrieval.Interface, logger logging.Logger) storage.Storer {
return &store{Storer: s, recoveryCallback: rcb, retrieval: r, logger: logger}
func New(s storage.Storer, validStamp func(swarm.Chunk, []byte) (swarm.Chunk, error), rcb recovery.Callback, r retrieval.Interface, logger logging.Logger) storage.Storer {
return &store{Storer: s, validStamp: validStamp, recoveryCallback: rcb, retrieval: r, logger: logger}
}
// Get retrieves a given chunk address.
......@@ -55,13 +56,25 @@ func (s *store) Get(ctx context.Context, mode storage.ModeGet, addr swarm.Addres
go s.recoveryCallback(addr, targets)
return nil, ErrRecoveryAttempt
}
stamp, err := ch.Stamp().MarshalBinary()
if err != nil {
return nil, err
}
putMode := storage.ModePutRequest
if mode == storage.ModeGetRequestPin {
putMode = storage.ModePutRequestPin
}
_, err = s.Storer.Put(ctx, putMode, ch)
cch, err := s.validStamp(ch, stamp)
if err != nil {
// if a chunk with an invalid postage stamp was received
// we force it into the cache.
putMode = storage.ModePutRequestCache
cch = ch
}
_, err = s.Storer.Put(ctx, putMode, cch)
if err != nil {
return nil, fmt.Errorf("netstore retrieve put: %w", err)
}
......
......@@ -16,6 +16,7 @@ import (
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/netstore"
postagetesting "github.com/ethersphere/bee/pkg/postage/testing"
"github.com/ethersphere/bee/pkg/pss"
"github.com/ethersphere/bee/pkg/recovery"
"github.com/ethersphere/bee/pkg/sctx"
......@@ -25,11 +26,12 @@ import (
)
var chunkData = []byte("mockdata")
var chunkStamp = postagetesting.MustNewStamp()
// TestNetstoreRetrieval verifies that a chunk is asked from the network whenever
// it is not found locally
func TestNetstoreRetrieval(t *testing.T) {
retrieve, store, nstore := newRetrievingNetstore(nil)
retrieve, store, nstore := newRetrievingNetstore(nil, noopValidStamp)
addr := swarm.MustParseHexAddress("000001")
_, err := nstore.Get(context.Background(), storage.ModeGetRequest, addr)
if err != nil {
......@@ -73,7 +75,7 @@ func TestNetstoreRetrieval(t *testing.T) {
// TestNetstoreNoRetrieval verifies that a chunk is not requested from the network
// whenever it is found locally.
func TestNetstoreNoRetrieval(t *testing.T) {
retrieve, store, nstore := newRetrievingNetstore(nil)
retrieve, store, nstore := newRetrievingNetstore(nil, noopValidStamp)
addr := swarm.MustParseHexAddress("000001")
// store should have the chunk in advance
......@@ -103,7 +105,7 @@ func TestRecovery(t *testing.T) {
callbackC: callbackWasCalled,
}
retrieve, _, nstore := newRetrievingNetstore(rec.recovery)
retrieve, _, nstore := newRetrievingNetstore(rec.recovery, noopValidStamp)
addr := swarm.MustParseHexAddress("deadbeef")
retrieve.failure = true
ctx := context.Background()
......@@ -123,7 +125,7 @@ func TestRecovery(t *testing.T) {
}
func TestInvalidRecoveryFunction(t *testing.T) {
retrieve, _, nstore := newRetrievingNetstore(nil)
retrieve, _, nstore := newRetrievingNetstore(nil, noopValidStamp)
addr := swarm.MustParseHexAddress("deadbeef")
retrieve.failure = true
ctx := context.Background()
......@@ -135,12 +137,60 @@ func TestInvalidRecoveryFunction(t *testing.T) {
}
}
// TestInvalidPostageStamp verifies that a chunk retrieved from the network
// whose stamp fails validation is still stored locally, but is put with
// ModePutRequestCache (forced into the cache) rather than the regular
// request mode, and that subsequent gets are served locally.
func TestInvalidPostageStamp(t *testing.T) {
	// a stamp validator that rejects every chunk
	f := func(c swarm.Chunk, _ []byte) (swarm.Chunk, error) {
		return nil, errors.New("invalid postage stamp")
	}
	retrieve, store, nstore := newRetrievingNetstore(nil, f)
	addr := swarm.MustParseHexAddress("000001")

	_, err := nstore.Get(context.Background(), storage.ModeGetRequest, addr)
	if err != nil {
		t.Fatal(err)
	}
	if !retrieve.called {
		t.Fatal("retrieve request not issued")
	}
	if retrieve.callCount != 1 {
		t.Fatalf("call count %d", retrieve.callCount)
	}
	if !retrieve.addr.Equal(addr) {
		t.Fatalf("addresses not equal. got %s want %s", retrieve.addr, addr)
	}

	// store should have the chunk now
	d, err := store.Get(context.Background(), storage.ModeGetRequest, addr)
	if err != nil {
		t.Fatal(err)
	}

	if !bytes.Equal(d.Data(), chunkData) {
		t.Fatal("chunk data not equal to expected data")
	}

	// the failed stamp validation must have downgraded the put mode
	if mode := store.GetModePut(addr); mode != storage.ModePutRequestCache {
		t.Fatalf("wanted ModePutRequestCache but got %s", mode)
	}

	// check that the second call does not result in another retrieve request
	d, err = nstore.Get(context.Background(), storage.ModeGetRequest, addr)
	if err != nil {
		t.Fatal(err)
	}

	if retrieve.callCount != 1 {
		t.Fatalf("call count %d", retrieve.callCount)
	}
	if !bytes.Equal(d.Data(), chunkData) {
		t.Fatal("chunk data not equal to expected data")
	}
}
// returns a mock retrieval protocol, a mock local storage and a netstore
func newRetrievingNetstore(rec recovery.Callback) (ret *retrievalMock, mockStore, ns storage.Storer) {
func newRetrievingNetstore(rec recovery.Callback, validStamp func(swarm.Chunk, []byte) (swarm.Chunk, error)) (ret *retrievalMock, mockStore *mock.MockStorer, ns storage.Storer) {
retrieve := &retrievalMock{}
store := mock.NewStorer()
logger := logging.New(ioutil.Discard, 0)
return retrieve, store, netstore.New(store, rec, retrieve, logger)
return retrieve, store, netstore.New(store, validStamp, rec, retrieve, logger)
}
type retrievalMock struct {
......@@ -157,7 +207,7 @@ func (r *retrievalMock) RetrieveChunk(ctx context.Context, addr swarm.Address) (
r.called = true
atomic.AddInt32(&r.callCount, 1)
r.addr = addr
return swarm.NewChunk(addr, chunkData), nil
return swarm.NewChunk(addr, chunkData).WithStamp(chunkStamp), nil
}
type mockRecovery struct {
......@@ -172,3 +222,7 @@ func (mr *mockRecovery) recovery(chunkAddress swarm.Address, targets pss.Targets
func (r *mockRecovery) RetrieveChunk(ctx context.Context, addr swarm.Address) (chunk swarm.Chunk, err error) {
return nil, fmt.Errorf("chunk not found")
}
// noopValidStamp is a stamp validator that accepts every chunk unchanged.
var noopValidStamp = func(ch swarm.Chunk, _ []byte) (swarm.Chunk, error) {
	return ch, nil
}
This diff is collapsed.
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package postage
import (
	"encoding/binary"
	"errors"
	"math/big"
)
// Batch represents a postage batch, a payment on the blockchain.
type Batch struct {
ID []byte // batch ID
Value *big.Int // normalised balance of the batch
Start uint64 // block number the batch was created
Owner []byte // owner's ethereum address
Depth uint8 // batch depth, i.e., size = 2^{depth}
Radius uint8 // reserve radius, non-serialised
}
// MarshalBinary implements BinaryMarshaller. It will attempt to serialize the
// postage batch to a byte slice.
// serialised as ID(32)|big endian value(32)|start block(8)|owner addr(20)|depth(1)
func (b *Batch) MarshalBinary() ([]byte, error) {
out := make([]byte, 93)
copy(out, b.ID)
value := b.Value.Bytes()
copy(out[64-len(value):], value)
binary.BigEndian.PutUint64(out[64:72], b.Start)
copy(out[72:], b.Owner)
out[92] = b.Depth
return out, nil
}
// UnmarshalBinary implements BinaryUnmarshaller. It will attempt deserialize
// the given byte slice into the batch.
func (b *Batch) UnmarshalBinary(buf []byte) error {
b.ID = buf[:32]
b.Value = big.NewInt(0).SetBytes(buf[32:64])
b.Start = binary.BigEndian.Uint64(buf[64:72])
b.Owner = buf[72:92]
b.Depth = buf[92]
return nil
}
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package postage_test
import (
"bytes"
"testing"
"github.com/ethersphere/bee/pkg/postage"
postagetesting "github.com/ethersphere/bee/pkg/postage/testing"
)
// TestBatchMarshalling tests the idempotence of binary marshal/unmarshal for a
// Batch: every serialised field must survive a round trip, and the wire
// length must be the fixed 93 bytes (Radius is intentionally not serialised).
func TestBatchMarshalling(t *testing.T) {
	a := postagetesting.MustNewBatch()
	buf, err := a.MarshalBinary()
	if err != nil {
		t.Fatal(err)
	}
	if len(buf) != 93 {
		t.Fatalf("invalid length for serialised batch. expected 93, got %d", len(buf))
	}
	b := &postage.Batch{}
	if err := b.UnmarshalBinary(buf); err != nil {
		t.Fatalf("unexpected error unmarshalling batch: %v", err)
	}
	if !bytes.Equal(b.ID, a.ID) {
		t.Fatalf("id mismatch, expected %x, got %x", a.ID, b.ID)
	}
	if !bytes.Equal(b.Owner, a.Owner) {
		t.Fatalf("owner mismatch, expected %x, got %x", a.Owner, b.Owner)
	}
	if a.Value.Uint64() != b.Value.Uint64() {
		t.Fatalf("value mismatch, expected %d, got %d", a.Value.Uint64(), b.Value.Uint64())
	}
	if a.Start != b.Start {
		t.Fatalf("start mismatch, expected %d, got %d", a.Start, b.Start)
	}
	if a.Depth != b.Depth {
		t.Fatalf("depth mismatch, expected %d, got %d", a.Depth, b.Depth)
	}
}
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package batchservice
import (
"encoding/hex"
"fmt"
"math/big"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/postage"
)
// batchService implements postage.EventUpdater by applying blockchain batch
// and chain-state events to a postage.Storer.
type batchService struct {
	storer   postage.Storer   // persistence for batches and chain state
	logger   logging.Logger
	listener postage.Listener // source of blockchain events
}

// Interface is the exported contract of the batch service.
type Interface interface {
	postage.EventUpdater
}

// New will create a new BatchService.
func New(storer postage.Storer, logger logging.Logger, listener postage.Listener) Interface {
	return &batchService{storer, logger, listener}
}
// Create will create a new batch with the given ID, owner value and depth and
// stores it in the BatchStore.
func (svc *batchService) Create(id, owner []byte, normalisedBalance *big.Int, depth uint8) error {
	// the batch starts at the current chain-state block with a zero value;
	// the normalised balance is applied by the store on Put
	startBlock := svc.storer.GetChainState().Block
	batch := &postage.Batch{
		ID:    id,
		Owner: owner,
		Value: big.NewInt(0),
		Start: startBlock,
		Depth: depth,
	}
	if err := svc.storer.Put(batch, normalisedBalance, depth); err != nil {
		return fmt.Errorf("put: %w", err)
	}
	svc.logger.Debugf("batch service: created batch id %s", hex.EncodeToString(batch.ID))
	return nil
}
// TopUp implements the EventUpdater interface. It tops ups a batch with the
// given ID with the given amount.
func (svc *batchService) TopUp(id []byte, normalisedBalance *big.Int) error {
	batch, err := svc.storer.Get(id)
	if err != nil {
		return fmt.Errorf("get: %w", err)
	}
	// depth is unchanged; only the normalised balance is updated
	if err := svc.storer.Put(batch, normalisedBalance, batch.Depth); err != nil {
		return fmt.Errorf("put: %w", err)
	}
	svc.logger.Debugf("batch service: topped up batch id %s from %v to %v", hex.EncodeToString(batch.ID), batch.Value, normalisedBalance)
	return nil
}
// UpdateDepth implements the EventUpdater inteface. It sets the new depth of a
// batch with the given ID.
func (svc *batchService) UpdateDepth(id []byte, depth uint8, normalisedBalance *big.Int) error {
	batch, err := svc.storer.Get(id)
	if err != nil {
		return fmt.Errorf("get: %w", err)
	}
	// store the batch under its new depth together with the new balance
	if err := svc.storer.Put(batch, normalisedBalance, depth); err != nil {
		return fmt.Errorf("put: %w", err)
	}
	svc.logger.Debugf("batch service: updated depth of batch id %s from %d to %d", hex.EncodeToString(batch.ID), batch.Depth, depth)
	return nil
}
// UpdatePrice implements the EventUpdater interface. It sets the current
// price from the chain in the service chain state.
func (svc *batchService) UpdatePrice(price *big.Int) error {
	state := svc.storer.GetChainState()
	state.Price = price
	if err := svc.storer.PutChainState(state); err != nil {
		return fmt.Errorf("put chain state: %w", err)
	}
	svc.logger.Debugf("batch service: updated chain price to %s", price)
	return nil
}
// UpdateBlockNumber implements the EventUpdater interface. It advances the
// chain state to the given block number and accrues the total outpayment
// (current price times the number of elapsed blocks) since the last update.
// A block number lower than the recorded one is rejected: the unsigned
// subtraction below would otherwise underflow and corrupt cs.Total.
func (svc *batchService) UpdateBlockNumber(blockNumber uint64) error {
	cs := svc.storer.GetChainState()
	if blockNumber < cs.Block {
		return fmt.Errorf("update block number: new block %d precedes current block %d", blockNumber, cs.Block)
	}
	diff := big.NewInt(0).SetUint64(blockNumber - cs.Block)
	cs.Total.Add(cs.Total, diff.Mul(diff, cs.Price))
	cs.Block = blockNumber
	if err := svc.storer.PutChainState(cs); err != nil {
		return fmt.Errorf("put chain state: %w", err)
	}
	svc.logger.Debugf("batch service: updated block height to %d", blockNumber)
	return nil
}
// Start begins listening for blockchain events from the block after the one
// recorded in the stored chain state, feeding them back into this service.
// NOTE(review): the returned channel presumably signals listener completion —
// confirm against the postage.Listener contract.
func (svc *batchService) Start() <-chan struct{} {
	cs := svc.storer.GetChainState()
	return svc.listener.Listen(cs.Block+1, svc)
}
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
// Copyright 2021 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package postagecontract
// Aliases to unexported package identifiers, exported here so that external
// (_test package) tests can reference them.
var (
	PostageStampABI = postageStampABI
	BatchCreatedTopic = batchCreatedTopic
)
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment