// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package internal_test

import (
	"context"
	"strconv"
	"strings"
	"testing"

	"github.com/ethersphere/bee/pkg/file/splitter/internal"
	test "github.com/ethersphere/bee/pkg/file/testing"
	"github.com/ethersphere/bee/pkg/storage"
	"github.com/ethersphere/bee/pkg/storage/mock"
	"github.com/ethersphere/bee/pkg/swarm"
)

var (
	start = 0
	end   = test.GetVectorCount()
)

// putWrapper wraps a chunk-storing function so it can be passed as the putter
// used by the splitter job.
type putWrapper struct {
	putter func(context.Context, swarm.Chunk) ([]bool, error)
}

func (p putWrapper) Put(ctx context.Context, ch swarm.Chunk) ([]bool, error) {
	return p.putter(ctx, ch)
}

// TestSplitterJobPartialSingleChunk passes sub-chunk length data to the splitter,
// verifies that the correct hash is returned, and that a Write after Sum has been
// called (or after the full span has been written) returns an error.
func TestSplitterJobPartialSingleChunk(t *testing.T) {
	store := mock.NewStorer()
	putter := putWrapper{
		putter: func(ctx context.Context, ch swarm.Chunk) ([]bool, error) {
			return store.Put(ctx, storage.ModePutUpload, ch)
		},
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	data := []byte("foo")
	j := internal.NewSimpleSplitterJob(ctx, putter, int64(len(data)), false)

	c, err := j.Write(data)
	if err != nil {
		t.Fatal(err)
	}
	if c < len(data) {
		t.Fatalf("short write %d", c)
	}

	hashResult := j.Sum(nil)
	addressResult := swarm.NewAddress(hashResult)

	bmtHashOfFoo := "2387e8e7d8a48c2a9339c97c1dc3461a9a7aa07e994c5cb8b38fd7c1b3e6ea48"
	address := swarm.MustParseHexAddress(bmtHashOfFoo)
	if !addressResult.Equal(address) {
		t.Fatalf("expected %v, got %v", address, addressResult)
	}

	_, err = j.Write([]byte("bar"))
	if err == nil {
		t.Fatal("expected error writing after write/sum complete")
	}
}

// TestSplitterJobVector verifies the file hasher results against the legacy test vectors.
func TestSplitterJobVector(t *testing.T) {
	for i := start; i < end-2; i++ {
		dataLengthStr := strconv.Itoa(i)
		t.Run(dataLengthStr, testSplitterJobVector)
	}
}

func testSplitterJobVector(t *testing.T) {
	var (
		paramstring = strings.Split(t.Name(), "/")
		dataIdx, _  = strconv.ParseInt(paramstring[1], 10, 0)
		store       = mock.NewStorer()
		putter      = putWrapper{
			putter: func(ctx context.Context, ch swarm.Chunk) ([]bool, error) {
				return store.Put(ctx, storage.ModePutUpload, ch)
			},
		}
	)

	data, expect := test.GetVector(t, int(dataIdx))

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	j := internal.NewSimpleSplitterJob(ctx, putter, int64(len(data)), false)

	// write the data in chunk-sized pieces; the last piece may be shorter
	for i := 0; i < len(data); i += swarm.ChunkSize {
		l := swarm.ChunkSize
		if len(data)-i < swarm.ChunkSize {
			l = len(data) - i
		}
		c, err := j.Write(data[i : i+l])
		if err != nil {
			t.Fatal(err)
		}
		if c < l {
			t.Fatalf("short write %d", c)
		}
	}

	actualBytes := j.Sum(nil)
	actual := swarm.NewAddress(actualBytes)
	if !expect.Equal(actual) {
		t.Fatalf("expected %v, got %v", expect, actual)
	}
}