Commit ebb776f4 authored by lash, committed by GitHub

Add CLI for file entry add and resolve (#251)

parent 326b50f6
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
cmdfile "github.com/ethersphere/bee/cmd/internal/file"
"github.com/ethersphere/bee/pkg/collection/entry"
"github.com/ethersphere/bee/pkg/file/joiner"
"github.com/ethersphere/bee/pkg/file/splitter"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/spf13/cobra"
)
const (
defaultMimeType = "application/octet-stream"
limitMetadataLength = swarm.ChunkSize
)
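// limitMetadataLength caps how many bytes getEntry will buffer when joining
// the chunk behind an entry reference; together with LimitWriteCloser in
// cmd/internal/file, a bogus reference fails with an overflow error instead
// of buffering without bound.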
var (
filename string // flag variable, filename to use in metadata
mimeType string // flag variable, mime type to use in metadata
outDir string // flag variable, output dir for fsStore
outFileForce bool // flag variable, overwrite output file if exists
host string // flag variable, http api host
port int // flag variable, http api port
useHttp bool // flag variable, use bee http api if set
ssl bool // flag variable, uses https for api if set
retrieve bool // flag variable, if set will resolve and retrieve referenced file
verbosity string // flag variable, debug level
logger logging.Logger
)
// getEntry handles retrieving and writing a file from the file entry
// referenced by the given address.
func getEntry(cmd *cobra.Command, args []string) (err error) {
// process the reference to retrieve
addr, err := swarm.ParseHexAddress(args[0])
if err != nil {
return err
}
// initialize interface with HTTP API
store := cmdfile.NewApiStore(host, port, ssl)
buf := bytes.NewBuffer(nil)
writeCloser := cmdfile.NopWriteCloser(buf)
limitBuf := cmdfile.NewLimitWriteCloser(writeCloser, limitMetadataLength)
j := joiner.NewSimpleJoiner(store)
err = cmdfile.JoinReadAll(j, addr, limitBuf)
if err != nil {
return err
}
e := &entry.Entry{}
err = e.UnmarshalBinary(buf.Bytes())
if err != nil {
return err
}
buf = bytes.NewBuffer(nil)
err = cmdfile.JoinReadAll(j, e.Metadata(), buf)
if err != nil {
return err
}
// retrieve metadata
metaData := &entry.Metadata{}
err = json.Unmarshal(buf.Bytes(), metaData)
if err != nil {
return err
}
logger.Debugf("Filename: %s", metaData.Filename)
logger.Debugf("MIME-type: %s", metaData.MimeType)
if outDir == "" {
outDir = "."
} else {
err := os.MkdirAll(outDir, 0o777) // skipcq: GSC-G301
if err != nil {
return err
}
}
outFilePath := filepath.Join(outDir, metaData.Filename)
// create output dir if not exist
if outDir != "." {
err := os.MkdirAll(outDir, 0o777) // skipcq: GSC-G301
if err != nil {
return err
}
}
// protect any existing file unless explicitly told not to
outFileFlags := os.O_CREATE | os.O_WRONLY
if outFileForce {
outFileFlags |= os.O_TRUNC
} else {
outFileFlags |= os.O_EXCL
}
// open the file
outFile, err := os.OpenFile(outFilePath, outFileFlags, 0o666) // skipcq: GSC-G302
if err != nil {
return err
}
defer outFile.Close()
return cmdfile.JoinReadAll(j, e.Reference(), outFile)
}
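// Schematically, the resolution above is a two-stage walk (addresses are
// illustrative only):
//
//   entryAddr --join--> entry.Entry
//                         |-- Metadata()  --join--> JSON entry.Metadata{Filename, MimeType}
//                         `-- Reference() --join--> file contents --> outFile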
// putEntry creates a new file entry with the given reference.
func putEntry(cmd *cobra.Command, args []string) (err error) {
// process the reference the entry will point to
addr, err := swarm.ParseHexAddress(args[0])
if err != nil {
return err
}
// add the fsStore and/or apiStore, depending on flags
stores := cmdfile.NewTeeStore()
if outDir != "" {
err := os.MkdirAll(outDir, 0o777) // skipcq: GSC-G301
if err != nil {
return err
}
store := cmdfile.NewFsStore(outDir)
stores.Add(store)
}
if useHttp {
store := cmdfile.NewApiStore(host, port, ssl)
stores.Add(store)
}
// create metadata object, with defaults for missing values
if filename == "" {
filename = args[0]
}
if mimeType == "" {
mimeType = defaultMimeType
}
metadata := entry.NewMetadata(filename)
metadata.MimeType = mimeType
// serialize metadata and send it to splitter
metadataBytes, err := json.Marshal(metadata)
if err != nil {
return err
}
logger.Debugf("metadata contents: %s", metadataBytes)
// set up splitter to process the metadata
s := splitter.NewSimpleSplitter(stores)
ctx := context.Background()
// first add metadata
metadataBuf := bytes.NewBuffer(metadataBytes)
metadataReader := io.LimitReader(metadataBuf, int64(len(metadataBytes)))
metadataReadCloser := ioutil.NopCloser(metadataReader)
metadataAddr, err := s.Split(ctx, metadataReadCloser, int64(len(metadataBytes)))
if err != nil {
return err
}
// create entry from given reference and metadata,
// serialize and send to splitter
fileEntry := entry.New(addr, metadataAddr)
fileEntryBytes, err := fileEntry.MarshalBinary()
if err != nil {
return err
}
fileEntryBuf := bytes.NewBuffer(fileEntryBytes)
fileEntryReader := io.LimitReader(fileEntryBuf, int64(len(fileEntryBytes)))
fileEntryReadCloser := ioutil.NopCloser(fileEntryReader)
fileEntryAddr, err := s.Split(ctx, fileEntryReadCloser, int64(len(fileEntryBytes)))
if err != nil {
return err
}
// output reference to file entry
cmd.Println(fileEntryAddr)
return nil
}
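// A minimal sketch of the serialization round-trip putEntry relies on, using
// only the entry package calls exercised above; the two addresses are
// illustrative placeholders:
//
//   fileAddr := swarm.MustParseHexAddress("2387e8e7d8a48c2a9339c97c1dc3461a9a7aa07e994c5cb8b38fd7c1b3e6ea48")
//   metaAddr := swarm.MustParseHexAddress("94434d3312320fab70428c39b79dffb4abc3dbedf3e1562384a61ceaf8a7e36b")
//   e := entry.New(fileAddr, metaAddr)
//   b, _ := e.MarshalBinary()
//   recovered := &entry.Entry{}
//   _ = recovered.UnmarshalBinary(b)
//   // recovered.Reference() and recovered.Metadata() match fileAddr and metaAddr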
// Entry is the underlying procedure for the CLI command
func Entry(cmd *cobra.Command, args []string) (err error) {
logger, err = cmdfile.SetLogger(cmd, verbosity)
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
os.Exit(1)
}
if retrieve {
return getEntry(cmd, args)
}
return putEntry(cmd, args)
}
func main() {
c := &cobra.Command{
Use: "entry <reference>",
Short: "Create or resolve a file entry",
Long: `Creates a file entry, or retrieves the data referenced by the entry and its metadata.
Example:
$ bee-file --mime-type text/plain --filename foo.txt 2387e8e7d8a48c2a9339c97c1dc3461a9a7aa07e994c5cb8b38fd7c1b3e6ea48
> 94434d3312320fab70428c39b79dffb4abc3dbedf3e1562384a61ceaf8a7e36b
$ bee-file --output-dir /tmp 94434d3312320fab70428c39b79dffb4abc3dbedf3e1562384a61ceaf8a7e36b
$ cat /tmp/foo.txt
Creating a file entry:
The default file name is the hex representation of the swarm hash passed as argument, and the default mime-type is application/octet-stream. Both can be explicitly set with --filename and --mime-type respectively. If --output-dir is given, the metadata and entry chunks are written to the specified directory.
Resolving a file entry:
If --output-dir is set, the retrieved file will be written to the specified directory. Otherwise it will be written to the current directory. Use -f to force overwriting an existing file.`,
RunE: Entry,
SilenceUsage: true,
}
c.Flags().StringVar(&filename, "filename", "", "filename to use in entry")
c.Flags().StringVar(&mimeType, "mime-type", "", "mime-type to use in collection")
c.Flags().BoolVarP(&outFileForce, "force", "f", false, "overwrite existing output file")
c.Flags().StringVarP(&outDir, "output-dir", "d", "", "save directory")
c.Flags().StringVar(&host, "host", "127.0.0.1", "api host")
c.Flags().IntVar(&port, "port", 8080, "api port")
c.Flags().BoolVar(&ssl, "ssl", false, "use ssl")
c.Flags().BoolVarP(&retrieve, "retrieve", "r", false, "retrieve file from referenced entry")
c.Flags().BoolVar(&useHttp, "http", false, "save entry to bee http api")
c.Flags().StringVar(&verbosity, "info", "0", "log verbosity level 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=trace")
c.SetOutput(c.OutOrStdout())
err := c.Execute()
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
os.Exit(1)
}
}
@@ -5,17 +5,13 @@
package main
import (
"context"
"fmt"
- "io/ioutil"
- "net/http"
- "net/url"
"os"
"path/filepath"
- "strings"
+ cmdfile "github.com/ethersphere/bee/cmd/internal/file"
"github.com/ethersphere/bee/pkg/file/joiner"
- "github.com/ethersphere/bee/pkg/storage"
+ "github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/spf13/cobra"
)
@@ -26,50 +22,17 @@ var (
host string // flag variable, http api host
port int // flag variable, http api port
ssl bool // flag variable, uses https for api if set
+ verbosity string // flag variable, debug level
+ logger logging.Logger
)
- // apiStore provides a storage.Getter that retrieves chunks from swarm through the HTTP chunk API.
- type apiStore struct {
- baseUrl string
- }
- // newApiStore creates a new apiStore
- func newApiStore(host string, port int, ssl bool) storage.Getter {
- scheme := "http"
- if ssl {
- scheme += "s"
- }
- u := &url.URL{
- Host: fmt.Sprintf("%s:%d", host, port),
- Scheme: scheme,
- Path: "bzz-chunk",
- }
- return &apiStore{
- baseUrl: u.String(),
- }
- }
- // Get implements storage.Getter
- func (a *apiStore) Get(ctx context.Context, mode storage.ModeGet, address swarm.Address) (ch swarm.Chunk, err error) {
- addressHex := address.String()
- url := strings.Join([]string{a.baseUrl, addressHex}, "/")
- res, err := http.DefaultClient.Get(url)
- if err != nil {
- return nil, err
- }
- if res.StatusCode != http.StatusOK {
- return nil, fmt.Errorf("chunk %s not found", addressHex)
- }
- chunkData, err := ioutil.ReadAll(res.Body)
- if err != nil {
- return nil, err
- }
- ch = swarm.NewChunk(address, chunkData)
- return ch, nil
- }
// Join is the underlying procedure for the CLI command
func Join(cmd *cobra.Command, args []string) (err error) {
+ logger, err = cmdfile.SetLogger(cmd, verbosity)
+ if err != nil {
+ return err
+ }
// if output file is specified, create it if it does not exist
var outFile *os.File
if outFilePath != "" {
@@ -94,8 +57,10 @@ func Join(cmd *cobra.Command, args []string) (err error) {
return err
}
defer outFile.Close()
+ logger.Debugf("writing to %s", outFilePath)
} else {
outFile = os.Stdout
+ logger.Debugf("writing to stdout")
}
// process the reference to retrieve
@@ -105,36 +70,11 @@ func Join(cmd *cobra.Command, args []string) (err error) {
}
// initialize interface with HTTP API
- store := newApiStore(host, port, ssl)
+ store := cmdfile.NewApiStore(host, port, ssl)
// create the join and get its data reader
j := joiner.NewSimpleJoiner(store)
- r, l, err := j.Join(context.Background(), addr)
- if err != nil {
- return err
- }
- // join, rinse, repeat until done
- data := make([]byte, swarm.ChunkSize)
- var total int64
- for i := int64(0); i < l; i += swarm.ChunkSize {
- cr, err := r.Read(data)
- if err != nil {
- return err
- }
- total += int64(cr)
- cw, err := outFile.Write(data[:cr])
- if err != nil {
- return err
- }
- if cw != cr {
- return fmt.Errorf("short wrote %d of %d for chunk %d", cw, cr, i)
- }
- }
- if total != l {
- return fmt.Errorf("received only %d of %d total bytes", total, l)
- }
- return nil
+ return cmdfile.JoinReadAll(j, addr, outFile)
}
func main() {
@@ -153,7 +93,9 @@ Will output retrieved data to stdout.`,
c.Flags().StringVar(&host, "host", "127.0.0.1", "api host")
c.Flags().IntVar(&port, "port", 8080, "api port")
c.Flags().BoolVar(&ssl, "ssl", false, "use ssl")
+ c.Flags().StringVar(&verbosity, "info", "0", "log verbosity level 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=trace")
+ c.SetOutput(c.OutOrStdout())
err := c.Execute()
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
@@ -5,21 +5,16 @@
package main
import (
- "bytes"
"context"
"errors"
"fmt"
"io"
"io/ioutil"
- "net/http"
- "net/url"
"os"
- "path/filepath"
- "strings"
+ cmdfile "github.com/ethersphere/bee/cmd/internal/file"
"github.com/ethersphere/bee/pkg/file/splitter"
- "github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
+ "github.com/ethersphere/bee/pkg/logging"
"github.com/spf13/cobra"
)
@@ -28,125 +23,22 @@ var (
inputLength int64 // flag variable, limit of data input
host string // flag variable, http api host
port int // flag variable, http api port
- noHttp bool // flag variable, skips http api if set
+ useHttp bool // flag variable, use bee http api if set
ssl bool // flag variable, uses https for api if set
+ verbosity string // flag variable, debug level
+ logger logging.Logger
)
- // teeStore provides a storage.Putter that can put to multiple underlying storage.Putters
- type teeStore struct {
- putters []storage.Putter
- }
- // newTeeStore creates a new teeStore
- func newTeeStore() *teeStore {
- return &teeStore{}
- }
- // Add adds a storage.Putter
- func (t *teeStore) Add(putter storage.Putter) {
- t.putters = append(t.putters, putter)
- }
- // Put implements storage.Putter
- func (t *teeStore) Put(ctx context.Context, mode storage.ModePut, chs ...swarm.Chunk) (exist []bool, err error) {
- for _, putter := range t.putters {
- _, err := putter.Put(ctx, mode, chs...)
- if err != nil {
- return nil, err
- }
- }
- return nil, nil
- }
- // fsStore provides a storage.Putter that writes chunks directly to the filesystem.
- // Each chunk is a separate file, where the hex address of the chunk is the file name.
- type fsStore struct {
- path string
- }
- // newFsStore creates a new fsStore
- func newFsStore(path string) storage.Putter {
- return &fsStore{
- path: path,
- }
- }
- // Put implements storage.Putter
- func (f *fsStore) Put(ctx context.Context, mode storage.ModePut, chs ...swarm.Chunk) (exist []bool, err error) {
- for _, ch := range chs {
- chunkPath := filepath.Join(f.path, ch.Address().String())
- err := ioutil.WriteFile(chunkPath, ch.Data(), 0o666)
- if err != nil {
- return nil, err
- }
- }
- return nil, nil
- }
- // apiStore provides a storage.Putter that adds chunks to swarm through the HTTP chunk API.
- type apiStore struct {
- baseUrl string
- }
- // newApiStore creates a new apiStore
- func newApiStore(host string, port int, ssl bool) storage.Putter {
- scheme := "http"
- if ssl {
- scheme += "s"
- }
- u := &url.URL{
- Host: fmt.Sprintf("%s:%d", host, port),
- Scheme: scheme,
- Path: "bzz-chunk",
- }
- return &apiStore{
- baseUrl: u.String(),
- }
- }
- // Put implements storage.Putter
- func (a *apiStore) Put(ctx context.Context, mode storage.ModePut, chs ...swarm.Chunk) (exist []bool, err error) {
- c := http.DefaultClient
- for _, ch := range chs {
- addr := ch.Address().String()
- buf := bytes.NewReader(ch.Data())
- url := strings.Join([]string{a.baseUrl, addr}, "/")
- res, err := c.Post(url, "application/octet-stream", buf)
- if err != nil {
- return nil, err
- }
- if res.StatusCode != http.StatusOK {
- return nil, fmt.Errorf("upload failed: %v", res.Status)
- }
- }
- return nil, nil
- }
- // limitReadCloser wraps the input to the application to limit the input to the given count flag.
- type limitReadCloser struct {
- io.Reader
- closeFunc func() error
- }
- // newLimitReadCloser creates a new limitReadCloser.
- func newLimitReadCloser(r io.Reader, closeFunc func() error, c int64) io.ReadCloser {
- return &limitReadCloser{
- Reader: io.LimitReader(r, c),
- closeFunc: closeFunc,
- }
- }
- // Close implements io.Closer
- func (l *limitReadCloser) Close() error {
- return l.closeFunc()
- }
// Split is the underlying procedure for the CLI command
func Split(cmd *cobra.Command, args []string) (err error) {
- var infile io.ReadCloser
+ logger, err = cmdfile.SetLogger(cmd, verbosity)
+ if err != nil {
+ return err
+ }
// if one arg is set, this is the input file
// if not, we are reading from standard input
+ var infile io.ReadCloser
if len(args) > 0 {
// get the file length
@@ -170,28 +62,34 @@ func Split(cmd *cobra.Command, args []string) (err error) {
if err != nil {
return err
}
- infile = newLimitReadCloser(f, f.Close, inputLength)
+ fileReader := io.LimitReader(f, inputLength)
+ infile = ioutil.NopCloser(fileReader)
+ logger.Debugf("using %d bytes from file %s as input", fileLength, args[0])
} else {
// this simple splitter is too stupid to handle open-ended input, sadly
if inputLength == 0 {
return errors.New("must specify length of input on stdin")
}
- infile = newLimitReadCloser(os.Stdin, func() error { return nil }, inputLength)
+ stdinReader := io.LimitReader(os.Stdin, inputLength)
+ infile = ioutil.NopCloser(stdinReader)
+ logger.Debugf("using %d bytes from standard input", inputLength)
}
// add the fsStore and/or apiStore, depending on flags
- stores := newTeeStore()
+ stores := cmdfile.NewTeeStore()
if outdir != "" {
err := os.MkdirAll(outdir, 0o777) // skipcq: GSC-G301
if err != nil {
return err
}
- store := newFsStore(outdir)
+ store := cmdfile.NewFsStore(outdir)
stores.Add(store)
+ logger.Debugf("using directory %s for output", outdir)
}
- if !noHttp {
- store := newApiStore(host, port, ssl)
+ if useHttp {
+ store := cmdfile.NewApiStore(host, port, ssl)
stores.Add(store)
+ logger.Debugf("using bee http (ssl=%v) api on %s:%d for output", ssl, host, port)
}
// split and rule
@@ -204,7 +102,7 @@ func Split(cmd *cobra.Command, args []string) (err error) {
}
// output the resulting hash
- fmt.Println(addr)
+ cmd.Println(addr)
return nil
}
@@ -231,7 +129,10 @@ Chunks are saved in individual files, and the file names will be the hex address
c.Flags().StringVar(&host, "host", "127.0.0.1", "api host")
c.Flags().IntVar(&port, "port", 8080, "api port")
c.Flags().BoolVar(&ssl, "ssl", false, "use ssl")
- c.Flags().BoolVar(&noHttp, "no-http", false, "skip http put")
+ c.Flags().BoolVar(&useHttp, "http", false, "save chunks to bee http api")
+ c.Flags().StringVar(&verbosity, "info", "0", "log verbosity level 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=trace")
+ c.SetOutput(c.OutOrStdout())
err := c.Execute()
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package file
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"path/filepath"
"strings"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/ethersphere/bee/pkg/file"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
)
// nopWriteCloser wraps an io.Writer in the same manner as ioutil.NopCloser does
// with an io.Reader.
type nopWriteCloser struct {
io.Writer
}
// NopWriteCloser returns a new io.WriteCloser with the given writer as io.Writer.
func NopWriteCloser(w io.Writer) io.WriteCloser {
return &nopWriteCloser{
Writer: w,
}
}
// Close implements io.Closer
func (n *nopWriteCloser) Close() error {
return nil
}
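// NopWriteCloser exists because bytes.Buffer has no Close method; getEntry in
// cmd/bee-file and the tests use it to feed a buffer to APIs that expect an
// io.WriteCloser:
//
//   buf := bytes.NewBuffer(nil)
//   wc := NopWriteCloser(buf) // Close() is a no-op, writes go to buf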
// putGetter wraps both storage.Putter and storage.Getter interfaces
type putGetter interface {
storage.Putter
storage.Getter
}
// TeeStore provides a storage.Putter that can put to multiple underlying storage.Putters.
type TeeStore struct {
putters []storage.Putter
}
// NewTeeStore creates a new TeeStore.
func NewTeeStore() *TeeStore {
return &TeeStore{}
}
// Add adds a storage.Putter.
func (t *TeeStore) Add(putter storage.Putter) {
t.putters = append(t.putters, putter)
}
// Put implements storage.Putter.
func (t *TeeStore) Put(ctx context.Context, mode storage.ModePut, chs ...swarm.Chunk) (exist []bool, err error) {
for _, putter := range t.putters {
_, err := putter.Put(ctx, mode, chs...)
if err != nil {
return nil, err
}
}
exist = make([]bool, len(chs))
return exist, nil
}
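// A TeeStore fans every Put out to all added putters and fails on the first
// error. Typical wiring, mirroring the entry and split commands (the output
// directory here is illustrative), given a ctx and a chunk ch:
//
//   stores := NewTeeStore()
//   stores.Add(NewFsStore("/tmp/chunks"))             // chunks to disk
//   stores.Add(NewApiStore("127.0.0.1", 8080, false)) // and to a local bee api
//   _, err := stores.Put(ctx, storage.ModePutUpload, ch)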
// FsStore provides a storage.Putter that writes chunks directly to the filesystem.
// Each chunk is a separate file, where the hex address of the chunk is the file name.
type FsStore struct {
path string
}
// NewFsStore creates a new FsStore.
func NewFsStore(path string) storage.Putter {
return &FsStore{
path: path,
}
}
// Put implements storage.Putter.
func (f *FsStore) Put(ctx context.Context, mode storage.ModePut, chs ...swarm.Chunk) (exist []bool, err error) {
for _, ch := range chs {
chunkPath := filepath.Join(f.path, ch.Address().String())
err := ioutil.WriteFile(chunkPath, ch.Data(), 0o666)
if err != nil {
return nil, err
}
}
exist = make([]bool, len(chs))
return exist, nil
}
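// fsGet is a hypothetical Get counterpart to FsStore.Put, included only as a
// sketch of how a chunk written by FsStore could be read back; the FsStore in
// this package implements storage.Putter only.
func fsGet(path string, address swarm.Address) (swarm.Chunk, error) {
	// the chunk was stored in a file named after its hex address
	data, err := ioutil.ReadFile(filepath.Join(path, address.String()))
	if err != nil {
		return nil, err
	}
	return swarm.NewChunk(address, data), nil
}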
// ApiStore provides a storage.Putter and storage.Getter that stores and retrieves chunks from swarm through the HTTP chunk API.
type ApiStore struct {
Client *http.Client
baseUrl string
}
// NewApiStore creates a new ApiStore.
func NewApiStore(host string, port int, ssl bool) putGetter {
scheme := "http"
if ssl {
scheme += "s"
}
u := &url.URL{
Host: fmt.Sprintf("%s:%d", host, port),
Scheme: scheme,
Path: "bzz-chunk",
}
return &ApiStore{
Client: http.DefaultClient,
baseUrl: u.String(),
}
}
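// With the default flags the base URL is http://127.0.0.1:8080/bzz-chunk;
// Put and Get below then address individual chunks as
// <baseUrl>/<hex chunk address>.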
// Put implements storage.Putter.
func (a *ApiStore) Put(ctx context.Context, mode storage.ModePut, chs ...swarm.Chunk) (exist []bool, err error) {
for _, ch := range chs {
addr := ch.Address().String()
buf := bytes.NewReader(ch.Data())
url := strings.Join([]string{a.baseUrl, addr}, "/")
res, err := a.Client.Post(url, "application/octet-stream", buf)
if err != nil {
return nil, err
}
if res.StatusCode != http.StatusOK {
return nil, fmt.Errorf("upload failed: %v", res.Status)
}
}
exist = make([]bool, len(chs))
return exist, nil
}
// Get implements storage.Getter.
func (a *ApiStore) Get(ctx context.Context, mode storage.ModeGet, address swarm.Address) (ch swarm.Chunk, err error) {
addressHex := address.String()
url := strings.Join([]string{a.baseUrl, addressHex}, "/")
res, err := a.Client.Get(url)
if err != nil {
return nil, err
}
if res.StatusCode != http.StatusOK {
return nil, fmt.Errorf("chunk %s not found", addressHex)
}
chunkData, err := ioutil.ReadAll(res.Body)
if err != nil {
return nil, err
}
ch = swarm.NewChunk(address, chunkData)
return ch, nil
}
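// Get accepts a ctx but does not thread it into the HTTP request. A sketch of
// a context-aware variant (assuming Go 1.13+ for NewRequestWithContext):
//
//   req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
//   if err != nil {
//       return nil, err
//   }
//   res, err := a.Client.Do(req)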
// LimitWriteCloser limits the output from the application.
type LimitWriteCloser struct {
io.WriteCloser
total int64
limit int64
}
// NewLimitWriteCloser creates a new LimitWriteCloser.
func NewLimitWriteCloser(w io.WriteCloser, c int64) io.WriteCloser {
return &LimitWriteCloser{
WriteCloser: w,
limit: c,
}
}
// Write implements io.Writer.
func (l *LimitWriteCloser) Write(b []byte) (int, error) {
if l.total+int64(len(b)) > l.limit {
return 0, errors.New("overflow")
}
c, err := l.WriteCloser.Write(b)
l.total += int64(c)
return c, err
}
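// As used by getEntry in cmd/bee-file: capping a join at one chunk of output
// turns a runaway read into a hard error instead of unbounded buffering:
//
//   buf := bytes.NewBuffer(nil)
//   w := NewLimitWriteCloser(NopWriteCloser(buf), swarm.ChunkSize)
//   // any Write pushing the running total past swarm.ChunkSize returns "overflow"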
// JoinReadAll reads all output from the provided joiner.
func JoinReadAll(j file.Joiner, addr swarm.Address, outFile io.Writer) error {
r, l, err := j.Join(context.Background(), addr)
if err != nil {
return err
}
// join, rinse, repeat until done
data := make([]byte, swarm.ChunkSize)
var total int64
for i := int64(0); i < l; i += swarm.ChunkSize {
cr, err := r.Read(data)
if err != nil {
return err
}
total += int64(cr)
cw, err := outFile.Write(data[:cr])
if err != nil {
return err
}
if cw != cr {
return fmt.Errorf("short wrote %d of %d for chunk %d", cw, cr, i)
}
}
if total != l {
return fmt.Errorf("received only %d of %d total bytes", total, l)
}
return nil
}
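// SetLogger creates the command logger from the given verbosity flag value,
// accepting either a numeric level ("0" through "5") or its name ("silent"
// through "trace"), and directs it at the command's configured error stream.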
func SetLogger(cmd *cobra.Command, verbosityString string) (logger logging.Logger, err error) {
v := strings.ToLower(verbosityString)
switch v {
case "0", "silent":
logger = logging.New(ioutil.Discard, 0)
case "1", "error":
logger = logging.New(cmd.OutOrStderr(), logrus.ErrorLevel)
case "2", "warn":
logger = logging.New(cmd.OutOrStderr(), logrus.WarnLevel)
case "3", "info":
logger = logging.New(cmd.OutOrStderr(), logrus.InfoLevel)
case "4", "debug":
logger = logging.New(cmd.OutOrStderr(), logrus.DebugLevel)
case "5", "trace":
logger = logging.New(cmd.OutOrStderr(), logrus.TraceLevel)
default:
return nil, fmt.Errorf("unknown verbosity level %q", v)
}
return logger, nil
}
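// End to end, the helpers above compose as follows. A minimal sketch, assuming
// an in-memory storer such as pkg/storage/mock (used by the tests below) and
// the simple splitter/joiner from pkg/file used by the commands:
//
//   storer := mock.NewStorer()
//   s := splitter.NewSimpleSplitter(storer)
//   data := []byte("hello")
//   addr, err := s.Split(ctx, ioutil.NopCloser(bytes.NewReader(data)), int64(len(data)))
//   // ... handle err ...
//   j := joiner.NewSimpleJoiner(storer)
//   err = JoinReadAll(j, addr, os.Stdout) // writes "hello" to stdout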
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package file_test
import (
"bytes"
"context"
"io"
"io/ioutil"
"net/http/httptest"
"net/url"
"os"
"path/filepath"
"strconv"
"testing"
cmdfile "github.com/ethersphere/bee/cmd/internal/file"
"github.com/ethersphere/bee/pkg/api"
"github.com/ethersphere/bee/pkg/file"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/storage/mock"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/tags"
)
const (
hashOfFoo = "2387e8e7d8a48c2a9339c97c1dc3461a9a7aa07e994c5cb8b38fd7c1b3e6ea48"
)
// TestApiStore verifies that the api store layer does not distort data, and that the
// same data successfully posted can be retrieved from the http backend.
func TestApiStore(t *testing.T) {
storer := mock.NewStorer()
ctx := context.Background()
srvUrl := newTestServer(t, storer)
host := srvUrl.Hostname()
port, err := strconv.Atoi(srvUrl.Port())
if err != nil {
t.Fatal(err)
}
a := cmdfile.NewApiStore(host, port, false)
chunkAddr := swarm.MustParseHexAddress(hashOfFoo)
chunkData := []byte("foo")
ch := swarm.NewChunk(chunkAddr, chunkData)
_, err = a.Put(ctx, storage.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
_, err = storer.Get(ctx, storage.ModeGetRequest, chunkAddr)
if err != nil {
t.Fatal(err)
}
chResult, err := a.Get(ctx, storage.ModeGetRequest, chunkAddr)
if err != nil {
t.Fatal(err)
}
if !ch.Equal(chResult) {
t.Fatal("chunk mismatch")
}
}
// TestFsStore verifies that the fs store layer does not distort data, and that the
// resulting stored data matches what is submitted.
func TestFsStore(t *testing.T) {
tmpPath, err := ioutil.TempDir("", "cli-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpPath)
storer := cmdfile.NewFsStore(tmpPath)
chunkAddr := swarm.MustParseHexAddress(hashOfFoo)
chunkData := []byte("foo")
ch := swarm.NewChunk(chunkAddr, chunkData)
ctx := context.Background()
_, err = storer.Put(ctx, storage.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
chunkFilename := filepath.Join(tmpPath, hashOfFoo)
chunkDataResult, err := ioutil.ReadFile(chunkFilename)
if err != nil {
t.Fatal(err)
}
chResult := swarm.NewChunk(chunkAddr, chunkDataResult)
if !ch.Equal(chResult) {
t.Fatal("chunk mismatch")
}
}
// TestTeeStore verifies that the TeeStore writes to all added stores.
func TestTeeStore(t *testing.T) {
storeFee := mock.NewStorer()
storeFi := mock.NewStorer()
storeFo := mock.NewStorer()
storeFum := cmdfile.NewTeeStore()
storeFum.Add(storeFee)
storeFum.Add(storeFi)
storeFum.Add(storeFo)
chunkAddr := swarm.MustParseHexAddress(hashOfFoo)
chunkData := []byte("foo")
ch := swarm.NewChunk(chunkAddr, chunkData)
ctx := context.Background()
var err error
_, err = storeFum.Put(ctx, storage.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
_, err = storeFee.Get(ctx, storage.ModeGetRequest, chunkAddr)
if err != nil {
t.Fatal(err)
}
_, err = storeFi.Get(ctx, storage.ModeGetRequest, chunkAddr)
if err != nil {
t.Fatal(err)
}
_, err = storeFo.Get(ctx, storage.ModeGetRequest, chunkAddr)
if err != nil {
t.Fatal(err)
}
}
// TestLimitWriter verifies that writing will fail when capacity is exceeded.
func TestLimitWriter(t *testing.T) {
buf := bytes.NewBuffer(nil)
data := []byte("foo")
writeCloser := cmdfile.NopWriteCloser(buf)
w := cmdfile.NewLimitWriteCloser(writeCloser, int64(len(data)))
c, err := w.Write(data)
if err != nil {
t.Fatal(err)
}
if c < 3 {
t.Fatal("short write")
}
if !bytes.Equal(buf.Bytes(), data) {
t.Fatalf("expected written data %x, got %x", data, buf.Bytes())
}
_, err = w.Write(data[:1])
if err == nil {
t.Fatal("expected overflow error")
}
}
// TestJoinReadAll verifies that data in excess of a single chunk is returned
// in its entirety.
func TestJoinReadAll(t *testing.T) {
var dataLength int64 = swarm.ChunkSize + 2
j := newMockJoiner(dataLength)
buf := bytes.NewBuffer(nil)
err := cmdfile.JoinReadAll(j, swarm.ZeroAddress, buf)
if err != nil {
t.Fatal(err)
}
if dataLength != int64(len(buf.Bytes())) {
t.Fatalf("expected length %d, got %d", dataLength, len(buf.Bytes()))
}
}
// mockJoiner is an implementation of file.Joiner that short-circuits the join and
// returns a mock byte vector of the length given at initialization.
type mockJoiner struct {
l int64
}
// Join implements file.Joiner.
func (j *mockJoiner) Join(ctx context.Context, address swarm.Address) (dataOut io.ReadCloser, dataLength int64, err error) {
data := make([]byte, j.l)
buf := bytes.NewBuffer(data)
readCloser := ioutil.NopCloser(buf)
return readCloser, j.l, nil
}
// newMockJoiner creates a new mockJoiner.
func newMockJoiner(l int64) file.Joiner {
return &mockJoiner{
l: l,
}
}
// newTestServer creates an http server to serve the bee http api endpoints.
func newTestServer(t *testing.T, storer storage.Storer) *url.URL {
t.Helper()
s := api.New(api.Options{
Storer: storer,
Logger: logging.New(ioutil.Discard, 0),
Tags: tags.NewTags(),
})
ts := httptest.NewServer(s)
srvUrl, err := url.Parse(ts.URL)
if err != nil {
t.Fatal(err)
}
return srvUrl
}
@@ -100,6 +100,7 @@ type Chunk interface {
WithPinCounter(p uint64) Chunk
TagID() uint32
WithTagID(t uint32) Chunk
+ Equal(Chunk) bool
}
type chunk struct {
@@ -142,8 +143,12 @@ func (c *chunk) TagID() uint32 {
return c.tagID
}
- func (self *chunk) String() string {
- return fmt.Sprintf("Address: %v Chunksize: %v", self.addr.String(), len(self.sdata))
+ func (c *chunk) String() string {
+ return fmt.Sprintf("Address: %v Chunksize: %v", c.addr.String(), len(c.sdata))
}
+ func (c *chunk) Equal(cp Chunk) bool {
+ return c.Address().Equal(cp.Address()) && bytes.Equal(c.Data(), cp.Data())
+ }
type ChunkValidator interface {