Commit 8206b490 authored by Alok Nerurkar's avatar Alok Nerurkar Committed by GitHub

Removing collections package and API cleanup (#1501)

parent 91a4a249
...@@ -5,31 +5,13 @@ ...@@ -5,31 +5,13 @@
package main package main
import ( import (
"bytes" "errors"
"context"
"encoding/json"
"fmt" "fmt"
"io"
"io/ioutil"
"os" "os"
"path/filepath"
cmdfile "github.com/ethersphere/bee/cmd/internal/file"
"github.com/ethersphere/bee/pkg/collection/entry"
"github.com/ethersphere/bee/pkg/file"
"github.com/ethersphere/bee/pkg/file/joiner"
"github.com/ethersphere/bee/pkg/file/splitter"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/spf13/cobra" "github.com/spf13/cobra"
) )
const (
defaultMimeType = "application/octet-stream"
limitMetadataLength = swarm.ChunkSize
)
var ( var (
filename string // flag variable, filename to use in metadata filename string // flag variable, filename to use in metadata
mimeType string // flag variable, mime type to use in metadata mimeType string // flag variable, mime type to use in metadata
...@@ -41,186 +23,11 @@ var ( ...@@ -41,186 +23,11 @@ var (
ssl bool // flag variable, uses https for api if set ssl bool // flag variable, uses https for api if set
retrieve bool // flag variable, if set will resolve and retrieve referenced file retrieve bool // flag variable, if set will resolve and retrieve referenced file
verbosity string // flag variable, debug level verbosity string // flag variable, debug level
logger logging.Logger
) )
// getEntry handles retrieving and writing a file from the file entry
// referenced by the given address.
func getEntry(cmd *cobra.Command, args []string) (err error) {
// process the reference to retrieve
addr, err := swarm.ParseHexAddress(args[0])
if err != nil {
return err
}
// initialize interface with HTTP API
store := cmdfile.NewApiStore(host, port, ssl)
buf := bytes.NewBuffer(nil)
writeCloser := cmdfile.NopWriteCloser(buf)
limitBuf := cmdfile.NewLimitWriteCloser(writeCloser, limitMetadataLength)
j, _, err := joiner.New(cmd.Context(), store, addr)
if err != nil {
return err
}
_, err = file.JoinReadAll(cmd.Context(), j, limitBuf)
if err != nil {
return err
}
e := &entry.Entry{}
err = e.UnmarshalBinary(buf.Bytes())
if err != nil {
return err
}
j, _, err = joiner.New(cmd.Context(), store, e.Metadata())
if err != nil {
return err
}
buf = bytes.NewBuffer(nil)
_, err = file.JoinReadAll(cmd.Context(), j, buf)
if err != nil {
return err
}
// retrieve metadata
metaData := &entry.Metadata{}
err = json.Unmarshal(buf.Bytes(), metaData)
if err != nil {
return err
}
logger.Debugf("Filename: %s", metaData.Filename)
logger.Debugf("MIME-type: %s", metaData.MimeType)
if outDir == "" {
outDir = "."
} else {
err := os.MkdirAll(outDir, 0o777) // skipcq: GSC-G301
if err != nil {
return err
}
}
outFilePath := filepath.Join(outDir, metaData.Filename)
// create output dir if not exist
if outDir != "." {
err := os.MkdirAll(outDir, 0o777) // skipcq: GSC-G301
if err != nil {
return err
}
}
// protect any existing file unless explicitly told not to
outFileFlags := os.O_CREATE | os.O_WRONLY
if outFileForce {
outFileFlags |= os.O_TRUNC
} else {
outFileFlags |= os.O_EXCL
}
// open the file
outFile, err := os.OpenFile(outFilePath, outFileFlags, 0o666) // skipcq: GSC-G302
if err != nil {
return err
}
defer outFile.Close()
j, _, err = joiner.New(cmd.Context(), store, e.Reference())
if err != nil {
return err
}
_, err = file.JoinReadAll(cmd.Context(), j, outFile)
return err
}
// putEntry creates a new file entry with the given reference.
func putEntry(cmd *cobra.Command, args []string) (err error) {
// process the reference to retrieve
addr, err := swarm.ParseHexAddress(args[0])
if err != nil {
return err
}
// add the fsStore and/or apiStore, depending on flags
stores := cmdfile.NewTeeStore()
if outDir != "" {
err := os.MkdirAll(outDir, 0o777) // skipcq: GSC-G301
if err != nil {
return err
}
store := cmdfile.NewFsStore(outDir)
stores.Add(store)
}
if useHttp {
store := cmdfile.NewApiStore(host, port, ssl)
stores.Add(store)
}
// create metadata object, with defaults for missing values
if filename == "" {
filename = args[0]
}
if mimeType == "" {
mimeType = defaultMimeType
}
metadata := entry.NewMetadata(filename)
metadata.MimeType = mimeType
// serialize metadata and send it to splitter
metadataBytes, err := json.Marshal(metadata)
if err != nil {
return err
}
logger.Debugf("metadata contents: %s", metadataBytes)
// set up splitter to process the metadata
s := splitter.NewSimpleSplitter(stores, storage.ModePutUpload)
ctx := context.Background()
// first add metadata
metadataBuf := bytes.NewBuffer(metadataBytes)
metadataReader := io.LimitReader(metadataBuf, int64(len(metadataBytes)))
metadataReadCloser := ioutil.NopCloser(metadataReader)
metadataAddr, err := s.Split(ctx, metadataReadCloser, int64(len(metadataBytes)), false)
if err != nil {
return err
}
// create entry from given reference and metadata,
// serialize and send to splitter
fileEntry := entry.New(addr, metadataAddr)
fileEntryBytes, err := fileEntry.MarshalBinary()
if err != nil {
return err
}
fileEntryBuf := bytes.NewBuffer(fileEntryBytes)
fileEntryReader := io.LimitReader(fileEntryBuf, int64(len(fileEntryBytes)))
fileEntryReadCloser := ioutil.NopCloser(fileEntryReader)
fileEntryAddr, err := s.Split(ctx, fileEntryReadCloser, int64(len(fileEntryBytes)), false)
if err != nil {
return err
}
// output reference to file entry
cmd.Println(fileEntryAddr)
return nil
}
// Entry is the underlying procedure for the CLI command // Entry is the underlying procedure for the CLI command
func Entry(cmd *cobra.Command, args []string) (err error) { func Entry(cmd *cobra.Command, args []string) (err error) {
logger, err = cmdfile.SetLogger(cmd, verbosity) return errors.New("command is deprecated")
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
os.Exit(1)
}
if retrieve {
return getEntry(cmd, args)
}
return putEntry(cmd, args)
} }
func main() { func main() {
......
...@@ -154,22 +154,31 @@ paths: ...@@ -154,22 +154,31 @@ paths:
default: default:
description: Default response description: Default response
"/files": "/bzz":
post: post:
summary: "Upload file" summary: "Upload file or a collection of files"
description: "In order to upload a collection, user can send a multipart request with all the files populated in the form data with appropriate headers.\n\n
User can also upload a tar file along with the swarm-collection header. This will upload the tar file after extracting the entire directory structure.\n\n
If the swarm-collection header is absent, all requests (including tar files) are considered as single file uploads.\n\n
A multipart request is treated as a collection regardless of whether the swarm-collection header is present. This means in order to serve single files
uploaded as a multipart request, the swarm-index-document header should be used with the name of the file."
tags: tags:
- File - File
- Collection
parameters: parameters:
- in: query - in: query
name: name name: name
schema: schema:
$ref: "SwarmCommon.yaml#/components/schemas/FileName" $ref: "SwarmCommon.yaml#/components/schemas/FileName"
required: false required: false
description: Filename description: Filename when uploading single file
- $ref: "SwarmCommon.yaml#/components/parameters/SwarmTagParameter" - $ref: "SwarmCommon.yaml#/components/parameters/SwarmTagParameter"
- $ref: "SwarmCommon.yaml#/components/parameters/SwarmPinParameter" - $ref: "SwarmCommon.yaml#/components/parameters/SwarmPinParameter"
- $ref: "SwarmCommon.yaml#/components/parameters/SwarmEncryptParameter" - $ref: "SwarmCommon.yaml#/components/parameters/SwarmEncryptParameter"
- $ref: "SwarmCommon.yaml#/components/parameters/ContentTypePreserved" - $ref: "SwarmCommon.yaml#/components/parameters/ContentTypePreserved"
- $ref: "SwarmCommon.yaml#/components/parameters/SwarmCollection"
- $ref: "SwarmCommon.yaml#/components/parameters/SwarmIndexDocumentParameter"
- $ref: "SwarmCommon.yaml#/components/parameters/SwarmErrorDocumentParameter"
requestBody: requestBody:
content: content:
multipart/form-data: multipart/form-data:
...@@ -184,76 +193,6 @@ paths: ...@@ -184,76 +193,6 @@ paths:
schema: schema:
type: string type: string
format: binary format: binary
responses:
"200":
description: Ok
headers:
"swarm-tag":
$ref: "SwarmCommon.yaml#/components/headers/SwarmTag"
"etag":
$ref: "SwarmCommon.yaml#/components/headers/ETag"
content:
application/json:
schema:
$ref: "SwarmCommon.yaml#/components/schemas/ReferenceResponse"
"400":
$ref: "SwarmCommon.yaml#/components/responses/400"
"403":
$ref: "SwarmCommon.yaml#/components/responses/403"
"500":
$ref: "SwarmCommon.yaml#/components/responses/500"
default:
description: Default response
"/files/{reference}":
get:
summary: "Get referenced file"
tags:
- File
parameters:
- in: path
name: reference
schema:
$ref: "SwarmCommon.yaml#/components/schemas/SwarmReference"
required: true
description: Swarm address of content
- $ref: "SwarmCommon.yaml#/components/parameters/SwarmRecoveryTargetsParameter"
responses:
"200":
description: Ok
headers:
"swarm-recovery-targets":
$ref: "SwarmCommon.yaml#/components/headers/SwarmRecoveryTargets"
"ETag":
$ref: "SwarmCommon.yaml#/components/headers/ETag"
content:
application/octet-stream:
schema:
type: string
format: binary
"400":
$ref: "SwarmCommon.yaml#/components/responses/400"
"404":
$ref: "SwarmCommon.yaml#/components/responses/404"
"500":
$ref: "SwarmCommon.yaml#/components/responses/500"
default:
description: Default response
"/dirs":
post:
summary: "Upload a collection"
tags:
- Collection
parameters:
- $ref: "SwarmCommon.yaml#/components/parameters/SwarmTagParameter"
- $ref: "SwarmCommon.yaml#/components/parameters/SwarmPinParameter"
- $ref: "SwarmCommon.yaml#/components/parameters/SwarmEncryptParameter"
- $ref: "SwarmCommon.yaml#/components/parameters/SwarmIndexDocumentParameter"
- $ref: "SwarmCommon.yaml#/components/parameters/SwarmErrorDocumentParameter"
- $ref: "SwarmCommon.yaml#/components/parameters/ContentTypePreserved"
requestBody:
content:
application/x-tar: application/x-tar:
schema: schema:
type: string type: string
...@@ -264,6 +203,8 @@ paths: ...@@ -264,6 +203,8 @@ paths:
headers: headers:
"swarm-tag": "swarm-tag":
$ref: "SwarmCommon.yaml#/components/headers/SwarmTag" $ref: "SwarmCommon.yaml#/components/headers/SwarmTag"
"etag":
$ref: "SwarmCommon.yaml#/components/headers/ETag"
content: content:
application/json: application/json:
schema: schema:
...@@ -279,7 +220,7 @@ paths: ...@@ -279,7 +220,7 @@ paths:
"/bzz/{reference}": "/bzz/{reference}":
get: get:
summary: "Get index document from a collection of files" summary: "Get file or index document from a collection of files"
tags: tags:
- Collection - Collection
parameters: parameters:
......
...@@ -493,6 +493,14 @@ components: ...@@ -493,6 +493,14 @@ components:
required: false required: false
description: Configure custom error document to be returned when a specified path can not be found in collection description: Configure custom error document to be returned when a specified path can not be found in collection
SwarmCollection:
in: header
name: swarm-collection
schema:
type: boolean
required: false
description: Upload file/files as a collection
responses: responses:
"204": "204":
description: The resource was deleted successfully. description: The resource was deleted successfully.
......
...@@ -40,6 +40,7 @@ const ( ...@@ -40,6 +40,7 @@ const (
SwarmErrorDocumentHeader = "Swarm-Error-Document" SwarmErrorDocumentHeader = "Swarm-Error-Document"
SwarmFeedIndexHeader = "Swarm-Feed-Index" SwarmFeedIndexHeader = "Swarm-Feed-Index"
SwarmFeedIndexNextHeader = "Swarm-Feed-Index-Next" SwarmFeedIndexNextHeader = "Swarm-Feed-Index-Next"
SwarmCollectionHeader = "Swarm-Collection"
) )
// The size of buffer used for prefetching content with Langos. // The size of buffer used for prefetching content with Langos.
...@@ -54,9 +55,20 @@ const ( ...@@ -54,9 +55,20 @@ const (
largeBufferFilesizeThreshold = 10 * 1000000 // ten megs largeBufferFilesizeThreshold = 10 * 1000000 // ten megs
) )
const (
contentTypeHeader = "Content-Type"
multiPartFormData = "multipart/form-data"
contentTypeTar = "application/x-tar"
)
var ( var (
errInvalidNameOrAddress = errors.New("invalid name or bzz address") errInvalidNameOrAddress = errors.New("invalid name or bzz address")
errNoResolver = errors.New("no resolver connected") errNoResolver = errors.New("no resolver connected")
invalidRequest = errors.New("could not validate request")
invalidContentType = errors.New("invalid content-type")
invalidContentLength = errors.New("invalid content-length")
directoryStoreError = errors.New("could not store directory")
fileStoreError = errors.New("could not store file")
) )
// Service is the API service interface. // Service is the API service interface.
......
...@@ -74,11 +74,20 @@ func TestBytes(t *testing.T) { ...@@ -74,11 +74,20 @@ func TestBytes(t *testing.T) {
}) })
t.Run("not found", func(t *testing.T) { t.Run("not found", func(t *testing.T) {
jsonhttptest.Request(t, client, http.MethodGet, resource+"/abcd", http.StatusNotFound, jsonhttptest.Request(t, client, http.MethodGet, resource+"/0xabcd", http.StatusNotFound,
jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{ jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{
Message: "Not Found", Message: "Not Found",
Code: http.StatusNotFound, Code: http.StatusNotFound,
}), }),
) )
}) })
t.Run("internal error", func(t *testing.T) {
jsonhttptest.Request(t, client, http.MethodGet, resource+"/abcd", http.StatusInternalServerError,
jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{
Message: "Internal Server Error",
Code: http.StatusInternalServerError,
}),
)
})
} }
...@@ -5,34 +5,233 @@ ...@@ -5,34 +5,233 @@
package api package api
import ( import (
"bytes"
"context" "context"
"encoding/hex" "encoding/hex"
"encoding/json"
"errors" "errors"
"fmt" "fmt"
"io"
"io/ioutil"
"mime"
"net/http" "net/http"
"os"
"path" "path"
"strconv"
"strings" "strings"
"time" "time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/gorilla/mux" "github.com/gorilla/mux"
"github.com/ethersphere/bee/pkg/collection/entry"
"github.com/ethersphere/bee/pkg/feeds" "github.com/ethersphere/bee/pkg/feeds"
"github.com/ethersphere/bee/pkg/file"
"github.com/ethersphere/bee/pkg/file/joiner" "github.com/ethersphere/bee/pkg/file/joiner"
"github.com/ethersphere/bee/pkg/file/loadsave" "github.com/ethersphere/bee/pkg/file/loadsave"
"github.com/ethersphere/bee/pkg/jsonhttp" "github.com/ethersphere/bee/pkg/jsonhttp"
"github.com/ethersphere/bee/pkg/manifest" "github.com/ethersphere/bee/pkg/manifest"
"github.com/ethersphere/bee/pkg/manifest/mantaray"
"github.com/ethersphere/bee/pkg/sctx" "github.com/ethersphere/bee/pkg/sctx"
"github.com/ethersphere/bee/pkg/storage" "github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm" "github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/tags"
"github.com/ethersphere/bee/pkg/tracing" "github.com/ethersphere/bee/pkg/tracing"
"github.com/ethersphere/langos"
) )
func (s *server) bzzUploadHandler(w http.ResponseWriter, r *http.Request) {
logger := tracing.NewLoggerWithTraceID(r.Context(), s.logger)
contentType := r.Header.Get(contentTypeHeader)
mediaType, _, err := mime.ParseMediaType(contentType)
if err != nil {
logger.Debugf("bzz upload: parse content type header %q: %v", contentType, err)
logger.Errorf("bzz upload: parse content type header %q", contentType)
jsonhttp.BadRequest(w, invalidContentType)
return
}
isDir := r.Header.Get(SwarmCollectionHeader)
if strings.ToLower(isDir) == "true" || mediaType == multiPartFormData {
s.dirUploadHandler(w, r)
return
}
s.fileUploadHandler(w, r)
}
// fileUploadResponse is returned when an HTTP request to upload a file is successful
type bzzUploadResponse struct {
Reference swarm.Address `json:"reference"`
}
// fileUploadHandler uploads the file and its metadata supplied in the file body and
// the headers
func (s *server) fileUploadHandler(w http.ResponseWriter, r *http.Request) {
logger := tracing.NewLoggerWithTraceID(r.Context(), s.logger)
var (
reader io.Reader
fileName, contentLength string
fileSize uint64
)
// Content-Type has already been validated by this time
contentType := r.Header.Get(contentTypeHeader)
tag, created, err := s.getOrCreateTag(r.Header.Get(SwarmTagHeader))
if err != nil {
logger.Debugf("bzz upload file: get or create tag: %v", err)
logger.Error("bzz upload file: get or create tag")
jsonhttp.InternalServerError(w, nil)
return
}
if !created {
// only in the case when tag is sent via header (i.e. not created by this request)
if estimatedTotalChunks := requestCalculateNumberOfChunks(r); estimatedTotalChunks > 0 {
err = tag.IncN(tags.TotalChunks, estimatedTotalChunks)
if err != nil {
s.logger.Debugf("bzz upload file: increment tag: %v", err)
s.logger.Error("bzz upload file: increment tag")
jsonhttp.InternalServerError(w, nil)
return
}
}
}
// Add the tag to the context
ctx := sctx.SetTag(r.Context(), tag)
fileName = r.URL.Query().Get("name")
contentLength = r.Header.Get("Content-Length")
reader = r.Body
if contentLength != "" {
fileSize, err = strconv.ParseUint(contentLength, 10, 64)
if err != nil {
logger.Debugf("bzz upload file: content length, file %q: %v", fileName, err)
logger.Errorf("bzz upload file: content length, file %q", fileName)
jsonhttp.BadRequest(w, invalidContentLength)
return
}
} else {
// copy the part to a tmp file to get its size
tmp, err := ioutil.TempFile("", "bee-multipart")
if err != nil {
logger.Debugf("bzz upload file: create temporary file: %v", err)
logger.Errorf("bzz upload file: create temporary file")
jsonhttp.InternalServerError(w, nil)
return
}
defer os.Remove(tmp.Name())
defer tmp.Close()
n, err := io.Copy(tmp, reader)
if err != nil {
logger.Debugf("bzz upload file: write temporary file: %v", err)
logger.Error("bzz upload file: write temporary file")
jsonhttp.InternalServerError(w, nil)
return
}
if _, err := tmp.Seek(0, io.SeekStart); err != nil {
logger.Debugf("bzz upload file: seek to beginning of temporary file: %v", err)
logger.Error("bzz upload file: seek to beginning of temporary file")
jsonhttp.InternalServerError(w, nil)
return
}
fileSize = uint64(n)
reader = tmp
}
p := requestPipelineFn(s.storer, r)
// first store the file and get its reference
fr, err := p(ctx, reader, int64(fileSize))
if err != nil {
logger.Debugf("bzz upload file: file store, file %q: %v", fileName, err)
logger.Errorf("bzz upload file: file store, file %q", fileName)
jsonhttp.InternalServerError(w, fileStoreError)
return
}
// If filename is still empty, use the file hash as the filename
if fileName == "" {
fileName = fr.String()
}
encrypt := requestEncrypt(r)
l := loadsave.New(s.storer, requestModePut(r), encrypt)
m, err := manifest.NewDefaultManifest(l, encrypt)
if err != nil {
logger.Debugf("bzz upload file: create manifest, file %q: %v", fileName, err)
logger.Errorf("bzz upload file: create manifest, file %q", fileName)
jsonhttp.InternalServerError(w, nil)
return
}
rootMetadata := map[string]string{
manifest.WebsiteIndexDocumentSuffixKey: fileName,
}
err = m.Add(ctx, manifest.RootPath, manifest.NewEntry(swarm.ZeroAddress, rootMetadata))
if err != nil {
logger.Debugf("bzz upload file: adding metadata to manifest, file %q: %v", fileName, err)
logger.Errorf("bzz upload file: adding metadata to manifest, file %q", fileName)
jsonhttp.InternalServerError(w, nil)
return
}
fileMtdt := map[string]string{
manifest.EntryMetadataContentTypeKey: contentType,
manifest.EntryMetadataFilenameKey: fileName,
}
err = m.Add(ctx, fileName, manifest.NewEntry(fr, fileMtdt))
if err != nil {
logger.Debugf("bzz upload file: adding file to manifest, file %q: %v", fileName, err)
logger.Errorf("bzz upload file: adding file to manifest, file %q", fileName)
jsonhttp.InternalServerError(w, nil)
return
}
logger.Debugf("Uploading file Encrypt: %v Filename: %s Filehash: %s FileMtdt: %v",
encrypt, fileName, fr.String(), fileMtdt)
storeSizeFn := []manifest.StoreSizeFunc{}
if !created {
// only in the case when tag is sent via header (i.e. not created by this request)
// each content that is saved for manifest
storeSizeFn = append(storeSizeFn, func(dataSize int64) error {
if estimatedTotalChunks := calculateNumberOfChunks(dataSize, encrypt); estimatedTotalChunks > 0 {
err = tag.IncN(tags.TotalChunks, estimatedTotalChunks)
if err != nil {
return fmt.Errorf("increment tag: %w", err)
}
}
return nil
})
}
manifestReference, err := m.Store(ctx, storeSizeFn...)
if err != nil {
logger.Debugf("bzz upload file: manifest store, file %q: %v", fileName, err)
logger.Errorf("bzz upload file: manifest store, file %q", fileName)
jsonhttp.InternalServerError(w, nil)
return
}
logger.Debugf("Manifest Reference: %s", manifestReference.String())
if created {
_, err = tag.DoneSplit(manifestReference)
if err != nil {
logger.Debugf("bzz upload file: done split: %v", err)
logger.Error("bzz upload file: done split failed")
jsonhttp.InternalServerError(w, nil)
return
}
}
w.Header().Set("ETag", fmt.Sprintf("%q", manifestReference.String()))
w.Header().Set(SwarmTagHeader, fmt.Sprint(tag.Uid))
w.Header().Set("Access-Control-Expose-Headers", SwarmTagHeader)
jsonhttp.OK(w, bzzUploadResponse{
Reference: manifestReference,
})
}
func (s *server) bzzDownloadHandler(w http.ResponseWriter, r *http.Request) { func (s *server) bzzDownloadHandler(w http.ResponseWriter, r *http.Request) {
logger := tracing.NewLoggerWithTraceID(r.Context(), s.logger) logger := tracing.NewLoggerWithTraceID(r.Context(), s.logger)
ls := loadsave.New(s.storer, storage.ModePutRequest, false) ls := loadsave.New(s.storer, storage.ModePutRequest, false)
...@@ -62,19 +261,13 @@ func (s *server) bzzDownloadHandler(w http.ResponseWriter, r *http.Request) { ...@@ -62,19 +261,13 @@ func (s *server) bzzDownloadHandler(w http.ResponseWriter, r *http.Request) {
FETCH: FETCH:
// read manifest entry // read manifest entry
j, _, err := joiner.New(ctx, s.storer, address) m, err := manifest.NewDefaultManifestReference(
if err != nil { address,
logger.Debugf("bzz download: joiner manifest entry %s: %v", address, err) ls,
logger.Errorf("bzz download: joiner %s", address) )
jsonhttp.NotFound(w, nil)
return
}
buf := bytes.NewBuffer(nil)
_, err = file.JoinReadAll(ctx, j, buf)
if err != nil { if err != nil {
logger.Debugf("bzz download: read entry %s: %v", address, err) logger.Debugf("bzz download: not manifest %s: %v", address, err)
logger.Errorf("bzz download: read entry %s", address) logger.Error("bzz download: not manifest")
jsonhttp.NotFound(w, nil) jsonhttp.NotFound(w, nil)
return return
} }
...@@ -84,7 +277,7 @@ FETCH: ...@@ -84,7 +277,7 @@ FETCH:
// unmarshal as mantaray first and possibly resolve the feed, otherwise // unmarshal as mantaray first and possibly resolve the feed, otherwise
// go on normally. // go on normally.
if !feedDereferenced { if !feedDereferenced {
if l, err := s.manifestFeed(ctx, ls, buf.Bytes()); err == nil { if l, err := s.manifestFeed(ctx, m); err == nil {
//we have a feed manifest here //we have a feed manifest here
ch, cur, _, err := l.At(ctx, time.Now().Unix(), 0) ch, cur, _, err := l.At(ctx, time.Now().Unix(), 0)
if err != nil { if err != nil {
...@@ -125,66 +318,18 @@ FETCH: ...@@ -125,66 +318,18 @@ FETCH:
goto FETCH goto FETCH
} }
} }
e := &entry.Entry{}
err = e.UnmarshalBinary(buf.Bytes())
if err != nil {
logger.Debugf("bzz download: unmarshal entry %s: %v", address, err)
logger.Errorf("bzz download: unmarshal entry %s", address)
jsonhttp.NotFound(w, nil)
return
}
// read metadata
j, _, err = joiner.New(ctx, s.storer, e.Metadata())
if err != nil {
logger.Debugf("bzz download: joiner metadata %s: %v", address, err)
logger.Errorf("bzz download: joiner %s", address)
jsonhttp.NotFound(w, nil)
return
}
// read metadata
buf = bytes.NewBuffer(nil)
_, err = file.JoinReadAll(ctx, j, buf)
if err != nil {
logger.Debugf("bzz download: read metadata %s: %v", address, err)
logger.Errorf("bzz download: read metadata %s", address)
jsonhttp.NotFound(w, nil)
return
}
manifestMetadata := &entry.Metadata{}
err = json.Unmarshal(buf.Bytes(), manifestMetadata)
if err != nil {
logger.Debugf("bzz download: unmarshal metadata %s: %v", address, err)
logger.Errorf("bzz download: unmarshal metadata %s", address)
jsonhttp.NotFound(w, nil)
return
}
// we are expecting manifest Mime type here
m, err := manifest.NewManifestReference(
manifestMetadata.MimeType,
e.Reference(),
ls,
)
if err != nil {
logger.Debugf("bzz download: not manifest %s: %v", address, err)
logger.Error("bzz download: not manifest")
jsonhttp.NotFound(w, nil)
return
}
if pathVar == "" { if pathVar == "" {
logger.Tracef("bzz download: handle empty path %s", address) logger.Tracef("bzz download: handle empty path %s", address)
if indexDocumentSuffixKey, ok := manifestMetadataLoad(ctx, m, manifestRootPath, manifestWebsiteIndexDocumentSuffixKey); ok { if indexDocumentSuffixKey, ok := manifestMetadataLoad(ctx, m, manifest.RootPath, manifest.WebsiteIndexDocumentSuffixKey); ok {
pathWithIndex := path.Join(pathVar, indexDocumentSuffixKey) pathWithIndex := path.Join(pathVar, indexDocumentSuffixKey)
indexDocumentManifestEntry, err := m.Lookup(ctx, pathWithIndex) indexDocumentManifestEntry, err := m.Lookup(ctx, pathWithIndex)
if err == nil { if err == nil {
// index document exists // index document exists
logger.Debugf("bzz download: serving path: %s", pathWithIndex) logger.Debugf("bzz download: serving path: %s", pathWithIndex)
s.serveManifestEntry(w, r, address, indexDocumentManifestEntry.Reference(), !feedDereferenced) s.serveManifestEntry(w, r, address, indexDocumentManifestEntry, !feedDereferenced)
return return
} }
} }
...@@ -215,7 +360,7 @@ FETCH: ...@@ -215,7 +360,7 @@ FETCH:
} }
// check index suffix path // check index suffix path
if indexDocumentSuffixKey, ok := manifestMetadataLoad(ctx, m, manifestRootPath, manifestWebsiteIndexDocumentSuffixKey); ok { if indexDocumentSuffixKey, ok := manifestMetadataLoad(ctx, m, manifest.RootPath, manifest.WebsiteIndexDocumentSuffixKey); ok {
if !strings.HasSuffix(pathVar, indexDocumentSuffixKey) { if !strings.HasSuffix(pathVar, indexDocumentSuffixKey) {
// check if path is directory with index // check if path is directory with index
pathWithIndex := path.Join(pathVar, indexDocumentSuffixKey) pathWithIndex := path.Join(pathVar, indexDocumentSuffixKey)
...@@ -224,21 +369,21 @@ FETCH: ...@@ -224,21 +369,21 @@ FETCH:
// index document exists // index document exists
logger.Debugf("bzz download: serving path: %s", pathWithIndex) logger.Debugf("bzz download: serving path: %s", pathWithIndex)
s.serveManifestEntry(w, r, address, indexDocumentManifestEntry.Reference(), !feedDereferenced) s.serveManifestEntry(w, r, address, indexDocumentManifestEntry, !feedDereferenced)
return return
} }
} }
} }
// check if error document is to be shown // check if error document is to be shown
if errorDocumentPath, ok := manifestMetadataLoad(ctx, m, manifestRootPath, manifestWebsiteErrorDocumentPathKey); ok { if errorDocumentPath, ok := manifestMetadataLoad(ctx, m, manifest.RootPath, manifest.WebsiteErrorDocumentPathKey); ok {
if pathVar != errorDocumentPath { if pathVar != errorDocumentPath {
errorDocumentManifestEntry, err := m.Lookup(ctx, errorDocumentPath) errorDocumentManifestEntry, err := m.Lookup(ctx, errorDocumentPath)
if err == nil { if err == nil {
// error document exists // error document exists
logger.Debugf("bzz download: serving path: %s", errorDocumentPath) logger.Debugf("bzz download: serving path: %s", errorDocumentPath)
s.serveManifestEntry(w, r, address, errorDocumentManifestEntry.Reference(), !feedDereferenced) s.serveManifestEntry(w, r, address, errorDocumentManifestEntry, !feedDereferenced)
return return
} }
} }
...@@ -252,81 +397,76 @@ FETCH: ...@@ -252,81 +397,76 @@ FETCH:
} }
// serve requested path // serve requested path
s.serveManifestEntry(w, r, address, me.Reference(), !feedDereferenced) s.serveManifestEntry(w, r, address, me, !feedDereferenced)
} }
func (s *server) serveManifestEntry(w http.ResponseWriter, r *http.Request, address, manifestEntryAddress swarm.Address, etag bool) { func (s *server) serveManifestEntry(
var ( w http.ResponseWriter,
logger = tracing.NewLoggerWithTraceID(r.Context(), s.logger) r *http.Request,
ctx = r.Context() address swarm.Address,
buf = bytes.NewBuffer(nil) manifestEntry manifest.Entry,
) etag bool,
) {
// read file entry
j, _, err := joiner.New(ctx, s.storer, manifestEntryAddress) additionalHeaders := http.Header{}
if err != nil { mtdt := manifestEntry.Metadata()
logger.Debugf("bzz download: joiner read file entry %s: %v", address, err) if fname, ok := mtdt[manifest.EntryMetadataFilenameKey]; ok {
logger.Errorf("bzz download: joiner read file entry %s", address) additionalHeaders["Content-Disposition"] =
jsonhttp.NotFound(w, nil) []string{fmt.Sprintf("inline; filename=\"%s\"", fname)}
return
} }
if mimeType, ok := mtdt[manifest.EntryMetadataContentTypeKey]; ok {
_, err = file.JoinReadAll(ctx, j, buf) additionalHeaders["Content-Type"] = []string{mimeType}
if err != nil {
logger.Debugf("bzz download: read file entry %s: %v", address, err)
logger.Errorf("bzz download: read file entry %s", address)
jsonhttp.NotFound(w, nil)
return
} }
fe := &entry.Entry{}
err = fe.UnmarshalBinary(buf.Bytes()) s.downloadHandler(w, r, manifestEntry.Reference(), additionalHeaders, etag)
if err != nil { }
logger.Debugf("bzz download: unmarshal file entry %s: %v", address, err)
logger.Errorf("bzz download: unmarshal file entry %s", address) // downloadHandler contains common logic for dowloading Swarm file from API
jsonhttp.NotFound(w, nil) func (s *server) downloadHandler(w http.ResponseWriter, r *http.Request, reference swarm.Address, additionalHeaders http.Header, etag bool) {
return logger := tracing.NewLoggerWithTraceID(r.Context(), s.logger)
targets := r.URL.Query().Get("targets")
if targets != "" {
r = r.WithContext(sctx.SetTargets(r.Context(), targets))
} }
// read file metadata reader, l, err := joiner.New(r.Context(), s.storer, reference)
j, _, err = joiner.New(ctx, s.storer, fe.Metadata())
if err != nil { if err != nil {
logger.Debugf("bzz download: joiner read file entry %s: %v", address, err) if errors.Is(err, storage.ErrNotFound) {
logger.Errorf("bzz download: joiner read file entry %s", address) logger.Debugf("api download: not found %s: %v", reference, err)
jsonhttp.NotFound(w, nil) logger.Error("api download: not found")
jsonhttp.NotFound(w, nil)
return
}
logger.Debugf("api download: unexpected error %s: %v", reference, err)
logger.Error("api download: unexpected error")
jsonhttp.InternalServerError(w, nil)
return return
} }
buf = bytes.NewBuffer(nil) // include additional headers
_, err = file.JoinReadAll(ctx, j, buf) for name, values := range additionalHeaders {
if err != nil { w.Header().Set(name, strings.Join(values, "; "))
logger.Debugf("bzz download: read file metadata %s: %v", address, err)
logger.Errorf("bzz download: read file metadata %s", address)
jsonhttp.NotFound(w, nil)
return
} }
fileMetadata := &entry.Metadata{} if etag {
err = json.Unmarshal(buf.Bytes(), fileMetadata) w.Header().Set("ETag", fmt.Sprintf("%q", reference))
if err != nil {
logger.Debugf("bzz download: unmarshal metadata %s: %v", address, err)
logger.Errorf("bzz download: unmarshal metadata %s", address)
jsonhttp.NotFound(w, nil)
return
} }
w.Header().Set("Content-Length", fmt.Sprintf("%d", l))
additionalHeaders := http.Header{ w.Header().Set("Decompressed-Content-Length", fmt.Sprintf("%d", l))
"Content-Disposition": {fmt.Sprintf("inline; filename=\"%s\"", fileMetadata.Filename)}, w.Header().Set("Access-Control-Expose-Headers", "Content-Disposition")
"Content-Type": {fileMetadata.MimeType}, if targets != "" {
w.Header().Set(TargetsRecoveryHeader, targets)
} }
http.ServeContent(w, r, "", time.Now(), langos.NewBufferedLangos(reader, lookaheadBufferSize(l)))
fileEntryAddress := fe.Reference()
s.downloadHandler(w, r, fileEntryAddress, additionalHeaders, etag)
} }
// manifestMetadataLoad returns the value for a key stored in the metadata of // manifestMetadataLoad returns the value for a key stored in the metadata of
// manifest path, or empty string if no value is present. // manifest path, or empty string if no value is present.
// The ok result indicates whether value was found in the metadata. // The ok result indicates whether value was found in the metadata.
func manifestMetadataLoad(ctx context.Context, manifest manifest.Interface, path, metadataKey string) (string, bool) { func manifestMetadataLoad(
ctx context.Context,
manifest manifest.Interface,
path, metadataKey string,
) (string, bool) {
me, err := manifest.Lookup(ctx, path) me, err := manifest.Lookup(ctx, path)
if err != nil { if err != nil {
return "", false return "", false
...@@ -340,14 +480,11 @@ func manifestMetadataLoad(ctx context.Context, manifest manifest.Interface, path ...@@ -340,14 +480,11 @@ func manifestMetadataLoad(ctx context.Context, manifest manifest.Interface, path
return "", false return "", false
} }
func (s *server) manifestFeed(ctx context.Context, ls file.LoadSaver, candidate []byte) (feeds.Lookup, error) { func (s *server) manifestFeed(
node := new(mantaray.Node) ctx context.Context,
err := node.UnmarshalBinary(candidate) m manifest.Interface,
if err != nil { ) (feeds.Lookup, error) {
return nil, fmt.Errorf("node unmarshal: %w", err) e, err := m.Lookup(ctx, "/")
}
e, err := node.LookupNode(context.Background(), []byte("/"), ls)
if err != nil { if err != nil {
return nil, fmt.Errorf("node lookup: %w", err) return nil, fmt.Errorf("node lookup: %w", err)
} }
...@@ -374,6 +511,9 @@ func (s *server) manifestFeed(ctx context.Context, ls file.LoadSaver, candidate ...@@ -374,6 +511,9 @@ func (s *server) manifestFeed(ctx context.Context, ls file.LoadSaver, candidate
return nil, err return nil, err
} }
} }
if len(owner) == 0 || len(topic) == 0 {
return nil, fmt.Errorf("node lookup: %s", "feed metadata absent")
}
f := feeds.New(topic, common.BytesToAddress(owner)) f := feeds.New(topic, common.BytesToAddress(owner))
return s.feedFactory.NewLookup(*t, f) return s.feedFactory.NewLookup(*t, f)
} }
...@@ -7,20 +7,18 @@ package api_test ...@@ -7,20 +7,18 @@ package api_test
import ( import (
"bytes" "bytes"
"context" "context"
"encoding/hex"
"encoding/json"
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"mime" "mime"
"mime/multipart"
"net/http" "net/http"
"strconv"
"strings" "strings"
"testing" "testing"
"github.com/ethersphere/bee/pkg/api" "github.com/ethersphere/bee/pkg/api"
"github.com/ethersphere/bee/pkg/collection/entry"
"github.com/ethersphere/bee/pkg/file/loadsave" "github.com/ethersphere/bee/pkg/file/loadsave"
"github.com/ethersphere/bee/pkg/file/pipeline/builder"
"github.com/ethersphere/bee/pkg/jsonhttp" "github.com/ethersphere/bee/pkg/jsonhttp"
"github.com/ethersphere/bee/pkg/jsonhttp/jsonhttptest" "github.com/ethersphere/bee/pkg/jsonhttp/jsonhttptest"
"github.com/ethersphere/bee/pkg/logging" "github.com/ethersphere/bee/pkg/logging"
...@@ -32,144 +30,395 @@ import ( ...@@ -32,144 +30,395 @@ import (
"github.com/ethersphere/bee/pkg/tags" "github.com/ethersphere/bee/pkg/tags"
) )
func TestBzz(t *testing.T) { func TestBzzFiles(t *testing.T) {
var ( var (
bzzDownloadResource = func(addr, path string) string { return "/bzz/" + addr + "/" + path } fileUploadResource = "/bzz"
storer = smock.NewStorer() targets = "0x222"
ctx = context.Background() fileDownloadResource = func(addr string) string { return "/bzz/" + addr }
mockStatestore = statestore.NewStateStore() simpleData = []byte("this is a simple text")
logger = logging.New(ioutil.Discard, 0) mockStatestore = statestore.NewStateStore()
client, _, _ = newTestServer(t, testServerOptions{ logger = logging.New(ioutil.Discard, 0)
Storer: storer, client, _, _ = newTestServer(t, testServerOptions{
Storer: smock.NewStorer(),
Tags: tags.NewTags(mockStatestore, logger), Tags: tags.NewTags(mockStatestore, logger),
Logger: logging.New(ioutil.Discard, 5), Logger: logger,
}) })
pipeWriteAll = func(r io.Reader, l int64) (swarm.Address, error) {
pipe := builder.NewPipelineBuilder(ctx, storer, storage.ModePutUpload, false)
return builder.FeedPipeline(ctx, pipe, r, l)
}
) )
t.Run("download-file-by-path", func(t *testing.T) {
fileName := "sample.html"
filePath := "test/" + fileName
missingFilePath := "test/missing"
sampleHtml := `<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>`
var err error t.Run("invalid-content-type", func(t *testing.T) {
var fileContentReference swarm.Address jsonhttptest.Request(t, client, http.MethodPost, fileUploadResource,
var fileReference swarm.Address http.StatusBadRequest,
var manifestFileReference swarm.Address jsonhttptest.WithRequestBody(bytes.NewReader(simpleData)),
jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{
Message: api.InvalidContentType.Error(),
Code: http.StatusBadRequest,
}),
)
})
// save file t.Run("tar-file-upload", func(t *testing.T) {
fileContentReference, err = pipeWriteAll(strings.NewReader(sampleHtml), int64(len(sampleHtml))) tr := tarFiles(t, []f{
{
data: []byte("robots text"),
name: "robots.txt",
dir: "",
header: http.Header{
"Content-Type": {"text/plain; charset=utf-8"},
},
},
{
data: []byte("image 1"),
name: "1.png",
dir: "img",
header: http.Header{
"Content-Type": {"image/png"},
},
},
{
data: []byte("image 2"),
name: "2.png",
dir: "img",
header: http.Header{
"Content-Type": {"image/png"},
},
},
})
rootHash := "f30c0aa7e9e2a0ef4c9b1b750ebfeaeb7c7c24da700bb089da19a46e3677824b"
jsonhttptest.Request(t, client, http.MethodPost, fileUploadResource, http.StatusOK,
jsonhttptest.WithRequestBody(tr),
jsonhttptest.WithRequestHeader("Content-Type", api.ContentTypeTar),
jsonhttptest.WithExpectedJSONResponse(api.BzzUploadResponse{
Reference: swarm.MustParseHexAddress(rootHash),
}),
)
})
if err != nil { t.Run("encrypt-decrypt", func(t *testing.T) {
t.Fatal(err) fileName := "my-pictures.jpeg"
}
fileMetadata := entry.NewMetadata(fileName) var resp api.BzzUploadResponse
fileMetadata.MimeType = "text/html; charset=utf-8" jsonhttptest.Request(t, client, http.MethodPost,
fileMetadataBytes, err := json.Marshal(fileMetadata) fileUploadResource+"?name="+fileName, http.StatusOK,
if err != nil { jsonhttptest.WithRequestBody(bytes.NewReader(simpleData)),
t.Fatal(err) jsonhttptest.WithRequestHeader(api.SwarmEncryptHeader, "True"),
} jsonhttptest.WithRequestHeader("Content-Type", "image/jpeg; charset=utf-8"),
jsonhttptest.WithUnmarshalJSONResponse(&resp),
)
fileMetadataReference, err := pipeWriteAll(bytes.NewReader(fileMetadataBytes), int64(len(fileMetadataBytes))) rootHash := resp.Reference.String()
rcvdHeader := jsonhttptest.Request(t, client, http.MethodGet,
fileDownloadResource(rootHash), http.StatusOK,
jsonhttptest.WithExpectedResponse(simpleData),
)
cd := rcvdHeader.Get("Content-Disposition")
_, params, err := mime.ParseMediaType(cd)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if params["filename"] != fileName {
fe := entry.New(fileContentReference, fileMetadataReference) t.Fatal("Invalid file name detected")
fileEntryBytes, err := fe.MarshalBinary()
if err != nil {
t.Fatal(err)
} }
fileReference, err = pipeWriteAll(bytes.NewReader(fileEntryBytes), int64(len(fileEntryBytes))) if rcvdHeader.Get("Content-Type") != "image/jpeg; charset=utf-8" {
t.Fatal("Invalid content type detected")
if err != nil {
t.Fatal(err)
} }
})
// save manifest t.Run("check-content-type-detection", func(t *testing.T) {
m, err := manifest.NewDefaultManifest(loadsave.New(storer, storage.ModePutRequest, false), false) fileName := "my-pictures.jpeg"
if err != nil { rootHash := "4f9146b3813ccbd7ce45a18be23763d7e436ab7a3982ef39961c6f3cd4da1dcf"
t.Fatal(err)
}
e := manifest.NewEntry(fileReference, nil) jsonhttptest.Request(t, client, http.MethodPost,
fileUploadResource+"?name="+fileName, http.StatusOK,
jsonhttptest.WithRequestBody(bytes.NewReader(simpleData)),
jsonhttptest.WithExpectedJSONResponse(api.BzzUploadResponse{
Reference: swarm.MustParseHexAddress(rootHash),
}),
jsonhttptest.WithRequestHeader("Content-Type", "image/jpeg; charset=utf-8"),
)
err = m.Add(ctx, filePath, e) rcvdHeader := jsonhttptest.Request(t, client, http.MethodGet,
fileDownloadResource(rootHash), http.StatusOK,
jsonhttptest.WithExpectedResponse(simpleData),
)
cd := rcvdHeader.Get("Content-Disposition")
_, params, err := mime.ParseMediaType(cd)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if params["filename"] != fileName {
manifestBytesReference, err := m.Store(ctx) t.Fatal("Invalid file name detected")
if err != nil {
t.Fatal(err)
} }
if rcvdHeader.Get("Content-Type") != "image/jpeg; charset=utf-8" {
metadata := entry.NewMetadata(manifestBytesReference.String()) t.Fatal("Invalid content type detected")
metadata.MimeType = m.Type()
metadataBytes, err := json.Marshal(metadata)
if err != nil {
t.Fatal(err)
} }
})
mr, err := pipeWriteAll(bytes.NewReader(metadataBytes), int64(len(metadataBytes))) t.Run("upload-then-download-and-check-data", func(t *testing.T) {
if err != nil { fileName := "sample.html"
t.Fatal(err) rootHash := "36e6c1bbdfee6ac21485d5f970479fd1df458d36df9ef4e8179708ed46da557f"
} sampleHtml := `<!DOCTYPE html>
<html>
<body>
// now join both references (fr,mr) to create an entry and store it. <h1>My First Heading</h1>
newEntry := entry.New(manifestBytesReference, mr)
manifestFileEntryBytes, err := newEntry.MarshalBinary()
if err != nil {
t.Fatal(err)
}
manifestFileReference, err = pipeWriteAll(bytes.NewReader(manifestFileEntryBytes), int64(len(manifestFileEntryBytes))) <p>My first paragraph.</p>
if err != nil {
t.Fatal(err) </body>
} </html>`
rcvdHeader := jsonhttptest.Request(t, client, http.MethodPost,
fileUploadResource+"?name="+fileName, http.StatusOK,
jsonhttptest.WithRequestBody(strings.NewReader(sampleHtml)),
jsonhttptest.WithExpectedJSONResponse(api.BzzUploadResponse{
Reference: swarm.MustParseHexAddress(rootHash),
}),
jsonhttptest.WithRequestHeader("Content-Type", "text/html; charset=utf-8"),
)
// read file from manifest path if rcvdHeader.Get("ETag") != fmt.Sprintf("%q", rootHash) {
t.Fatal("Invalid ETags header received")
}
rcvdHeader := jsonhttptest.Request(t, client, http.MethodGet, bzzDownloadResource(manifestFileReference.String(), filePath), http.StatusOK, // try to fetch the same file and check the data
rcvdHeader = jsonhttptest.Request(t, client, http.MethodGet,
fileDownloadResource(rootHash), http.StatusOK,
jsonhttptest.WithExpectedResponse([]byte(sampleHtml)), jsonhttptest.WithExpectedResponse([]byte(sampleHtml)),
) )
// check the headers
cd := rcvdHeader.Get("Content-Disposition") cd := rcvdHeader.Get("Content-Disposition")
_, params, err := mime.ParseMediaType(cd) _, params, err := mime.ParseMediaType(cd)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if params["filename"] != fileName { if params["filename"] != fileName {
t.Fatal("Invalid file name detected") t.Fatal("Invalid filename detected")
}
if rcvdHeader.Get("ETag") != fmt.Sprintf("%q", fileContentReference) {
t.Fatal("Invalid ETags header received")
} }
if rcvdHeader.Get("Content-Type") != "text/html; charset=utf-8" { if rcvdHeader.Get("Content-Type") != "text/html; charset=utf-8" {
t.Fatal("Invalid content type detected") t.Fatal("Invalid content type detected")
} }
// check on invalid path })
t.Run("upload-then-download-with-targets", func(t *testing.T) {
fileName := "simple_file.txt"
rootHash := "65148cd89b58e91616773f5acea433f7b5a6274f2259e25f4893a332b74a7e28"
jsonhttptest.Request(t, client, http.MethodGet, bzzDownloadResource(manifestFileReference.String(), missingFilePath), http.StatusNotFound, jsonhttptest.Request(t, client, http.MethodPost,
jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{ fileUploadResource+"?name="+fileName, http.StatusOK,
Message: "path address not found", jsonhttptest.WithRequestBody(bytes.NewReader(simpleData)),
Code: http.StatusNotFound, jsonhttptest.WithExpectedJSONResponse(api.BzzUploadResponse{
Reference: swarm.MustParseHexAddress(rootHash),
}), }),
jsonhttptest.WithRequestHeader("Content-Type", "text/html; charset=utf-8"),
) )
rcvdHeader := jsonhttptest.Request(t, client, http.MethodGet,
fileDownloadResource(rootHash)+"?targets="+targets, http.StatusOK,
jsonhttptest.WithExpectedResponse(simpleData),
)
if rcvdHeader.Get(api.TargetsRecoveryHeader) != targets {
t.Fatalf("targets mismatch. got %s, want %s",
rcvdHeader.Get(api.TargetsRecoveryHeader), targets)
}
}) })
}
// TestRangeRequests validates that all endpoints are serving content with
// respect to HTTP Range headers.
func TestBzzFilesRangeRequests(t *testing.T) {
data := []byte("Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus dignissim tincidunt orci id aliquam. Praesent eget turpis in lectus semper consectetur et ut nibh. Nam rhoncus, augue sit amet sollicitudin lacinia, turpis tortor molestie urna, at mattis sem sapien sit amet augue. In bibendum ex vel odio dignissim interdum. Quisque hendrerit sapien et porta condimentum. Vestibulum efficitur mauris tellus, eget vestibulum sapien vulputate ac. Proin et vulputate sapien. Duis tincidunt mauris vulputate porta venenatis. Sed dictum aliquet urna, sit amet fermentum velit pellentesque vitae. Nam sed nisi ultrices, volutpat quam et, malesuada sapien. Nunc gravida non orci at rhoncus. Sed vitae dui accumsan, venenatis lectus et, mattis tellus. Proin sed mauris eu mi congue lacinia.")
uploads := []struct {
name string
uploadEndpoint string
downloadEndpoint string
filepath string
reader io.Reader
contentType string
}{
{
name: "bytes",
uploadEndpoint: "/bytes",
downloadEndpoint: "/bytes",
reader: bytes.NewReader(data),
contentType: "text/plain; charset=utf-8",
},
{
name: "file",
uploadEndpoint: "/bzz",
downloadEndpoint: "/bzz",
reader: bytes.NewReader(data),
contentType: "text/plain; charset=utf-8",
},
{
name: "dir",
uploadEndpoint: "/bzz",
downloadEndpoint: "/bzz",
filepath: "ipsum/lorem.txt",
reader: tarFiles(t, []f{
{
data: data,
name: "lorem.txt",
dir: "ipsum",
header: http.Header{
"Content-Type": {"text/plain; charset=utf-8"},
},
},
}),
contentType: api.ContentTypeTar,
},
}
ranges := []struct {
name string
ranges [][2]int
}{
{
name: "all",
ranges: [][2]int{{0, len(data)}},
},
{
name: "all without end",
ranges: [][2]int{{0, -1}},
},
{
name: "all without start",
ranges: [][2]int{{-1, len(data)}},
},
{
name: "head",
ranges: [][2]int{{0, 50}},
},
{
name: "tail",
ranges: [][2]int{{250, len(data)}},
},
{
name: "middle",
ranges: [][2]int{{10, 15}},
},
{
name: "multiple",
ranges: [][2]int{{10, 15}, {100, 125}},
},
{
name: "even more multiple parts",
ranges: [][2]int{{10, 15}, {100, 125}, {250, 252}, {261, 270}, {270, 280}},
},
}
for _, upload := range uploads {
t.Run(upload.name, func(t *testing.T) {
mockStatestore := statestore.NewStateStore()
logger := logging.New(ioutil.Discard, 0)
client, _, _ := newTestServer(t, testServerOptions{
Storer: smock.NewStorer(),
Tags: tags.NewTags(mockStatestore, logger),
Logger: logger,
})
var resp api.BzzUploadResponse
testOpts := []jsonhttptest.Option{
jsonhttptest.WithRequestBody(upload.reader),
jsonhttptest.WithRequestHeader("Content-Type", upload.contentType),
jsonhttptest.WithUnmarshalJSONResponse(&resp),
}
if upload.name == "dir" {
testOpts = append(testOpts, jsonhttptest.WithRequestHeader(api.SwarmCollectionHeader, "True"))
}
jsonhttptest.Request(t, client, http.MethodPost, upload.uploadEndpoint, http.StatusOK,
testOpts...,
)
var downloadPath string
if upload.downloadEndpoint != "/bytes" {
downloadPath = upload.downloadEndpoint + "/" + resp.Reference.String() + "/" + upload.filepath
} else {
downloadPath = upload.downloadEndpoint + "/" + resp.Reference.String()
}
for _, tc := range ranges {
t.Run(tc.name, func(t *testing.T) {
rangeHeader, want := createRangeHeader(data, tc.ranges)
var body []byte
respHeaders := jsonhttptest.Request(t, client, http.MethodGet,
downloadPath,
http.StatusPartialContent,
jsonhttptest.WithRequestHeader("Range", rangeHeader),
jsonhttptest.WithPutResponseBody(&body),
)
got := parseRangeParts(t, respHeaders.Get("Content-Type"), body)
if len(got) != len(want) {
t.Fatalf("got %v parts, want %v parts", len(got), len(want))
}
for i := 0; i < len(want); i++ {
if !bytes.Equal(got[i], want[i]) {
t.Errorf("part %v: got %q, want %q", i, string(got[i]), string(want[i]))
}
}
})
}
})
}
}
func createRangeHeader(data []byte, ranges [][2]int) (header string, parts [][]byte) {
header = "bytes="
for i, r := range ranges {
if i > 0 {
header += ", "
}
if r[0] >= 0 && r[1] >= 0 {
parts = append(parts, data[r[0]:r[1]])
// Range: <unit>=<range-start>-<range-end>, end is inclusive
header += fmt.Sprintf("%v-%v", r[0], r[1]-1)
} else {
if r[0] >= 0 {
header += strconv.Itoa(r[0]) // Range: <unit>=<range-start>-
parts = append(parts, data[r[0]:])
}
header += "-"
if r[1] >= 0 {
if r[0] >= 0 {
// Range: <unit>=<range-start>-<range-end>, end is inclusive
header += strconv.Itoa(r[1] - 1)
} else {
// Range: <unit>=-<suffix-length>, the parameter is length
header += strconv.Itoa(r[1])
}
parts = append(parts, data[:r[1]])
}
}
}
return
}
func parseRangeParts(t *testing.T, contentType string, body []byte) (parts [][]byte) {
t.Helper()
mimetype, params, _ := mime.ParseMediaType(contentType)
if mimetype != "multipart/byteranges" {
parts = append(parts, body)
return
}
mr := multipart.NewReader(bytes.NewReader(body), params["boundary"])
for part, err := mr.NextPart(); err == nil; part, err = mr.NextPart() {
value, err := ioutil.ReadAll(part)
if err != nil {
t.Fatal(err)
}
parts = append(parts, value)
}
return parts
} }
func TestFeedIndirection(t *testing.T) { func TestFeedIndirection(t *testing.T) {
...@@ -195,17 +444,18 @@ func TestFeedIndirection(t *testing.T) { ...@@ -195,17 +444,18 @@ func TestFeedIndirection(t *testing.T) {
}, },
}) })
var resp api.FileUploadResponse var resp api.BzzUploadResponse
options := []jsonhttptest.Option{ options := []jsonhttptest.Option{
jsonhttptest.WithRequestBody(tarReader), jsonhttptest.WithRequestBody(tarReader),
jsonhttptest.WithRequestHeader("Content-Type", api.ContentTypeTar), jsonhttptest.WithRequestHeader("Content-Type", api.ContentTypeTar),
jsonhttptest.WithRequestHeader(api.SwarmCollectionHeader, "True"),
jsonhttptest.WithUnmarshalJSONResponse(&resp), jsonhttptest.WithUnmarshalJSONResponse(&resp),
jsonhttptest.WithRequestHeader(api.SwarmIndexDocumentHeader, "index.html"), jsonhttptest.WithRequestHeader(api.SwarmIndexDocumentHeader, "index.html"),
} }
// verify directory tar upload response // verify directory tar upload response
jsonhttptest.Request(t, client, http.MethodPost, "/dirs", http.StatusOK, options...) jsonhttptest.Request(t, client, http.MethodPost, "/bzz", http.StatusOK, options...)
if resp.Reference.String() == "" { if resp.Reference.String() == "" {
t.Fatalf("expected file reference, did not got any") t.Fatalf("expected file reference, did not got any")
...@@ -220,10 +470,6 @@ func TestFeedIndirection(t *testing.T) { ...@@ -220,10 +470,6 @@ func TestFeedIndirection(t *testing.T) {
feedUpdate := toChunk(t, 121212, resp.Reference.Bytes()) feedUpdate := toChunk(t, 121212, resp.Reference.Bytes())
var ( var (
feedChunkAddr = swarm.MustParseHexAddress("891a1d1c8436c792d02fc2e8883fef7ab387eaeaacd25aa9f518be7be7856d54")
feedChunkData, _ = hex.DecodeString("400100000000000000000000000000000000000000000000000000000000000000000000000000005768b3b6a7db56d21d1abff40d41cebfc83448fed8d7e9b06ec0d3b073f28f200000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000012012f00000000000000000000000000000000000000000000000000000000008504f2a107ca940beafc4ce2f6c9a9f0968c62a5b5893ff0e4e1e2983048d276007e7b22737761726d2d666565642d6f776e6572223a2238643337363634343066306437623934396135653332393935643039363139613766383665363332222c22737761726d2d666565642d746f706963223a22616162626363222c22737761726d2d666565642d74797065223a2253657175656e6365227d0a0a0a0a0a0a")
chData, _ = hex.DecodeString("800000000000000000000000000000000000000000000000000000000000000000000000000000005768b3b6a7db56d21d1abff40d41cebfc83448fed8d7e9b06ec0d3b073f28f2000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
manifestCh = swarm.NewChunk(swarm.MustParseHexAddress("8504f2a107ca940beafc4ce2f6c9a9f0968c62a5b5893ff0e4e1e2983048d276"), chData)
look = newMockLookup(-1, 0, feedUpdate, nil, &id{}, nil) look = newMockLookup(-1, 0, feedUpdate, nil, &id{}, nil)
factory = newMockFactory(look) factory = newMockFactory(look)
bzzDownloadResource = func(addr, path string) string { return "/bzz/" + addr + "/" + path } bzzDownloadResource = func(addr, path string) string { return "/bzz/" + addr + "/" + path }
...@@ -232,23 +478,35 @@ func TestFeedIndirection(t *testing.T) { ...@@ -232,23 +478,35 @@ func TestFeedIndirection(t *testing.T) {
client, _, _ = newTestServer(t, testServerOptions{ client, _, _ = newTestServer(t, testServerOptions{
Storer: storer, Storer: storer,
Tags: tags.NewTags(mockStatestore, logger), Tags: tags.NewTags(mockStatestore, logger),
Logger: logging.New(ioutil.Discard, 0), Logger: logger,
Feeds: factory, Feeds: factory,
}) })
_, err := storer.Put(ctx, storage.ModePutUpload, swarm.NewChunk(feedChunkAddr, feedChunkData)) _, err := storer.Put(ctx, storage.ModePutUpload, feedUpdate)
if err != nil {
t.Fatal(err)
}
m, err := manifest.NewDefaultManifest(
loadsave.New(storer, storage.ModePutUpload, false),
false,
)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
_, err = storer.Put(ctx, storage.ModePutUpload, feedUpdate) emptyAddr := make([]byte, 32)
err = m.Add(ctx, manifest.RootPath, manifest.NewEntry(swarm.NewAddress(emptyAddr), map[string]string{
api.FeedMetadataEntryOwner: "8d3766440f0d7b949a5e32995d09619a7f86e632",
api.FeedMetadataEntryTopic: "abcc",
api.FeedMetadataEntryType: "epoch",
}))
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
_, err = storer.Put(ctx, storage.ModePutUpload, manifestCh) manifRef, err := m.Store(ctx)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
jsonhttptest.Request(t, client, http.MethodGet, bzzDownloadResource(feedChunkAddr.String(), ""), http.StatusOK, jsonhttptest.Request(t, client, http.MethodGet, bzzDownloadResource(manifRef.String(), ""), http.StatusOK,
jsonhttptest.WithExpectedResponse(updateData), jsonhttptest.WithExpectedResponse(updateData),
) )
} }
...@@ -6,19 +6,18 @@ package api ...@@ -6,19 +6,18 @@ package api
import ( import (
"archive/tar" "archive/tar"
"bytes"
"context" "context"
"encoding/json"
"errors" "errors"
"fmt" "fmt"
"io" "io"
"mime" "mime"
"mime/multipart"
"net/http" "net/http"
"path/filepath" "path/filepath"
"runtime" "runtime"
"strconv"
"strings" "strings"
"github.com/ethersphere/bee/pkg/collection/entry"
"github.com/ethersphere/bee/pkg/file" "github.com/ethersphere/bee/pkg/file"
"github.com/ethersphere/bee/pkg/file/loadsave" "github.com/ethersphere/bee/pkg/file/loadsave"
"github.com/ethersphere/bee/pkg/jsonhttp" "github.com/ethersphere/bee/pkg/jsonhttp"
...@@ -30,82 +29,92 @@ import ( ...@@ -30,82 +29,92 @@ import (
"github.com/ethersphere/bee/pkg/tracing" "github.com/ethersphere/bee/pkg/tracing"
) )
const (
contentTypeHeader = "Content-Type"
contentTypeTar = "application/x-tar"
)
const (
manifestRootPath = "/"
manifestWebsiteIndexDocumentSuffixKey = "website-index-document"
manifestWebsiteErrorDocumentPathKey = "website-error-document"
)
// dirUploadHandler uploads a directory supplied as a tar in an HTTP request // dirUploadHandler uploads a directory supplied as a tar in an HTTP request
func (s *server) dirUploadHandler(w http.ResponseWriter, r *http.Request) { func (s *server) dirUploadHandler(w http.ResponseWriter, r *http.Request) {
logger := tracing.NewLoggerWithTraceID(r.Context(), s.logger) logger := tracing.NewLoggerWithTraceID(r.Context(), s.logger)
err := validateRequest(r) if r.Body == http.NoBody {
logger.Error("bzz upload dir: request has no body")
jsonhttp.BadRequest(w, invalidRequest)
return
}
contentType := r.Header.Get(contentTypeHeader)
mediaType, params, err := mime.ParseMediaType(contentType)
if err != nil { if err != nil {
logger.Errorf("dir upload, validate request") logger.Errorf("bzz upload dir: invalid content-type")
logger.Debugf("dir upload, validate request err: %v", err) logger.Debugf("bzz upload dir: invalid content-type err: %v", err)
jsonhttp.BadRequest(w, "could not validate request") jsonhttp.BadRequest(w, invalidContentType)
return
}
var dReader dirReader
switch mediaType {
case contentTypeTar:
dReader = &tarReader{r: tar.NewReader(r.Body), logger: s.logger}
case multiPartFormData:
dReader = &multipartReader{r: multipart.NewReader(r.Body, params["boundary"])}
default:
logger.Error("bzz upload dir: invalid content-type for directory upload")
jsonhttp.BadRequest(w, invalidContentType)
return return
} }
defer r.Body.Close()
tag, created, err := s.getOrCreateTag(r.Header.Get(SwarmTagHeader)) tag, created, err := s.getOrCreateTag(r.Header.Get(SwarmTagHeader))
if err != nil { if err != nil {
logger.Debugf("dir upload: get or create tag: %v", err) logger.Debugf("bzz upload dir: get or create tag: %v", err)
logger.Error("dir upload: get or create tag") logger.Error("bzz upload dir: get or create tag")
jsonhttp.InternalServerError(w, "cannot get or create tag") jsonhttp.InternalServerError(w, nil)
return return
} }
// Add the tag to the context // Add the tag to the context
ctx := sctx.SetTag(r.Context(), tag) reference, err := storeDir(
p := requestPipelineFn(s.storer, r) sctx.SetTag(r.Context(), tag),
encrypt := requestEncrypt(r) requestEncrypt(r),
l := loadsave.New(s.storer, requestModePut(r), encrypt) dReader,
reference, err := storeDir(ctx, encrypt, r.Body, s.logger, p, l, r.Header.Get(SwarmIndexDocumentHeader), r.Header.Get(SwarmErrorDocumentHeader), tag, created) s.logger,
requestPipelineFn(s.storer, r),
loadsave.New(s.storer, requestModePut(r), requestEncrypt(r)),
r.Header.Get(SwarmIndexDocumentHeader),
r.Header.Get(SwarmErrorDocumentHeader),
tag,
created,
)
if err != nil { if err != nil {
logger.Debugf("dir upload: store dir err: %v", err) logger.Debugf("bzz upload dir: store dir err: %v", err)
logger.Errorf("dir upload: store dir") logger.Errorf("bzz upload dir: store dir")
jsonhttp.InternalServerError(w, "could not store dir") jsonhttp.InternalServerError(w, directoryStoreError)
return return
} }
if created { if created {
_, err = tag.DoneSplit(reference) _, err = tag.DoneSplit(reference)
if err != nil { if err != nil {
logger.Debugf("dir upload: done split: %v", err) logger.Debugf("bzz upload dir: done split: %v", err)
logger.Error("dir upload: done split failed") logger.Error("bzz upload dir: done split failed")
jsonhttp.InternalServerError(w, nil) jsonhttp.InternalServerError(w, nil)
return return
} }
} }
w.Header().Set(SwarmTagHeader, fmt.Sprint(tag.Uid)) w.Header().Set(SwarmTagHeader, fmt.Sprint(tag.Uid))
jsonhttp.OK(w, fileUploadResponse{ jsonhttp.OK(w, bzzUploadResponse{
Reference: reference, Reference: reference,
}) })
} }
// validateRequest validates an HTTP request for a directory to be uploaded // storeDir stores all files recursively contained in the directory given as a tar/multipart
func validateRequest(r *http.Request) error {
if r.Body == http.NoBody {
return errors.New("request has no body")
}
contentType := r.Header.Get(contentTypeHeader)
mediaType, _, err := mime.ParseMediaType(contentType)
if err != nil {
return err
}
if mediaType != contentTypeTar {
return errors.New("content-type not set to tar")
}
return nil
}
// storeDir stores all files recursively contained in the directory given as a tar
// it returns the hash for the uploaded manifest corresponding to the uploaded dir // it returns the hash for the uploaded manifest corresponding to the uploaded dir
func storeDir(ctx context.Context, encrypt bool, reader io.ReadCloser, log logging.Logger, p pipelineFunc, ls file.LoadSaver, indexFilename string, errorFilename string, tag *tags.Tag, tagCreated bool) (swarm.Address, error) { func storeDir(
ctx context.Context,
encrypt bool,
reader dirReader,
log logging.Logger,
p pipelineFunc,
ls file.LoadSaver,
indexFilename,
errorFilename string,
tag *tags.Tag,
tagCreated bool,
) (swarm.Address, error) {
logger := tracing.NewLoggerWithTraceID(ctx, log) logger := tracing.NewLoggerWithTraceID(ctx, log)
dirManifest, err := manifest.NewDefaultManifest(ls, encrypt) dirManifest, err := manifest.NewDefaultManifest(ls, encrypt)
...@@ -117,54 +126,21 @@ func storeDir(ctx context.Context, encrypt bool, reader io.ReadCloser, log loggi ...@@ -117,54 +126,21 @@ func storeDir(ctx context.Context, encrypt bool, reader io.ReadCloser, log loggi
return swarm.ZeroAddress, fmt.Errorf("index document suffix must not include slash character") return swarm.ZeroAddress, fmt.Errorf("index document suffix must not include slash character")
} }
// set up HTTP body reader
tarReader := tar.NewReader(reader)
defer reader.Close()
filesAdded := 0 filesAdded := 0
// iterate through the files in the supplied tar // iterate through the files in the supplied tar
for { for {
fileHeader, err := tarReader.Next() fileInfo, err := reader.Next()
if err == io.EOF { if err == io.EOF {
break break
} else if err != nil { } else if err != nil {
return swarm.ZeroAddress, fmt.Errorf("read tar stream: %w", err) return swarm.ZeroAddress, fmt.Errorf("read tar stream: %w", err)
} }
filePath := filepath.Clean(fileHeader.Name)
if filePath == "." {
logger.Warning("skipping file upload empty path")
continue
}
if runtime.GOOS == "windows" {
// always use Unix path separator
filePath = filepath.ToSlash(filePath)
}
// only store regular files
if !fileHeader.FileInfo().Mode().IsRegular() {
logger.Warningf("skipping file upload for %s as it is not a regular file", filePath)
continue
}
fileName := fileHeader.FileInfo().Name()
contentType := mime.TypeByExtension(filepath.Ext(fileHeader.Name))
// upload file
fileInfo := &fileUploadInfo{
name: fileName,
size: fileHeader.FileInfo().Size(),
contentType: contentType,
reader: tarReader,
}
if !tagCreated { if !tagCreated {
// only in the case when tag is sent via header (i.e. not created by this request) // only in the case when tag is sent via header (i.e. not created by this request)
// for each file // for each file
if estimatedTotalChunks := calculateNumberOfChunks(fileInfo.size, encrypt); estimatedTotalChunks > 0 { if estimatedTotalChunks := calculateNumberOfChunks(fileInfo.Size, encrypt); estimatedTotalChunks > 0 {
err = tag.IncN(tags.TotalChunks, estimatedTotalChunks) err = tag.IncN(tags.TotalChunks, estimatedTotalChunks)
if err != nil { if err != nil {
return swarm.ZeroAddress, fmt.Errorf("increment tag: %w", err) return swarm.ZeroAddress, fmt.Errorf("increment tag: %w", err)
...@@ -172,14 +148,18 @@ func storeDir(ctx context.Context, encrypt bool, reader io.ReadCloser, log loggi ...@@ -172,14 +148,18 @@ func storeDir(ctx context.Context, encrypt bool, reader io.ReadCloser, log loggi
} }
} }
fileReference, err := storeFile(ctx, fileInfo, p, encrypt, tag, tagCreated) fileReference, err := p(ctx, fileInfo.Reader, fileInfo.Size)
if err != nil { if err != nil {
return swarm.ZeroAddress, fmt.Errorf("store dir file: %w", err) return swarm.ZeroAddress, fmt.Errorf("store dir file: %w", err)
} }
logger.Tracef("uploaded dir file %v with reference %v", filePath, fileReference) logger.Tracef("uploaded dir file %v with reference %v", fileInfo.Path, fileReference)
fileMtdt := map[string]string{
manifest.EntryMetadataContentTypeKey: fileInfo.ContentType,
manifest.EntryMetadataFilenameKey: fileInfo.Name,
}
// add file entry to dir manifest // add file entry to dir manifest
err = dirManifest.Add(ctx, filePath, manifest.NewEntry(fileReference, nil)) err = dirManifest.Add(ctx, fileInfo.Path, manifest.NewEntry(fileReference, fileMtdt))
if err != nil { if err != nil {
return swarm.ZeroAddress, fmt.Errorf("add to manifest: %w", err) return swarm.ZeroAddress, fmt.Errorf("add to manifest: %w", err)
} }
...@@ -196,13 +176,13 @@ func storeDir(ctx context.Context, encrypt bool, reader io.ReadCloser, log loggi ...@@ -196,13 +176,13 @@ func storeDir(ctx context.Context, encrypt bool, reader io.ReadCloser, log loggi
if indexFilename != "" || errorFilename != "" { if indexFilename != "" || errorFilename != "" {
metadata := map[string]string{} metadata := map[string]string{}
if indexFilename != "" { if indexFilename != "" {
metadata[manifestWebsiteIndexDocumentSuffixKey] = indexFilename metadata[manifest.WebsiteIndexDocumentSuffixKey] = indexFilename
} }
if errorFilename != "" { if errorFilename != "" {
metadata[manifestWebsiteErrorDocumentPathKey] = errorFilename metadata[manifest.WebsiteErrorDocumentPathKey] = errorFilename
} }
rootManifestEntry := manifest.NewEntry(swarm.ZeroAddress, metadata) rootManifestEntry := manifest.NewEntry(swarm.ZeroAddress, metadata)
err = dirManifest.Add(ctx, manifestRootPath, rootManifestEntry) err = dirManifest.Add(ctx, manifest.RootPath, rootManifestEntry)
if err != nil { if err != nil {
return swarm.ZeroAddress, fmt.Errorf("add to manifest: %w", err) return swarm.ZeroAddress, fmt.Errorf("add to manifest: %w", err)
} }
...@@ -224,98 +204,111 @@ func storeDir(ctx context.Context, encrypt bool, reader io.ReadCloser, log loggi ...@@ -224,98 +204,111 @@ func storeDir(ctx context.Context, encrypt bool, reader io.ReadCloser, log loggi
} }
// save manifest // save manifest
manifestBytesReference, err := dirManifest.Store(ctx, storeSizeFn...) manifestReference, err := dirManifest.Store(ctx, storeSizeFn...)
if err != nil { if err != nil {
return swarm.ZeroAddress, fmt.Errorf("store manifest: %w", err) return swarm.ZeroAddress, fmt.Errorf("store manifest: %w", err)
} }
logger.Tracef("finished uploaded dir with reference %v", manifestReference)
// store the manifest metadata and get its reference return manifestReference, nil
m := entry.NewMetadata(manifestBytesReference.String()) }
m.MimeType = dirManifest.Type()
metadataBytes, err := json.Marshal(m)
if err != nil {
return swarm.ZeroAddress, fmt.Errorf("metadata marshal: %w", err)
}
if !tagCreated { type FileInfo struct {
// we have additional chunks: Path string
// - for manifest file metadata (1 or more) -> we use estimation function Name string
// - for manifest file collection entry (1) ContentType string
estimatedTotalChunks := calculateNumberOfChunks(int64(len(metadataBytes)), encrypt) Size int64
err = tag.IncN(tags.TotalChunks, estimatedTotalChunks+1) Reader io.Reader
}
type dirReader interface {
Next() (*FileInfo, error)
}
type tarReader struct {
r *tar.Reader
logger logging.Logger
}
func (t *tarReader) Next() (*FileInfo, error) {
for {
fileHeader, err := t.r.Next()
if err != nil { if err != nil {
return swarm.ZeroAddress, fmt.Errorf("increment tag: %w", err) return nil, err
} }
}
mr, err := p(ctx, bytes.NewReader(metadataBytes), int64(len(metadataBytes))) fileName := fileHeader.FileInfo().Name()
if err != nil { contentType := mime.TypeByExtension(filepath.Ext(fileHeader.Name))
return swarm.ZeroAddress, fmt.Errorf("split metadata: %w", err) fileSize := fileHeader.FileInfo().Size()
} filePath := filepath.Clean(fileHeader.Name)
// now join both references (fr, mr) to create an entry and store it if filePath == "." {
e := entry.New(manifestBytesReference, mr) t.logger.Warning("skipping file upload empty path")
fileEntryBytes, err := e.MarshalBinary() continue
if err != nil { }
return swarm.ZeroAddress, fmt.Errorf("entry marshal: %w", err) if runtime.GOOS == "windows" {
} // always use Unix path separator
filePath = filepath.ToSlash(filePath)
}
// only store regular files
if !fileHeader.FileInfo().Mode().IsRegular() {
t.logger.Warningf("skipping file upload for %s as it is not a regular file", filePath)
continue
}
manifestFileReference, err := p(ctx, bytes.NewReader(fileEntryBytes), int64(len(fileEntryBytes))) return &FileInfo{
if err != nil { Path: filePath,
return swarm.ZeroAddress, fmt.Errorf("split entry: %w", err) Name: fileName,
ContentType: contentType,
Size: fileSize,
Reader: t.r,
}, nil
} }
}
return manifestFileReference, nil // multipart reader returns files added as a multipart form. We will ensure all the
// part headers are passed correctly
type multipartReader struct {
r *multipart.Reader
} }
// storeFile uploads the given file and returns its reference func (m *multipartReader) Next() (*FileInfo, error) {
// this function was extracted from `fileUploadHandler` and should eventually replace its current code part, err := m.r.NextPart()
func storeFile(ctx context.Context, fileInfo *fileUploadInfo, p pipelineFunc, encrypt bool, tag *tags.Tag, tagCreated bool) (swarm.Address, error) {
// first store the file and get its reference
fr, err := p(ctx, fileInfo.reader, fileInfo.size)
if err != nil { if err != nil {
return swarm.ZeroAddress, fmt.Errorf("split file: %w", err) return nil, err
} }
// if filename is still empty, use the file hash as the filename fileName := part.FileName()
if fileInfo.name == "" { if fileName == "" {
fileInfo.name = fr.String() fileName = part.FormName()
} }
if fileName == "" {
// then store the metadata and get its reference return nil, errors.New("filename missing")
m := entry.NewMetadata(fileInfo.name)
m.MimeType = fileInfo.contentType
metadataBytes, err := json.Marshal(m)
if err != nil {
return swarm.ZeroAddress, fmt.Errorf("metadata marshal: %w", err)
} }
if !tagCreated { contentType := part.Header.Get(contentTypeHeader)
// here we have additional chunks: if contentType == "" {
// - for metadata (1 or more) -> we use estimation function return nil, errors.New("content-type missing")
// - for collection entry (1)
estimatedTotalChunks := calculateNumberOfChunks(int64(len(metadataBytes)), encrypt)
err = tag.IncN(tags.TotalChunks, estimatedTotalChunks+1)
if err != nil {
return swarm.ZeroAddress, fmt.Errorf("increment tag: %w", err)
}
} }
mr, err := p(ctx, bytes.NewReader(metadataBytes), int64(len(metadataBytes))) contentLength := part.Header.Get("Content-Length")
if err != nil { if contentLength == "" {
return swarm.ZeroAddress, fmt.Errorf("split metadata: %w", err) return nil, errors.New("content-length missing")
} }
fileSize, err := strconv.ParseInt(contentLength, 10, 64)
// now join both references (mr, fr) to create an entry and store it
e := entry.New(fr, mr)
fileEntryBytes, err := e.MarshalBinary()
if err != nil { if err != nil {
return swarm.ZeroAddress, fmt.Errorf("entry marshal: %w", err) return nil, errors.New("invalid file size")
} }
ref, err := p(ctx, bytes.NewReader(fileEntryBytes), int64(len(fileEntryBytes)))
if err != nil { if filepath.Dir(fileName) != "." {
return swarm.ZeroAddress, fmt.Errorf("split entry: %w", err) return nil, errors.New("multipart upload supports only single directory")
} }
return ref, nil return &FileInfo{
Path: fileName,
Name: fileName,
ContentType: contentType,
Size: fileSize,
Reader: part,
}, nil
} }
...@@ -9,15 +9,16 @@ import ( ...@@ -9,15 +9,16 @@ import (
"bytes" "bytes"
"context" "context"
"fmt" "fmt"
"io"
"io/ioutil" "io/ioutil"
"mime/multipart"
"net/http" "net/http"
"net/textproto"
"path" "path"
"strconv"
"testing" "testing"
"github.com/ethersphere/bee/pkg/api" "github.com/ethersphere/bee/pkg/api"
"github.com/ethersphere/bee/pkg/collection/entry"
"github.com/ethersphere/bee/pkg/file"
"github.com/ethersphere/bee/pkg/file/joiner"
"github.com/ethersphere/bee/pkg/file/loadsave" "github.com/ethersphere/bee/pkg/file/loadsave"
"github.com/ethersphere/bee/pkg/jsonhttp" "github.com/ethersphere/bee/pkg/jsonhttp"
"github.com/ethersphere/bee/pkg/jsonhttp/jsonhttptest" "github.com/ethersphere/bee/pkg/jsonhttp/jsonhttptest"
...@@ -32,26 +33,27 @@ import ( ...@@ -32,26 +33,27 @@ import (
func TestDirs(t *testing.T) { func TestDirs(t *testing.T) {
var ( var (
dirUploadResource = "/dirs" dirUploadResource = "/bzz"
fileDownloadResource = func(addr string) string { return "/files/" + addr } bzzDownloadResource = func(addr, path string) string { return "/bzz/" + addr + "/" + path }
bzzDownloadResource = func(addr, path string) string { return "/bzz/" + addr + "/" + path } ctx = context.Background()
ctx = context.Background() storer = mock.NewStorer()
storer = mock.NewStorer() mockStatestore = statestore.NewStateStore()
mockStatestore = statestore.NewStateStore() logger = logging.New(ioutil.Discard, 0)
logger = logging.New(ioutil.Discard, 0) client, _, _ = newTestServer(t, testServerOptions{
client, _, _ = newTestServer(t, testServerOptions{
Storer: storer, Storer: storer,
Tags: tags.NewTags(mockStatestore, logger), Tags: tags.NewTags(mockStatestore, logger),
Logger: logging.New(ioutil.Discard, 5), Logger: logger,
PreventRedirect: true, PreventRedirect: true,
}) })
) )
t.Run("empty request body", func(t *testing.T) { t.Run("empty request body", func(t *testing.T) {
jsonhttptest.Request(t, client, http.MethodPost, dirUploadResource, http.StatusBadRequest, jsonhttptest.Request(t, client, http.MethodPost, dirUploadResource,
http.StatusBadRequest,
jsonhttptest.WithRequestBody(bytes.NewReader(nil)), jsonhttptest.WithRequestBody(bytes.NewReader(nil)),
jsonhttptest.WithRequestHeader(api.SwarmCollectionHeader, "True"),
jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{ jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{
Message: "could not validate request", Message: api.InvalidRequest.Error(),
Code: http.StatusBadRequest, Code: http.StatusBadRequest,
}), }),
jsonhttptest.WithRequestHeader("Content-Type", api.ContentTypeTar), jsonhttptest.WithRequestHeader("Content-Type", api.ContentTypeTar),
...@@ -61,10 +63,12 @@ func TestDirs(t *testing.T) { ...@@ -61,10 +63,12 @@ func TestDirs(t *testing.T) {
t.Run("non tar file", func(t *testing.T) { t.Run("non tar file", func(t *testing.T) {
file := bytes.NewReader([]byte("some data")) file := bytes.NewReader([]byte("some data"))
jsonhttptest.Request(t, client, http.MethodPost, dirUploadResource, http.StatusInternalServerError, jsonhttptest.Request(t, client, http.MethodPost, dirUploadResource,
http.StatusInternalServerError,
jsonhttptest.WithRequestBody(file), jsonhttptest.WithRequestBody(file),
jsonhttptest.WithRequestHeader(api.SwarmCollectionHeader, "True"),
jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{ jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{
Message: "could not store dir", Message: api.DirectoryStoreError.Error(),
Code: http.StatusInternalServerError, Code: http.StatusInternalServerError,
}), }),
jsonhttptest.WithRequestHeader("Content-Type", api.ContentTypeTar), jsonhttptest.WithRequestHeader("Content-Type", api.ContentTypeTar),
...@@ -78,10 +82,12 @@ func TestDirs(t *testing.T) { ...@@ -78,10 +82,12 @@ func TestDirs(t *testing.T) {
}}) }})
// submit valid tar, but with wrong content-type // submit valid tar, but with wrong content-type
jsonhttptest.Request(t, client, http.MethodPost, dirUploadResource, http.StatusBadRequest, jsonhttptest.Request(t, client, http.MethodPost, dirUploadResource,
http.StatusBadRequest,
jsonhttptest.WithRequestBody(tarReader), jsonhttptest.WithRequestBody(tarReader),
jsonhttptest.WithRequestHeader(api.SwarmCollectionHeader, "True"),
jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{ jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{
Message: "could not validate request", Message: api.InvalidContentType.Error(),
Code: http.StatusBadRequest, Code: http.StatusBadRequest,
}), }),
jsonhttptest.WithRequestHeader("Content-Type", "other"), jsonhttptest.WithRequestHeader("Content-Type", "other"),
...@@ -97,26 +103,25 @@ func TestDirs(t *testing.T) { ...@@ -97,26 +103,25 @@ func TestDirs(t *testing.T) {
wantErrorFilename string wantErrorFilename string
indexFilenameOption jsonhttptest.Option indexFilenameOption jsonhttptest.Option
errorFilenameOption jsonhttptest.Option errorFilenameOption jsonhttptest.Option
doMultipart bool
files []f // files in dir for test case files []f // files in dir for test case
}{ }{
{ {
name: "non-nested files without extension", name: "non-nested files without extension",
expectedReference: swarm.MustParseHexAddress("126140bb0a33d62c4efb0523db2c26be849fcf458504618de785e2a219bad374"), expectedReference: swarm.MustParseHexAddress("f3312af64715d26b5e1a3dc90f012d2c9cc74a167899dab1d07cdee8c107f939"),
files: []f{ files: []f{
{ {
data: []byte("first file data"), data: []byte("first file data"),
name: "file1", name: "file1",
dir: "", dir: "",
reference: swarm.MustParseHexAddress("3c07cd2cf5c46208d69d554b038f4dce203f53ac02cb8a313a0fe1e3fe6cc3cf"),
header: http.Header{ header: http.Header{
"Content-Type": {""}, "Content-Type": {""},
}, },
}, },
{ {
data: []byte("second file data"), data: []byte("second file data"),
name: "file2", name: "file2",
dir: "", dir: "",
reference: swarm.MustParseHexAddress("47e1a2a8f16e02da187fac791d57e6794f3e9b5d2400edd00235da749ad36683"),
header: http.Header{ header: http.Header{
"Content-Type": {""}, "Content-Type": {""},
}, },
...@@ -125,31 +130,28 @@ func TestDirs(t *testing.T) { ...@@ -125,31 +130,28 @@ func TestDirs(t *testing.T) {
}, },
{ {
name: "nested files with extension", name: "nested files with extension",
expectedReference: swarm.MustParseHexAddress("cad4b3847bd59532d9e73623d67c52e0c8d4e017d308bbaecb54f2866a91769d"), expectedReference: swarm.MustParseHexAddress("4c9c76d63856102e54092c38a7cd227d769752d768b7adc8c3542e3dd9fcf295"),
files: []f{ files: []f{
{ {
data: []byte("robots text"), data: []byte("robots text"),
name: "robots.txt", name: "robots.txt",
dir: "", dir: "",
reference: swarm.MustParseHexAddress("17b96d0a800edca59aaf7e40c6053f7c4c0fb80dd2eb3f8663d51876bf350b12"),
header: http.Header{ header: http.Header{
"Content-Type": {"text/plain; charset=utf-8"}, "Content-Type": {"text/plain; charset=utf-8"},
}, },
}, },
{ {
data: []byte("image 1"), data: []byte("image 1"),
name: "1.png", name: "1.png",
dir: "img", dir: "img",
reference: swarm.MustParseHexAddress("3c1b3fc640e67f0595d9c1db23f10c7a2b0bdc9843b0e27c53e2ac2a2d6c4674"),
header: http.Header{ header: http.Header{
"Content-Type": {"image/png"}, "Content-Type": {"image/png"},
}, },
}, },
{ {
data: []byte("image 2"), data: []byte("image 2"),
name: "2.png", name: "2.png",
dir: "img", dir: "img",
reference: swarm.MustParseHexAddress("b234ea7954cab7b2ccc5e07fe8487e932df11b2275db6b55afcbb7bad0be73fb"),
header: http.Header{ header: http.Header{
"Content-Type": {"image/png"}, "Content-Type": {"image/png"},
}, },
...@@ -158,13 +160,13 @@ func TestDirs(t *testing.T) { ...@@ -158,13 +160,13 @@ func TestDirs(t *testing.T) {
}, },
{ {
name: "no index filename", name: "no index filename",
expectedReference: swarm.MustParseHexAddress("a85aaea6a34a5c7127a3546196f2111f866fe369c6d6562ed5d3313a99388c03"), expectedReference: swarm.MustParseHexAddress("9e178dbd1ed4b748379e25144e28dfb29c07a4b5114896ef454480115a56b237"),
doMultipart: true,
files: []f{ files: []f{
{ {
data: []byte("<h1>Swarm"), data: []byte("<h1>Swarm"),
name: "index.html", name: "index.html",
dir: "", dir: "",
reference: swarm.MustParseHexAddress("bcb1bfe15c36f1a529a241f4d0c593e5648aa6d40859790894c6facb41a6ef28"),
header: http.Header{ header: http.Header{
"Content-Type": {"text/html; charset=utf-8"}, "Content-Type": {"text/html; charset=utf-8"},
}, },
...@@ -173,15 +175,15 @@ func TestDirs(t *testing.T) { ...@@ -173,15 +175,15 @@ func TestDirs(t *testing.T) {
}, },
{ {
name: "explicit index filename", name: "explicit index filename",
expectedReference: swarm.MustParseHexAddress("7d41402220f8e397ddf74d0cf4ac2055e753102bde0d622c45b03cea2b28b023"), expectedReference: swarm.MustParseHexAddress("a58484e3d77bbdb40323ddc9020c6e96e5eb5deb52015d3e0f63cce629ac1aa6"),
wantIndexFilename: "index.html", wantIndexFilename: "index.html",
indexFilenameOption: jsonhttptest.WithRequestHeader(api.SwarmIndexDocumentHeader, "index.html"), indexFilenameOption: jsonhttptest.WithRequestHeader(api.SwarmIndexDocumentHeader, "index.html"),
doMultipart: true,
files: []f{ files: []f{
{ {
data: []byte("<h1>Swarm"), data: []byte("<h1>Swarm"),
name: "index.html", name: "index.html",
dir: "", dir: "",
reference: swarm.MustParseHexAddress("bcb1bfe15c36f1a529a241f4d0c593e5648aa6d40859790894c6facb41a6ef28"),
header: http.Header{ header: http.Header{
"Content-Type": {"text/html; charset=utf-8"}, "Content-Type": {"text/html; charset=utf-8"},
}, },
...@@ -190,15 +192,14 @@ func TestDirs(t *testing.T) { ...@@ -190,15 +192,14 @@ func TestDirs(t *testing.T) {
}, },
{ {
name: "nested index filename", name: "nested index filename",
expectedReference: swarm.MustParseHexAddress("45249cf9caad842b31b29b831a1ff12aa2b711e7c282fa7a5f8c0fb544143421"), expectedReference: swarm.MustParseHexAddress("3e2f008a578c435efa7a1fce146e21c4ae8c20b80fbb4c4e0c1c87ca08fef414"),
wantIndexFilename: "index.html", wantIndexFilename: "index.html",
indexFilenameOption: jsonhttptest.WithRequestHeader(api.SwarmIndexDocumentHeader, "index.html"), indexFilenameOption: jsonhttptest.WithRequestHeader(api.SwarmIndexDocumentHeader, "index.html"),
files: []f{ files: []f{
{ {
data: []byte("<h1>Swarm"), data: []byte("<h1>Swarm"),
name: "index.html", name: "index.html",
dir: "dir", dir: "dir",
reference: swarm.MustParseHexAddress("bcb1bfe15c36f1a529a241f4d0c593e5648aa6d40859790894c6facb41a6ef28"),
header: http.Header{ header: http.Header{
"Content-Type": {"text/html; charset=utf-8"}, "Content-Type": {"text/html; charset=utf-8"},
}, },
...@@ -207,26 +208,25 @@ func TestDirs(t *testing.T) { ...@@ -207,26 +208,25 @@ func TestDirs(t *testing.T) {
}, },
{ {
name: "explicit index and error filename", name: "explicit index and error filename",
expectedReference: swarm.MustParseHexAddress("2046a4f758e2c0579ab923206a13fb041cec0925a6396f4f772c7ce859b8ca42"), expectedReference: swarm.MustParseHexAddress("2cd9a6ac11eefbb71b372fb97c3ef64109c409955964a294fdc183c1014b3844"),
wantIndexFilename: "index.html", wantIndexFilename: "index.html",
wantErrorFilename: "error.html", wantErrorFilename: "error.html",
indexFilenameOption: jsonhttptest.WithRequestHeader(api.SwarmIndexDocumentHeader, "index.html"), indexFilenameOption: jsonhttptest.WithRequestHeader(api.SwarmIndexDocumentHeader, "index.html"),
errorFilenameOption: jsonhttptest.WithRequestHeader(api.SwarmErrorDocumentHeader, "error.html"), errorFilenameOption: jsonhttptest.WithRequestHeader(api.SwarmErrorDocumentHeader, "error.html"),
doMultipart: true,
files: []f{ files: []f{
{ {
data: []byte("<h1>Swarm"), data: []byte("<h1>Swarm"),
name: "index.html", name: "index.html",
dir: "", dir: "",
reference: swarm.MustParseHexAddress("bcb1bfe15c36f1a529a241f4d0c593e5648aa6d40859790894c6facb41a6ef28"),
header: http.Header{ header: http.Header{
"Content-Type": {"text/html; charset=utf-8"}, "Content-Type": {"text/html; charset=utf-8"},
}, },
}, },
{ {
data: []byte("<h2>404"), data: []byte("<h2>404"),
name: "error.html", name: "error.html",
dir: "", dir: "",
reference: swarm.MustParseHexAddress("b1f309c095d650521b75760b23122a9c59c2b581af28fc6daaf9c58da86a204d"),
header: http.Header{ header: http.Header{
"Content-Type": {"text/html; charset=utf-8"}, "Content-Type": {"text/html; charset=utf-8"},
}, },
...@@ -235,29 +235,26 @@ func TestDirs(t *testing.T) { ...@@ -235,29 +235,26 @@ func TestDirs(t *testing.T) {
}, },
{ {
name: "invalid archive paths", name: "invalid archive paths",
expectedReference: swarm.MustParseHexAddress("6e6adb1ce936990cf1b7ecf8f01a8e3e8f939375b9bddb3d666151e0bdc08d4e"), expectedReference: swarm.MustParseHexAddress("133c92414c047708f3d6a8561571a0cc96512899ff0edbd9690c857f01ab6883"),
files: []f{ files: []f{
{ {
data: []byte("<h1>Swarm"), data: []byte("<h1>Swarm"),
name: "index.html", name: "index.html",
dir: "", dir: "",
filePath: "./index.html", filePath: "./index.html",
reference: swarm.MustParseHexAddress("bcb1bfe15c36f1a529a241f4d0c593e5648aa6d40859790894c6facb41a6ef28"),
}, },
{ {
data: []byte("body {}"), data: []byte("body {}"),
name: "app.css", name: "app.css",
dir: "", dir: "",
filePath: "./app.css", filePath: "./app.css",
reference: swarm.MustParseHexAddress("9813953280d7e02cde1efea92fe4a8fc0fdfded61e185620b43128c9b74a3e9c"),
}, },
{ {
data: []byte(`User-agent: * data: []byte(`User-agent: *
Disallow: /`), Disallow: /`),
name: "robots.txt", name: "robots.txt",
dir: "", dir: "",
filePath: "./robots.txt", filePath: "./robots.txt",
reference: swarm.MustParseHexAddress("84a620dcaf6b3ad25251c4b4d7097fa47266908a4664408057e07eb823a6a79e"),
}, },
}, },
}, },
...@@ -274,34 +271,8 @@ Disallow: /`), ...@@ -274,34 +271,8 @@ Disallow: /`),
}, },
}, },
} { } {
t.Run(tc.name, func(t *testing.T) { verify := func(t *testing.T, resp api.BzzUploadResponse) {
// tar all the test case files t.Helper()
tarReader := tarFiles(t, tc.files)
var resp api.FileUploadResponse
options := []jsonhttptest.Option{
jsonhttptest.WithRequestBody(tarReader),
jsonhttptest.WithRequestHeader("Content-Type", api.ContentTypeTar),
jsonhttptest.WithUnmarshalJSONResponse(&resp),
}
if tc.indexFilenameOption != nil {
options = append(options, tc.indexFilenameOption)
}
if tc.errorFilenameOption != nil {
options = append(options, tc.errorFilenameOption)
}
if tc.encrypt {
options = append(options, jsonhttptest.WithRequestHeader(api.SwarmEncryptHeader, "true"))
}
// verify directory tar upload response
jsonhttptest.Request(t, client, http.MethodPost, dirUploadResource, http.StatusOK, options...)
if resp.Reference.String() == "" {
t.Fatalf("expected file reference, did not got any")
}
// NOTE: reference will be different each time when encryption is enabled // NOTE: reference will be different each time when encryption is enabled
if !tc.encrypt { if !tc.encrypt {
if !resp.Reference.Equal(tc.expectedReference) { if !resp.Reference.Equal(tc.expectedReference) {
...@@ -309,27 +280,9 @@ Disallow: /`), ...@@ -309,27 +280,9 @@ Disallow: /`),
} }
} }
// read manifest metadata
j, _, err := joiner.New(context.Background(), storer, resp.Reference)
if err != nil {
t.Fatal(err)
}
buf := bytes.NewBuffer(nil)
_, err = file.JoinReadAll(context.Background(), j, buf)
if err != nil {
t.Fatal(err)
}
e := &entry.Entry{}
err = e.UnmarshalBinary(buf.Bytes())
if err != nil {
t.Fatal(err)
}
// verify manifest content // verify manifest content
verifyManifest, err := manifest.NewManifestReference( verifyManifest, err := manifest.NewDefaultManifestReference(
manifest.DefaultManifestType, resp.Reference,
e.Reference(),
loadsave.New(storer, storage.ModePutRequest, false), loadsave.New(storer, storage.ModePutRequest, false),
) )
if err != nil { if err != nil {
...@@ -339,20 +292,9 @@ Disallow: /`), ...@@ -339,20 +292,9 @@ Disallow: /`),
validateFile := func(t *testing.T, file f, filePath string) { validateFile := func(t *testing.T, file f, filePath string) {
t.Helper() t.Helper()
entry, err := verifyManifest.Lookup(ctx, filePath) jsonhttptest.Request(t, client, http.MethodGet,
if err != nil { bzzDownloadResource(resp.Reference.String(), filePath),
t.Fatal(err) http.StatusOK,
}
fileReference := entry.Reference()
if !tc.encrypt {
if !bytes.Equal(file.reference.Bytes(), fileReference.Bytes()) {
t.Fatalf("expected file reference to match %s, got %s", file.reference, fileReference)
}
}
jsonhttptest.Request(t, client, http.MethodGet, fileDownloadResource(fileReference.String()), http.StatusOK,
jsonhttptest.WithExpectedResponse(file.data), jsonhttptest.WithExpectedResponse(file.data),
jsonhttptest.WithRequestHeader("Content-Type", file.header.Get("Content-Type")), jsonhttptest.WithRequestHeader("Content-Type", file.header.Get("Content-Type")),
) )
...@@ -361,28 +303,28 @@ Disallow: /`), ...@@ -361,28 +303,28 @@ Disallow: /`),
validateIsPermanentRedirect := func(t *testing.T, fromPath, toPath string) { validateIsPermanentRedirect := func(t *testing.T, fromPath, toPath string) {
t.Helper() t.Helper()
expectedResponse := fmt.Sprintf("<a href=\"%s\">Permanent Redirect</a>.\n\n", bzzDownloadResource(resp.Reference.String(), toPath)) expectedResponse := fmt.Sprintf("<a href=\"%s\">Permanent Redirect</a>.\n\n",
bzzDownloadResource(resp.Reference.String(), toPath))
jsonhttptest.Request(t, client, http.MethodGet, bzzDownloadResource(resp.Reference.String(), fromPath), http.StatusPermanentRedirect, jsonhttptest.Request(t, client, http.MethodGet,
bzzDownloadResource(resp.Reference.String(), fromPath),
http.StatusPermanentRedirect,
jsonhttptest.WithExpectedResponse([]byte(expectedResponse)), jsonhttptest.WithExpectedResponse([]byte(expectedResponse)),
) )
} }
validateBzzPath := func(t *testing.T, fromPath, toPath string) { validateAltPath := func(t *testing.T, fromPath, toPath string) {
t.Helper() t.Helper()
toEntry, err := verifyManifest.Lookup(ctx, toPath)
if err != nil {
t.Fatal(err)
}
var respBytes []byte var respBytes []byte
jsonhttptest.Request(t, client, http.MethodGet, fileDownloadResource(toEntry.Reference().String()), http.StatusOK, jsonhttptest.Request(t, client, http.MethodGet,
bzzDownloadResource(resp.Reference.String(), toPath), http.StatusOK,
jsonhttptest.WithPutResponseBody(&respBytes), jsonhttptest.WithPutResponseBody(&respBytes),
) )
jsonhttptest.Request(t, client, http.MethodGet, bzzDownloadResource(resp.Reference.String(), fromPath), http.StatusOK, jsonhttptest.Request(t, client, http.MethodGet,
bzzDownloadResource(resp.Reference.String(), fromPath), http.StatusOK,
jsonhttptest.WithExpectedResponse(respBytes), jsonhttptest.WithExpectedResponse(respBytes),
) )
} }
...@@ -394,13 +336,13 @@ Disallow: /`), ...@@ -394,13 +336,13 @@ Disallow: /`),
// check index filename // check index filename
if tc.wantIndexFilename != "" { if tc.wantIndexFilename != "" {
entry, err := verifyManifest.Lookup(ctx, api.ManifestRootPath) entry, err := verifyManifest.Lookup(ctx, manifest.RootPath)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
manifestRootMetadata := entry.Metadata() manifestRootMetadata := entry.Metadata()
indexDocumentSuffixPath, ok := manifestRootMetadata[api.ManifestWebsiteIndexDocumentSuffixKey] indexDocumentSuffixPath, ok := manifestRootMetadata[manifest.WebsiteIndexDocumentSuffixKey]
if !ok { if !ok {
t.Fatalf("expected index filename '%s', did not find any", tc.wantIndexFilename) t.Fatalf("expected index filename '%s', did not find any", tc.wantIndexFilename)
} }
...@@ -409,28 +351,94 @@ Disallow: /`), ...@@ -409,28 +351,94 @@ Disallow: /`),
for _, file := range tc.files { for _, file := range tc.files {
if file.dir != "" { if file.dir != "" {
validateIsPermanentRedirect(t, file.dir, file.dir+"/") validateIsPermanentRedirect(t, file.dir, file.dir+"/")
validateBzzPath(t, file.dir+"/", path.Join(file.dir, indexDocumentSuffixPath)) validateAltPath(t, file.dir+"/", path.Join(file.dir, indexDocumentSuffixPath))
} }
} }
} }
// check error filename // check error filename
if tc.wantErrorFilename != "" { if tc.wantErrorFilename != "" {
entry, err := verifyManifest.Lookup(ctx, api.ManifestRootPath) entry, err := verifyManifest.Lookup(ctx, manifest.RootPath)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
manifestRootMetadata := entry.Metadata() manifestRootMetadata := entry.Metadata()
errorDocumentPath, ok := manifestRootMetadata[api.ManifestWebsiteErrorDocumentPathKey] errorDocumentPath, ok := manifestRootMetadata[manifest.WebsiteErrorDocumentPathKey]
if !ok { if !ok {
t.Fatalf("expected error filename '%s', did not find any", tc.wantErrorFilename) t.Fatalf("expected error filename '%s', did not find any", tc.wantErrorFilename)
} }
// check error document // check error document
validateBzzPath(t, "_non_existent_file_path_", errorDocumentPath) validateAltPath(t, "_non_existent_file_path_", errorDocumentPath)
} }
}
t.Run(tc.name, func(t *testing.T) {
t.Run("tar_upload", func(t *testing.T) {
// tar all the test case files
tarReader := tarFiles(t, tc.files)
var resp api.BzzUploadResponse
options := []jsonhttptest.Option{
jsonhttptest.WithRequestBody(tarReader),
jsonhttptest.WithRequestHeader(api.SwarmCollectionHeader, "True"),
jsonhttptest.WithRequestHeader("Content-Type", api.ContentTypeTar),
jsonhttptest.WithUnmarshalJSONResponse(&resp),
}
if tc.indexFilenameOption != nil {
options = append(options, tc.indexFilenameOption)
}
if tc.errorFilenameOption != nil {
options = append(options, tc.errorFilenameOption)
}
if tc.encrypt {
options = append(options, jsonhttptest.WithRequestHeader(api.SwarmEncryptHeader, "true"))
}
// verify directory tar upload response
jsonhttptest.Request(t, client, http.MethodPost, dirUploadResource, http.StatusOK, options...)
if resp.Reference.String() == "" {
t.Fatalf("expected file reference, did not got any")
}
verify(t, resp)
})
if tc.doMultipart {
t.Run("multipart_upload", func(t *testing.T) {
// tar all the test case files
mwReader, mwBoundary := multipartFiles(t, tc.files)
var resp api.BzzUploadResponse
options := []jsonhttptest.Option{
jsonhttptest.WithRequestBody(mwReader),
jsonhttptest.WithRequestHeader(api.SwarmCollectionHeader, "True"),
jsonhttptest.WithRequestHeader("Content-Type", fmt.Sprintf("multipart/form-data; boundary=%q", mwBoundary)),
jsonhttptest.WithUnmarshalJSONResponse(&resp),
}
if tc.indexFilenameOption != nil {
options = append(options, tc.indexFilenameOption)
}
if tc.errorFilenameOption != nil {
options = append(options, tc.errorFilenameOption)
}
if tc.encrypt {
options = append(options, jsonhttptest.WithRequestHeader(api.SwarmEncryptHeader, "true"))
}
// verify directory tar upload response
jsonhttptest.Request(t, client, http.MethodPost, dirUploadResource, http.StatusOK, options...)
if resp.Reference.String() == "" {
t.Fatalf("expected file reference, did not got any")
}
verify(t, resp)
})
}
}) })
} }
} }
...@@ -473,12 +481,49 @@ func tarFiles(t *testing.T, files []f) *bytes.Buffer { ...@@ -473,12 +481,49 @@ func tarFiles(t *testing.T, files []f) *bytes.Buffer {
return &buf return &buf
} }
func multipartFiles(t *testing.T, files []f) (*bytes.Buffer, string) {
t.Helper()
var buf bytes.Buffer
mw := multipart.NewWriter(&buf)
for _, file := range files {
hdr := make(textproto.MIMEHeader)
if file.name != "" {
hdr.Set("Content-Disposition", fmt.Sprintf("form-data; name=%q", file.name))
}
contentType := file.header.Get("Content-Type")
if contentType != "" {
hdr.Set("Content-Type", contentType)
}
if len(file.data) > 0 {
hdr.Set("Content-Length", strconv.Itoa(len(file.data)))
}
part, err := mw.CreatePart(hdr)
if err != nil {
t.Fatal(err)
}
if _, err = io.Copy(part, bytes.NewBuffer(file.data)); err != nil {
t.Fatal(err)
}
}
// finally close the tar writer
if err := mw.Close(); err != nil {
t.Fatal(err)
}
return &buf, mw.Boundary()
}
// struct for dir files for test cases // struct for dir files for test cases
type f struct { type f struct {
data []byte data []byte
name string name string
dir string dir string
filePath string filePath string
reference swarm.Address header http.Header
header http.Header
} }
...@@ -13,7 +13,7 @@ type ( ...@@ -13,7 +13,7 @@ type (
ChunkAddressResponse = chunkAddressResponse ChunkAddressResponse = chunkAddressResponse
SocPostResponse = socPostResponse SocPostResponse = socPostResponse
FeedReferenceResponse = feedReferenceResponse FeedReferenceResponse = feedReferenceResponse
FileUploadResponse = fileUploadResponse BzzUploadResponse = bzzUploadResponse
TagResponse = tagResponse TagResponse = tagResponse
TagRequest = tagRequest TagRequest = tagRequest
ListTagsResponse = listTagsResponse ListTagsResponse = listTagsResponse
...@@ -23,13 +23,13 @@ type ( ...@@ -23,13 +23,13 @@ type (
) )
var ( var (
ContentTypeTar = contentTypeTar InvalidContentType = invalidContentType
InvalidRequest = invalidRequest
DirectoryStoreError = directoryStoreError
) )
var ( var (
ManifestRootPath = manifestRootPath ContentTypeTar = contentTypeTar
ManifestWebsiteIndexDocumentSuffixKey = manifestWebsiteIndexDocumentSuffixKey
ManifestWebsiteErrorDocumentPathKey = manifestWebsiteErrorDocumentPathKey
) )
var ( var (
......
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package api
import (
"bufio"
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"mime/multipart"
"net/http"
"os"
"strconv"
"time"
"github.com/ethersphere/bee/pkg/collection/entry"
"github.com/ethersphere/bee/pkg/file"
"github.com/ethersphere/bee/pkg/file/joiner"
"github.com/ethersphere/bee/pkg/jsonhttp"
"github.com/ethersphere/bee/pkg/sctx"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/tags"
"github.com/ethersphere/bee/pkg/tracing"
"github.com/ethersphere/langos"
"github.com/gorilla/mux"
)
const (
	// multiPartFormData is the media type that triggers multipart handling
	// in fileUploadHandler; any other content type is read as a raw body.
	multiPartFormData = "multipart/form-data"
)
// fileUploadResponse is returned when an HTTP request to upload a file is successful.
type fileUploadResponse struct {
	// Reference is the swarm address of the stored file entry.
	Reference swarm.Address `json:"reference"`
}
// fileUploadHandler uploads the file and its metadata supplied as:
// - multipart http message (only the first part is read)
// - other content types as complete file body
//
// It stores three linked pieces: the file data, a metadata chunk
// (filename + mime type) and a collection entry joining the two; the
// entry's reference is returned to the caller in the JSON response and
// in the ETag header.
func (s *server) fileUploadHandler(w http.ResponseWriter, r *http.Request) {
	var (
		reader                  io.Reader
		logger                  = tracing.NewLoggerWithTraceID(r.Context(), s.logger)
		fileName, contentLength string
		fileSize                uint64
		contentType             = r.Header.Get("Content-Type")
	)

	mediaType, params, err := mime.ParseMediaType(contentType)
	if err != nil {
		logger.Debugf("file upload: parse content type header %q: %v", contentType, err)
		logger.Errorf("file upload: parse content type header %q", contentType)
		jsonhttp.BadRequest(w, "invalid content-type header")
		return
	}

	tag, created, err := s.getOrCreateTag(r.Header.Get(SwarmTagHeader))
	if err != nil {
		logger.Debugf("file upload: get or create tag: %v", err)
		logger.Error("file upload: get or create tag")
		jsonhttp.InternalServerError(w, "cannot get or create tag")
		return
	}

	if !created {
		// only in the case when tag is sent via header (i.e. not created by this request)
		if estimatedTotalChunks := requestCalculateNumberOfChunks(r); estimatedTotalChunks > 0 {
			err = tag.IncN(tags.TotalChunks, estimatedTotalChunks)
			if err != nil {
				// use the trace-aware logger so the failure is correlated with this request
				logger.Debugf("file upload: increment tag: %v", err)
				logger.Error("file upload: increment tag")
				jsonhttp.InternalServerError(w, "increment tag")
				return
			}
		}
	}

	// Add the tag to the context
	ctx := sctx.SetTag(r.Context(), tag)

	if mediaType == multiPartFormData {
		mr := multipart.NewReader(r.Body, params["boundary"])

		// read only the first part, as only one file upload is supported
		part, err := mr.NextPart()
		if err != nil {
			logger.Debugf("file upload: read multipart: %v", err)
			logger.Error("file upload: read multipart")
			jsonhttp.BadRequest(w, "invalid multipart/form-data")
			return
		}

		// try to find filename
		// 1) in part header params
		// 2) as formname
		// 3) file reference hash (after uploading the file)
		if fileName = part.FileName(); fileName == "" {
			fileName = part.FormName()
		}

		// then find out content type
		contentType = part.Header.Get("Content-Type")
		if contentType == "" {
			// sniff the content type from the first 512 bytes of the part
			br := bufio.NewReader(part)
			buf, err := br.Peek(512)
			if err != nil && err != io.EOF {
				logger.Debugf("file upload: read content type, file %q: %v", fileName, err)
				logger.Errorf("file upload: read content type, file %q", fileName)
				jsonhttp.BadRequest(w, "error reading content type")
				return
			}
			contentType = http.DetectContentType(buf)
			reader = br
		} else {
			reader = part
		}
		contentLength = part.Header.Get("Content-Length")
	} else {
		fileName = r.URL.Query().Get("name")
		contentLength = r.Header.Get("Content-Length")
		reader = r.Body
	}

	if contentLength != "" {
		fileSize, err = strconv.ParseUint(contentLength, 10, 64)
		if err != nil {
			logger.Debugf("file upload: content length, file %q: %v", fileName, err)
			logger.Errorf("file upload: content length, file %q", fileName)
			jsonhttp.BadRequest(w, "invalid content length header")
			return
		}
	} else {
		// copy the part to a tmp file to get its size
		tmp, err := ioutil.TempFile("", "bee-multipart")
		if err != nil {
			logger.Debugf("file upload: create temporary file: %v", err)
			logger.Errorf("file upload: create temporary file")
			jsonhttp.InternalServerError(w, nil)
			return
		}
		defer os.Remove(tmp.Name())
		defer tmp.Close()
		n, err := io.Copy(tmp, reader)
		if err != nil {
			logger.Debugf("file upload: write temporary file: %v", err)
			logger.Error("file upload: write temporary file")
			jsonhttp.InternalServerError(w, nil)
			return
		}
		if _, err := tmp.Seek(0, io.SeekStart); err != nil {
			logger.Debugf("file upload: seek to beginning of temporary file: %v", err)
			logger.Error("file upload: seek to beginning of temporary file")
			jsonhttp.InternalServerError(w, nil)
			return
		}
		fileSize = uint64(n)
		reader = tmp
	}

	p := requestPipelineFn(s.storer, r)

	// first store the file and get its reference
	fr, err := p(ctx, reader, int64(fileSize))
	if err != nil {
		logger.Debugf("file upload: file store, file %q: %v", fileName, err)
		logger.Errorf("file upload: file store, file %q", fileName)
		jsonhttp.InternalServerError(w, "could not store file data")
		return
	}

	// If filename is still empty, use the file hash as the filename
	if fileName == "" {
		fileName = fr.String()
	}

	// then store the metadata and get its reference
	m := entry.NewMetadata(fileName)
	m.MimeType = contentType
	metadataBytes, err := json.Marshal(m)
	if err != nil {
		logger.Debugf("file upload: metadata marshal, file %q: %v", fileName, err)
		logger.Errorf("file upload: metadata marshal, file %q", fileName)
		jsonhttp.InternalServerError(w, "metadata marshal error")
		return
	}

	if !created {
		// only in the case when tag is sent via header (i.e. not created by this request)
		// here we have additional chunks:
		// - for metadata (1 or more) -> we use estimation function
		// - for collection entry (1)
		estimatedTotalChunks := calculateNumberOfChunks(int64(len(metadataBytes)), requestEncrypt(r))
		err = tag.IncN(tags.TotalChunks, estimatedTotalChunks+1)
		if err != nil {
			// use the trace-aware logger so the failure is correlated with this request
			logger.Debugf("file upload: increment tag: %v", err)
			logger.Error("file upload: increment tag")
			jsonhttp.InternalServerError(w, "increment tag")
			return
		}
	}

	mr, err := p(ctx, bytes.NewReader(metadataBytes), int64(len(metadataBytes)))
	if err != nil {
		logger.Debugf("file upload: metadata store, file %q: %v", fileName, err)
		logger.Errorf("file upload: metadata store, file %q", fileName)
		jsonhttp.InternalServerError(w, "could not store metadata")
		return
	}

	// now join both references (mr,fr) to create an entry and store it.
	entrie := entry.New(fr, mr)
	fileEntryBytes, err := entrie.MarshalBinary()
	if err != nil {
		logger.Debugf("file upload: entry marshal, file %q: %v", fileName, err)
		logger.Errorf("file upload: entry marshal, file %q", fileName)
		jsonhttp.InternalServerError(w, "entry marshal error")
		return
	}
	reference, err := p(ctx, bytes.NewReader(fileEntryBytes), int64(len(fileEntryBytes)))
	if err != nil {
		logger.Debugf("file upload: entry store, file %q: %v", fileName, err)
		logger.Errorf("file upload: entry store, file %q", fileName)
		jsonhttp.InternalServerError(w, "could not store entry")
		return
	}

	if created {
		_, err = tag.DoneSplit(reference)
		if err != nil {
			logger.Debugf("file upload: done split: %v", err)
			logger.Error("file upload: done split failed")
			jsonhttp.InternalServerError(w, nil)
			return
		}
	}
	w.Header().Set("ETag", fmt.Sprintf("%q", reference.String()))
	w.Header().Set(SwarmTagHeader, fmt.Sprint(tag.Uid))
	w.Header().Set("Access-Control-Expose-Headers", SwarmTagHeader)
	jsonhttp.OK(w, fileUploadResponse{
		Reference: reference,
	})
}
// fileUploadInfo contains the data for a file to be uploaded.
// NOTE(review): not referenced anywhere in this file's visible code —
// confirm whether it is used elsewhere before removing.
type fileUploadInfo struct {
	name        string // file name
	size        int64  // file size
	contentType string // MIME type of the file content
	reader      io.Reader // source of the file content
}
// fileDownloadHandler downloads the file given the entry's reference.
// The path variable "addr" may be a hex address or a resolvable name.
// The handler joins and reads the stored entry, then the entry's metadata
// chunk (filename, mime type), and finally delegates streaming of the file
// content to downloadHandler with Content-Disposition/Content-Type set
// from the metadata.
func (s *server) fileDownloadHandler(w http.ResponseWriter, r *http.Request) {
	logger := tracing.NewLoggerWithTraceID(r.Context(), s.logger)
	nameOrHex := mux.Vars(r)["addr"]

	address, err := s.resolveNameOrAddress(nameOrHex)
	if err != nil {
		logger.Debugf("file download: parse file address %s: %v", nameOrHex, err)
		logger.Errorf("file download: parse file address %s", nameOrHex)
		jsonhttp.NotFound(w, nil)
		return
	}

	// recovery targets, if supplied, are propagated via the request context
	targets := r.URL.Query().Get("targets")
	if targets != "" {
		r = r.WithContext(sctx.SetTargets(r.Context(), targets))
	}

	// read entry
	j, _, err := joiner.New(r.Context(), s.storer, address)
	if err != nil {
		logger.Debugf("file download: joiner %s: %v", address, err)
		logger.Errorf("file download: joiner %s", address)
		jsonhttp.NotFound(w, nil)
		return
	}

	buf := bytes.NewBuffer(nil)
	_, err = file.JoinReadAll(r.Context(), j, buf)
	if err != nil {
		logger.Debugf("file download: read entry %s: %v", address, err)
		logger.Errorf("file download: read entry %s", address)
		jsonhttp.NotFound(w, nil)
		return
	}
	e := &entry.Entry{}
	err = e.UnmarshalBinary(buf.Bytes())
	if err != nil {
		logger.Debugf("file download: unmarshal entry %s: %v", address, err)
		logger.Errorf("file download: unmarshal entry %s", address)
		jsonhttp.NotFound(w, nil)
		return
	}

	// If none match header is set always send the reply as not modified
	// TODO: when SOC comes, we need to revisit this concept
	noneMatchEtag := r.Header.Get("If-None-Match")
	if noneMatchEtag != "" {
		if e.Reference().Equal(address) {
			w.WriteHeader(http.StatusNotModified)
			return
		}
	}

	// read metadata
	j, _, err = joiner.New(r.Context(), s.storer, e.Metadata())
	if err != nil {
		logger.Debugf("file download: joiner %s: %v", address, err)
		logger.Errorf("file download: joiner %s", address)
		jsonhttp.NotFound(w, nil)
		return
	}

	buf = bytes.NewBuffer(nil)
	_, err = file.JoinReadAll(r.Context(), j, buf)
	if err != nil {
		logger.Debugf("file download: read metadata %s: %v", nameOrHex, err)
		logger.Errorf("file download: read metadata %s", nameOrHex)
		jsonhttp.NotFound(w, nil)
		return
	}
	metaData := &entry.Metadata{}
	err = json.Unmarshal(buf.Bytes(), metaData)
	if err != nil {
		logger.Debugf("file download: unmarshal metadata %s: %v", nameOrHex, err)
		logger.Errorf("file download: unmarshal metadata %s", nameOrHex)
		jsonhttp.NotFound(w, nil)
		return
	}

	// propagate the stored filename and mime type to the client
	additionalHeaders := http.Header{
		"Content-Disposition": {fmt.Sprintf("inline; filename=\"%s\"", metaData.Filename)},
		"Content-Type":        {metaData.MimeType},
	}

	s.downloadHandler(w, r, e.Reference(), additionalHeaders, true)
}
// downloadHandler contains common logic for dowloading Swarm file from API.
// It joins the chunks behind reference, applies the given additional headers
// (multi-valued headers are flattened with "; "), optionally sets an ETag,
// and streams the content with range-request support via http.ServeContent.
func (s *server) downloadHandler(w http.ResponseWriter, r *http.Request, reference swarm.Address, additionalHeaders http.Header, etag bool) {
	logger := tracing.NewLoggerWithTraceID(r.Context(), s.logger)

	recoveryTargets := r.URL.Query().Get("targets")
	if recoveryTargets != "" {
		r = r.WithContext(sctx.SetTargets(r.Context(), recoveryTargets))
	}

	reader, fileLen, err := joiner.New(r.Context(), s.storer, reference)
	if err != nil {
		if errors.Is(err, storage.ErrNotFound) {
			logger.Debugf("api download: not found %s: %v", reference, err)
			logger.Error("api download: not found")
			jsonhttp.NotFound(w, nil)
			return
		}
		logger.Debugf("api download: invalid root chunk %s: %v", reference, err)
		logger.Error("api download: invalid root chunk")
		jsonhttp.NotFound(w, nil)
		return
	}

	// include additional headers, joining multiple values into one field
	for name, values := range additionalHeaders {
		joined := ""
		for _, value := range values {
			if joined != "" {
				joined += "; "
			}
			joined += value
		}
		w.Header().Set(name, joined)
	}

	if etag {
		w.Header().Set("ETag", fmt.Sprintf("%q", reference))
	}
	w.Header().Set("Content-Length", fmt.Sprintf("%d", fileLen))
	w.Header().Set("Decompressed-Content-Length", fmt.Sprintf("%d", fileLen))
	w.Header().Set("Access-Control-Expose-Headers", "Content-Disposition")
	if recoveryTargets != "" {
		w.Header().Set(TargetsRecoveryHeader, recoveryTargets)
	}

	http.ServeContent(w, r, "", time.Now(), langos.NewBufferedLangos(reader, lookaheadBufferSize(fileLen)))
}
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package api_test
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"mime"
"mime/multipart"
"net/http"
"strconv"
"strings"
"testing"
statestore "github.com/ethersphere/bee/pkg/statestore/mock"
"github.com/ethersphere/bee/pkg/api"
"github.com/ethersphere/bee/pkg/jsonhttp"
"github.com/ethersphere/bee/pkg/jsonhttp/jsonhttptest"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/storage/mock"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/tags"
)
// TestFiles exercises the /files upload and download endpoints:
// content-type validation, multipart upload, encrypt/decrypt round-trip,
// content-type sniffing, data round-trip and recovery-target propagation.
// The expected root hashes are pinned; they change if the chunking or
// entry/metadata encoding changes.
func TestFiles(t *testing.T) {
	var (
		fileUploadResource   = "/files"
		targets              = "0x222"
		fileDownloadResource = func(addr string) string { return "/files/" + addr }
		simpleData           = []byte("this is a simple text")
		mockStatestore       = statestore.NewStateStore()
		logger               = logging.New(ioutil.Discard, 0)
		client, _, _         = newTestServer(t, testServerOptions{
			Storer: mock.NewStorer(),
			Tags:   tags.NewTags(mockStatestore, logger),
		})
	)

	// a missing/unparseable Content-Type header must be rejected
	t.Run("invalid-content-type", func(t *testing.T) {
		jsonhttptest.Request(t, client, http.MethodPost, fileUploadResource, http.StatusBadRequest,
			jsonhttptest.WithRequestBody(bytes.NewReader(simpleData)),
			jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{
				Message: "invalid content-type header",
				Code:    http.StatusBadRequest,
			}),
		)
	})

	t.Run("multipart-upload", func(t *testing.T) {
		fileName := "simple_file.txt"
		rootHash := "295673cf7aa55d119dd6f82528c91d45b53dd63dc2e4ca4abf4ed8b3a0788085"
		jsonhttptest.Request(t, client, http.MethodPost, fileUploadResource, http.StatusOK,
			jsonhttptest.WithMultipartRequest(bytes.NewReader(simpleData), len(simpleData), fileName, ""),
			jsonhttptest.WithExpectedJSONResponse(api.FileUploadResponse{
				Reference: swarm.MustParseHexAddress(rootHash),
			}),
		)
	})

	// encrypted uploads produce a random reference, so it is captured from
	// the response rather than pinned
	t.Run("encrypt-decrypt", func(t *testing.T) {
		fileName := "my-pictures.jpeg"

		var resp api.FileUploadResponse
		jsonhttptest.Request(t, client, http.MethodPost, fileUploadResource+"?name="+fileName, http.StatusOK,
			jsonhttptest.WithRequestBody(bytes.NewReader(simpleData)),
			jsonhttptest.WithRequestHeader(api.SwarmEncryptHeader, "True"),
			jsonhttptest.WithRequestHeader("Content-Type", "image/jpeg; charset=utf-8"),
			jsonhttptest.WithUnmarshalJSONResponse(&resp),
		)

		rootHash := resp.Reference.String()
		rcvdHeader := jsonhttptest.Request(t, client, http.MethodGet, fileDownloadResource(rootHash), http.StatusOK,
			jsonhttptest.WithExpectedResponse(simpleData),
		)
		cd := rcvdHeader.Get("Content-Disposition")
		_, params, err := mime.ParseMediaType(cd)
		if err != nil {
			t.Fatal(err)
		}
		if params["filename"] != fileName {
			t.Fatal("Invalid file name detected")
		}
		if rcvdHeader.Get("Content-Type") != "image/jpeg; charset=utf-8" {
			t.Fatal("Invalid content type detected")
		}
	})

	t.Run("check-content-type-detection", func(t *testing.T) {
		fileName := "my-pictures.jpeg"
		rootHash := "f2e761160deda91c1fbfab065a5abf530b0766b3e102b51fbd626ba37c3bc581"

		t.Run("binary", func(t *testing.T) {
			jsonhttptest.Request(t, client, http.MethodPost, fileUploadResource+"?name="+fileName, http.StatusOK,
				jsonhttptest.WithRequestBody(bytes.NewReader(simpleData)),
				jsonhttptest.WithExpectedJSONResponse(api.FileUploadResponse{
					Reference: swarm.MustParseHexAddress(rootHash),
				}),
				jsonhttptest.WithRequestHeader("Content-Type", "image/jpeg; charset=utf-8"),
			)

			rcvdHeader := jsonhttptest.Request(t, client, http.MethodGet, fileDownloadResource(rootHash), http.StatusOK,
				jsonhttptest.WithExpectedResponse(simpleData),
			)
			cd := rcvdHeader.Get("Content-Disposition")
			_, params, err := mime.ParseMediaType(cd)
			if err != nil {
				t.Fatal(err)
			}
			if params["filename"] != fileName {
				t.Fatal("Invalid file name detected")
			}
			if rcvdHeader.Get("Content-Type") != "image/jpeg; charset=utf-8" {
				t.Fatal("Invalid content type detected")
			}
		})

		t.Run("multipart", func(t *testing.T) {
			jsonhttptest.Request(t, client, http.MethodPost, fileUploadResource, http.StatusOK,
				jsonhttptest.WithMultipartRequest(bytes.NewReader(simpleData), len(simpleData), fileName, "image/jpeg; charset=utf-8"),
				jsonhttptest.WithExpectedJSONResponse(api.FileUploadResponse{
					Reference: swarm.MustParseHexAddress(rootHash),
				}),
			)

			rcvdHeader := jsonhttptest.Request(t, client, http.MethodGet, fileDownloadResource(rootHash), http.StatusOK,
				jsonhttptest.WithExpectedResponse(simpleData),
			)
			cd := rcvdHeader.Get("Content-Disposition")
			_, params, err := mime.ParseMediaType(cd)
			if err != nil {
				t.Fatal(err)
			}
			if params["filename"] != fileName {
				t.Fatal("Invalid file name detected")
			}
			if rcvdHeader.Get("Content-Type") != "image/jpeg; charset=utf-8" {
				t.Fatal("Invalid content type detected")
			}
		})
	})

	t.Run("upload-then-download-and-check-data", func(t *testing.T) {
		fileName := "sample.html"
		rootHash := "9f8ba407ff4809e877c75506247e0f1faf206262d1ddd7b3c8f9775d3501be50"
		sampleHtml := `<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>`

		t.Run("binary", func(t *testing.T) {
			rcvdHeader := jsonhttptest.Request(t, client, http.MethodPost, fileUploadResource+"?name="+fileName, http.StatusOK,
				jsonhttptest.WithRequestBody(strings.NewReader(sampleHtml)),
				jsonhttptest.WithExpectedJSONResponse(api.FileUploadResponse{
					Reference: swarm.MustParseHexAddress(rootHash),
				}),
				jsonhttptest.WithRequestHeader("Content-Type", "text/html; charset=utf-8"),
			)
			if rcvdHeader.Get("ETag") != fmt.Sprintf("%q", rootHash) {
				t.Fatal("Invalid ETags header received")
			}

			// try to fetch the same file and check the data
			rcvdHeader = jsonhttptest.Request(t, client, http.MethodGet, fileDownloadResource(rootHash), http.StatusOK,
				jsonhttptest.WithExpectedResponse([]byte(sampleHtml)),
			)

			// check the headers
			cd := rcvdHeader.Get("Content-Disposition")
			_, params, err := mime.ParseMediaType(cd)
			if err != nil {
				t.Fatal(err)
			}
			if params["filename"] != fileName {
				t.Fatal("Invalid filename detected")
			}
			if rcvdHeader.Get("Content-Type") != "text/html; charset=utf-8" {
				t.Fatal("Invalid content type detected")
			}
		})

		t.Run("multipart", func(t *testing.T) {
			rcvdHeader := jsonhttptest.Request(t, client, http.MethodPost, fileUploadResource, http.StatusOK,
				jsonhttptest.WithMultipartRequest(strings.NewReader(sampleHtml), len(sampleHtml), fileName, ""),
				jsonhttptest.WithExpectedJSONResponse(api.FileUploadResponse{
					Reference: swarm.MustParseHexAddress(rootHash),
				}),
			)
			if rcvdHeader.Get("ETag") != fmt.Sprintf("%q", rootHash) {
				t.Fatal("Invalid ETags header received")
			}

			// try to fetch the same file and check the data
			rcvdHeader = jsonhttptest.Request(t, client, http.MethodGet, fileDownloadResource(rootHash), http.StatusOK,
				jsonhttptest.WithExpectedResponse([]byte(sampleHtml)),
			)

			// check the headers
			cd := rcvdHeader.Get("Content-Disposition")
			_, params, err := mime.ParseMediaType(cd)
			if err != nil {
				t.Fatal(err)
			}
			if params["filename"] != fileName {
				t.Fatal("Invalid filename detected")
			}
			if rcvdHeader.Get("Content-Type") != "text/html; charset=utf-8" {
				t.Fatal("Invalid content type detected")
			}
		})
	})

	t.Run("upload-then-download-with-targets", func(t *testing.T) {
		fileName := "simple_file.txt"
		rootHash := "19d2e82c076031ec4e456978f839472d2f1b1b969a765420404d8d315a0c6123"

		jsonhttptest.Request(t, client, http.MethodPost, fileUploadResource+"?name="+fileName, http.StatusOK,
			jsonhttptest.WithRequestBody(bytes.NewReader(simpleData)),
			jsonhttptest.WithExpectedJSONResponse(api.FileUploadResponse{
				Reference: swarm.MustParseHexAddress(rootHash),
			}),
			jsonhttptest.WithRequestHeader("Content-Type", "text/html; charset=utf-8"),
		)

		// the targets query parameter must be echoed back in the recovery header
		rcvdHeader := jsonhttptest.Request(t, client, http.MethodGet, fileDownloadResource(rootHash)+"?targets="+targets, http.StatusOK,
			jsonhttptest.WithExpectedResponse(simpleData),
		)

		if rcvdHeader.Get(api.TargetsRecoveryHeader) != targets {
			t.Fatalf("targets mismatch. got %s, want %s", rcvdHeader.Get(api.TargetsRecoveryHeader), targets)
		}
	})
}
// TestRangeRequests validates that all endpoints are serving content with
// respect to HTTP Range headers.
func TestRangeRequests(t *testing.T) {
	data := []byte("Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus dignissim tincidunt orci id aliquam. Praesent eget turpis in lectus semper consectetur et ut nibh. Nam rhoncus, augue sit amet sollicitudin lacinia, turpis tortor molestie urna, at mattis sem sapien sit amet augue. In bibendum ex vel odio dignissim interdum. Quisque hendrerit sapien et porta condimentum. Vestibulum efficitur mauris tellus, eget vestibulum sapien vulputate ac. Proin et vulputate sapien. Duis tincidunt mauris vulputate porta venenatis. Sed dictum aliquet urna, sit amet fermentum velit pellentesque vitae. Nam sed nisi ultrices, volutpat quam et, malesuada sapien. Nunc gravida non orci at rhoncus. Sed vitae dui accumsan, venenatis lectus et, mattis tellus. Proin sed mauris eu mi congue lacinia.")

	// one upload scenario per endpoint family; each is then probed with
	// every range combination below
	uploads := []struct {
		name             string
		uploadEndpoint   string
		downloadEndpoint string
		reference        string
		filepath         string
		reader           io.Reader
		contentType      string
	}{
		{
			name:             "bytes",
			uploadEndpoint:   "/bytes",
			downloadEndpoint: "/bytes",
			reference:        "4985af9dc3339ad3111c71651b92df7f21587391c01d3aa34a26879b9a1beb78",
			reader:           bytes.NewReader(data),
			contentType:      "text/plain; charset=utf-8",
		},
		{
			name:             "file",
			uploadEndpoint:   "/files",
			downloadEndpoint: "/files",
			reference:        "e387331d1c9d82f2cb01c47a4ffcdf2ed0c047cbe283e484a64fd61bffc410e7",
			reader:           bytes.NewReader(data),
			contentType:      "text/plain; charset=utf-8",
		},
		{
			name:             "bzz",
			uploadEndpoint:   "/dirs",
			downloadEndpoint: "/bzz",
			filepath:         "/ipsum/lorem.txt",
			reference:        "96c68b99304b0868189e5c1d6c10be1984d93e88aab0384907f6b8814f60150b",
			reader: tarFiles(t, []f{
				{
					data: data,
					name: "lorem.txt",
					dir:  "ipsum",
					// NOTE(review): this commit appears to replace the
					// `reference` field of struct f — confirm this field
					// still exists, otherwise this literal will not compile.
					reference: swarm.MustParseHexAddress("4985af9dc3339ad3111c71651b92df7f21587391c01d3aa34a26879b9a1beb78"),
					header: http.Header{
						"Content-Type": {"text/plain; charset=utf-8"},
					},
				},
			}),
			contentType: api.ContentTypeTar,
		},
	}

	// range combinations: -1 means "unbounded" on that side
	ranges := []struct {
		name   string
		ranges [][2]int
	}{
		{
			name:   "all",
			ranges: [][2]int{{0, len(data)}},
		},
		{
			name:   "all without end",
			ranges: [][2]int{{0, -1}},
		},
		{
			name:   "all without start",
			ranges: [][2]int{{-1, len(data)}},
		},
		{
			name:   "head",
			ranges: [][2]int{{0, 50}},
		},
		{
			name:   "tail",
			ranges: [][2]int{{250, len(data)}},
		},
		{
			name:   "middle",
			ranges: [][2]int{{10, 15}},
		},
		{
			name:   "multiple",
			ranges: [][2]int{{10, 15}, {100, 125}},
		},
		{
			name:   "even more multiple parts",
			ranges: [][2]int{{10, 15}, {100, 125}, {250, 252}, {261, 270}, {270, 280}},
		},
	}

	for _, upload := range uploads {
		t.Run(upload.name, func(t *testing.T) {
			mockStatestore := statestore.NewStateStore()
			logger := logging.New(ioutil.Discard, 0)
			client, _, _ := newTestServer(t, testServerOptions{
				Storer: mock.NewStorer(),
				Tags:   tags.NewTags(mockStatestore, logger),
				Logger: logging.New(ioutil.Discard, 5),
			})

			uploadReference := upload.reference

			jsonhttptest.Request(t, client, http.MethodPost, upload.uploadEndpoint, http.StatusOK,
				jsonhttptest.WithRequestBody(upload.reader),
				jsonhttptest.WithRequestHeader("Content-Type", upload.contentType),
			)

			for _, tc := range ranges {
				t.Run(tc.name, func(t *testing.T) {
					rangeHeader, want := createRangeHeader(data, tc.ranges)

					var body []byte
					respHeaders := jsonhttptest.Request(t, client, http.MethodGet, upload.downloadEndpoint+"/"+uploadReference+upload.filepath, http.StatusPartialContent,
						jsonhttptest.WithRequestHeader("Range", rangeHeader),
						jsonhttptest.WithPutResponseBody(&body),
					)

					got := parseRangeParts(t, respHeaders.Get("Content-Type"), body)

					if len(got) != len(want) {
						t.Fatalf("got %v parts, want %v parts", len(got), len(want))
					}
					for i := 0; i < len(want); i++ {
						if !bytes.Equal(got[i], want[i]) {
							t.Errorf("part %v: got %q, want %q", i, string(got[i]), string(want[i]))
						}
					}
				})
			}
		})
	}
}
// createRangeHeader turns the half-open [start, end) pairs in ranges into an
// HTTP Range header value and the corresponding expected body parts.
//
// A negative start yields the suffix form "-<length>" (last r[1] bytes of
// data); a negative end yields the prefix form "<start>-" (everything from
// r[0]). Fixes: the suffix form previously collected data[:r[1]] (the FIRST
// r[1] bytes), which only matched the served content when r[1] == len(data).
// Also removes a dead branch: inside the else, r[0] and r[1] cannot both be
// non-negative (that case is handled by the first branch).
func createRangeHeader(data []byte, ranges [][2]int) (header string, parts [][]byte) {
	header = "bytes="
	for i, r := range ranges {
		if i > 0 {
			header += ", "
		}
		switch {
		case r[0] >= 0 && r[1] >= 0:
			// Range: <unit>=<range-start>-<range-end> // end is inclusive
			header += fmt.Sprintf("%v-%v", r[0], r[1]-1)
			parts = append(parts, data[r[0]:r[1]])
		case r[0] >= 0:
			// Range: <unit>=<range-start>-
			header += strconv.Itoa(r[0]) + "-"
			parts = append(parts, data[r[0]:])
		case r[1] >= 0:
			// Range: <unit>=-<suffix-length> // the parameter is a length,
			// so the expected content is the trailing r[1] bytes
			header += "-" + strconv.Itoa(r[1])
			parts = append(parts, data[len(data)-r[1]:])
		default:
			// both sides unbounded: no part is expected
			header += "-"
		}
	}
	return
}
// parseRangeParts extracts the individual body parts from a range-response
// body. Non-multipart content types are returned as a single part; for
// multipart/byteranges each part body is read in full.
//
// Fixes: errors from NextPart other than io.EOF were previously swallowed
// by the loop condition, silently truncating the parsed parts; they now
// fail the test.
func parseRangeParts(t *testing.T, contentType string, body []byte) (parts [][]byte) {
	t.Helper()

	mimetype, params, _ := mime.ParseMediaType(contentType)
	if mimetype != "multipart/byteranges" {
		parts = append(parts, body)
		return
	}
	mr := multipart.NewReader(bytes.NewReader(body), params["boundary"])
	for {
		part, err := mr.NextPart()
		if err == io.EOF {
			break
		}
		if err != nil {
			t.Fatal(err)
		}
		value, err := ioutil.ReadAll(part)
		if err != nil {
			t.Fatal(err)
		}
		parts = append(parts, value)
	}
	return parts
}
...@@ -47,7 +47,7 @@ func TestPinBytesHandler(t *testing.T) { ...@@ -47,7 +47,7 @@ func TestPinBytesHandler(t *testing.T) {
jsonhttptest.Request(t, client, http.MethodPost, bytesUploadResource, http.StatusOK, jsonhttptest.Request(t, client, http.MethodPost, bytesUploadResource, http.StatusOK,
jsonhttptest.WithRequestBody(bytes.NewReader(simpleData)), jsonhttptest.WithRequestBody(bytes.NewReader(simpleData)),
jsonhttptest.WithExpectedJSONResponse(api.FileUploadResponse{ jsonhttptest.WithExpectedJSONResponse(api.BzzUploadResponse{
Reference: swarm.MustParseHexAddress(rootHash), Reference: swarm.MustParseHexAddress(rootHash),
}), }),
) )
...@@ -110,7 +110,7 @@ func TestPinBytesHandler(t *testing.T) { ...@@ -110,7 +110,7 @@ func TestPinBytesHandler(t *testing.T) {
jsonhttptest.Request(t, client, http.MethodPost, bytesUploadResource, http.StatusOK, jsonhttptest.Request(t, client, http.MethodPost, bytesUploadResource, http.StatusOK,
jsonhttptest.WithRequestBody(bytes.NewReader(b)), jsonhttptest.WithRequestBody(bytes.NewReader(b)),
jsonhttptest.WithExpectedJSONResponse(api.FileUploadResponse{ jsonhttptest.WithExpectedJSONResponse(api.BzzUploadResponse{
Reference: swarm.MustParseHexAddress(rootHash), Reference: swarm.MustParseHexAddress(rootHash),
}), }),
) )
......
...@@ -22,7 +22,7 @@ import ( ...@@ -22,7 +22,7 @@ import (
func TestPinBzzHandler(t *testing.T) { func TestPinBzzHandler(t *testing.T) {
var ( var (
dirUploadResource = "/dirs" dirUploadResource = "/bzz"
pinBzzResource = "/pin/bzz" pinBzzResource = "/pin/bzz"
pinBzzAddressResource = func(addr string) string { return pinBzzResource + "/" + addr } pinBzzAddressResource = func(addr string) string { return pinBzzResource + "/" + addr }
pinChunksResource = "/pin/chunks" pinChunksResource = "/pin/chunks"
...@@ -35,6 +35,7 @@ func TestPinBzzHandler(t *testing.T) { ...@@ -35,6 +35,7 @@ func TestPinBzzHandler(t *testing.T) {
Storer: mockStorer, Storer: mockStorer,
Traversal: traversalService, Traversal: traversalService,
Tags: tags.NewTags(mockStatestore, logger), Tags: tags.NewTags(mockStatestore, logger),
Logger: logger,
}) })
) )
...@@ -49,13 +50,14 @@ func TestPinBzzHandler(t *testing.T) { ...@@ -49,13 +50,14 @@ func TestPinBzzHandler(t *testing.T) {
tarReader := tarFiles(t, files) tarReader := tarFiles(t, files)
rootHash := "a85aaea6a34a5c7127a3546196f2111f866fe369c6d6562ed5d3313a99388c03" rootHash := "9e178dbd1ed4b748379e25144e28dfb29c07a4b5114896ef454480115a56b237"
// verify directory tar upload response // verify directory tar upload response
jsonhttptest.Request(t, client, http.MethodPost, dirUploadResource, http.StatusOK, jsonhttptest.Request(t, client, http.MethodPost, dirUploadResource, http.StatusOK,
jsonhttptest.WithRequestBody(tarReader), jsonhttptest.WithRequestBody(tarReader),
jsonhttptest.WithRequestHeader("Content-Type", api.ContentTypeTar), jsonhttptest.WithRequestHeader("Content-Type", api.ContentTypeTar),
jsonhttptest.WithExpectedJSONResponse(api.FileUploadResponse{ jsonhttptest.WithRequestHeader(api.SwarmCollectionHeader, "True"),
jsonhttptest.WithExpectedJSONResponse(api.BzzUploadResponse{
Reference: swarm.MustParseHexAddress(rootHash), Reference: swarm.MustParseHexAddress(rootHash),
}), }),
) )
...@@ -67,7 +69,7 @@ func TestPinBzzHandler(t *testing.T) { ...@@ -67,7 +69,7 @@ func TestPinBzzHandler(t *testing.T) {
}), }),
) )
expectedChunkCount := 7 expectedChunkCount := 3
// get the reference as everytime it will change because of random encryption key // get the reference as everytime it will change because of random encryption key
var resp api.ListPinnedChunksResponse var resp api.ListPinnedChunksResponse
...@@ -82,7 +84,7 @@ func TestPinBzzHandler(t *testing.T) { ...@@ -82,7 +84,7 @@ func TestPinBzzHandler(t *testing.T) {
}) })
t.Run("unpin-bzz-1", func(t *testing.T) { t.Run("unpin-bzz-1", func(t *testing.T) {
rootHash := "a85aaea6a34a5c7127a3546196f2111f866fe369c6d6562ed5d3313a99388c03" rootHash := "9e178dbd1ed4b748379e25144e28dfb29c07a4b5114896ef454480115a56b237"
jsonhttptest.Request(t, client, http.MethodDelete, pinBzzAddressResource(rootHash), http.StatusOK, jsonhttptest.Request(t, client, http.MethodDelete, pinBzzAddressResource(rootHash), http.StatusOK,
jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{ jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{
......
...@@ -48,7 +48,7 @@ func (s *server) pinFile(w http.ResponseWriter, r *http.Request) { ...@@ -48,7 +48,7 @@ func (s *server) pinFile(w http.ResponseWriter, r *http.Request) {
chunkAddressFn := s.pinChunkAddressFn(ctx, addr) chunkAddressFn := s.pinChunkAddressFn(ctx, addr)
err = s.traversal.TraverseFileAddresses(ctx, addr, chunkAddressFn) err = s.traversal.TraverseAddresses(ctx, addr, chunkAddressFn)
if err != nil { if err != nil {
s.logger.Debugf("pin files: traverse chunks: %v, addr %s", err, addr) s.logger.Debugf("pin files: traverse chunks: %v, addr %s", err, addr)
...@@ -93,7 +93,7 @@ func (s *server) unpinFile(w http.ResponseWriter, r *http.Request) { ...@@ -93,7 +93,7 @@ func (s *server) unpinFile(w http.ResponseWriter, r *http.Request) {
chunkAddressFn := s.unpinChunkAddressFn(ctx, addr) chunkAddressFn := s.unpinChunkAddressFn(ctx, addr)
err = s.traversal.TraverseFileAddresses(ctx, addr, chunkAddressFn) err = s.traversal.TraverseAddresses(ctx, addr, chunkAddressFn)
if err != nil { if err != nil {
s.logger.Debugf("pin files: traverse chunks: %v, addr %s", err, addr) s.logger.Debugf("pin files: traverse chunks: %v, addr %s", err, addr)
......
...@@ -8,7 +8,6 @@ import ( ...@@ -8,7 +8,6 @@ import (
"bytes" "bytes"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
"sort"
"testing" "testing"
"github.com/ethersphere/bee/pkg/api" "github.com/ethersphere/bee/pkg/api"
...@@ -24,7 +23,7 @@ import ( ...@@ -24,7 +23,7 @@ import (
func TestPinFilesHandler(t *testing.T) { func TestPinFilesHandler(t *testing.T) {
var ( var (
fileUploadResource = "/files" fileUploadResource = "/bzz"
pinFilesResource = "/pin/files" pinFilesResource = "/pin/files"
pinFilesAddressResource = func(addr string) string { return pinFilesResource + "/" + addr } pinFilesAddressResource = func(addr string) string { return pinFilesResource + "/" + addr }
pinChunksResource = "/pin/chunks" pinChunksResource = "/pin/chunks"
...@@ -39,17 +38,20 @@ func TestPinFilesHandler(t *testing.T) { ...@@ -39,17 +38,20 @@ func TestPinFilesHandler(t *testing.T) {
Storer: mockStorer, Storer: mockStorer,
Traversal: traversalService, Traversal: traversalService,
Tags: tags.NewTags(mockStatestore, logger), Tags: tags.NewTags(mockStatestore, logger),
Logger: logger,
}) })
) )
t.Run("pin-file-1", func(t *testing.T) { t.Run("pin-file-1", func(t *testing.T) {
rootHash := "dc82503e0ed041a57327ad558d7aa69a867024c8221306c461ae359dc34d1c6a" rootHash := "dd13a5a6cc9db3ef514d645e6719178dbfb1a90b49b9262cafce35b0d27cf245"
metadataHash := "d936d7180f230b3424842ea10848aa205f2f0e830cb9cc7588a39c9381544bf9" metadataHash := "0cc878d32c96126d47f63fbe391114ee1438cd521146fc975dea1546d302b6c0"
metadataHash2 := "a14d1ef845307c634e9ec74539bd668d0d1b37f37de4128939d57098135850da"
contentHash := "838d0a193ecd1152d1bb1432d5ecc02398533b2494889e23b8bd5ace30ac2aeb" contentHash := "838d0a193ecd1152d1bb1432d5ecc02398533b2494889e23b8bd5ace30ac2aeb"
jsonhttptest.Request(t, client, http.MethodPost, fileUploadResource, http.StatusOK, jsonhttptest.Request(t, client, http.MethodPost,
fileUploadResource+"?name=somefile.txt", http.StatusOK,
jsonhttptest.WithRequestBody(bytes.NewReader(simpleData)), jsonhttptest.WithRequestBody(bytes.NewReader(simpleData)),
jsonhttptest.WithExpectedJSONResponse(api.FileUploadResponse{ jsonhttptest.WithExpectedJSONResponse(api.BzzUploadResponse{
Reference: swarm.MustParseHexAddress(rootHash), Reference: swarm.MustParseHexAddress(rootHash),
}), }),
jsonhttptest.WithRequestHeader("Content-Type", "text/plain"), jsonhttptest.WithRequestHeader("Content-Type", "text/plain"),
...@@ -62,27 +64,36 @@ func TestPinFilesHandler(t *testing.T) { ...@@ -62,27 +64,36 @@ func TestPinFilesHandler(t *testing.T) {
}), }),
) )
hashes := []string{rootHash, metadataHash, contentHash} hashes := map[string]int{
sort.Strings(hashes) rootHash: 1,
metadataHash: 1,
expectedResponse := api.ListPinnedChunksResponse{ metadataHash2: 1,
Chunks: []api.PinnedChunk{}, contentHash: 1,
} }
for _, h := range hashes { actualResponse := api.ListPinnedChunksResponse{
expectedResponse.Chunks = append(expectedResponse.Chunks, api.PinnedChunk{ Chunks: []api.PinnedChunk{},
Address: swarm.MustParseHexAddress(h),
PinCounter: 1,
})
} }
jsonhttptest.Request(t, client, http.MethodGet, pinChunksResource, http.StatusOK, jsonhttptest.Request(t, client, http.MethodGet, pinChunksResource, http.StatusOK,
jsonhttptest.WithExpectedJSONResponse(expectedResponse), jsonhttptest.WithUnmarshalJSONResponse(&actualResponse),
) )
if len(actualResponse.Chunks) != len(hashes) {
t.Fatalf("Response chunk count mismatch Expected: %d Found: %d",
len(hashes), len(actualResponse.Chunks))
}
for _, v := range actualResponse.Chunks {
if counter, ok := hashes[v.Address.String()]; !ok {
t.Fatalf("found unexpected hash %s", v.Address.String())
} else if uint64(counter) != v.PinCounter {
t.Fatalf("found unexpected pin counter: Expected: %d, Found: %d",
counter, v.PinCounter)
}
}
}) })
t.Run("unpin-file-1", func(t *testing.T) { t.Run("unpin-file-1", func(t *testing.T) {
rootHash := "dc82503e0ed041a57327ad558d7aa69a867024c8221306c461ae359dc34d1c6a" rootHash := "dd13a5a6cc9db3ef514d645e6719178dbfb1a90b49b9262cafce35b0d27cf245"
jsonhttptest.Request(t, client, http.MethodDelete, pinFilesAddressResource(rootHash), http.StatusOK, jsonhttptest.Request(t, client, http.MethodDelete, pinFilesAddressResource(rootHash), http.StatusOK,
jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{ jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{
......
...@@ -38,26 +38,6 @@ func (s *server) setupRouting() { ...@@ -38,26 +38,6 @@ func (s *server) setupRouting() {
fmt.Fprintln(w, "User-agent: *\nDisallow: /") fmt.Fprintln(w, "User-agent: *\nDisallow: /")
}) })
handle(router, "/files", jsonhttp.MethodHandler{
"POST": web.ChainHandlers(
s.newTracingHandler("files-upload"),
web.FinalHandlerFunc(s.fileUploadHandler),
),
})
handle(router, "/files/{addr}", jsonhttp.MethodHandler{
"GET": web.ChainHandlers(
s.newTracingHandler("files-download"),
web.FinalHandlerFunc(s.fileDownloadHandler),
),
})
handle(router, "/dirs", jsonhttp.MethodHandler{
"POST": web.ChainHandlers(
s.newTracingHandler("dirs-upload"),
web.FinalHandlerFunc(s.dirUploadHandler),
),
})
handle(router, "/bytes", jsonhttp.MethodHandler{ handle(router, "/bytes", jsonhttp.MethodHandler{
"POST": web.ChainHandlers( "POST": web.ChainHandlers(
s.newTracingHandler("bytes-upload"), s.newTracingHandler("bytes-upload"),
...@@ -97,6 +77,12 @@ func (s *server) setupRouting() { ...@@ -97,6 +77,12 @@ func (s *server) setupRouting() {
), ),
}) })
handle(router, "/bzz", jsonhttp.MethodHandler{
"POST": web.ChainHandlers(
s.newTracingHandler("bzz-upload"),
web.FinalHandlerFunc(s.bzzUploadHandler),
),
})
handle(router, "/bzz/{address}", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { handle(router, "/bzz/{address}", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
u := r.URL u := r.URL
u.Path += "/" u.Path += "/"
......
...@@ -35,8 +35,7 @@ func tagsWithIdResource(id uint32) string { return fmt.Sprintf("/tags/%d", id) } ...@@ -35,8 +35,7 @@ func tagsWithIdResource(id uint32) string { return fmt.Sprintf("/tags/%d", id) }
func TestTags(t *testing.T) { func TestTags(t *testing.T) {
var ( var (
filesResource = "/files" bzzResource = "/bzz"
dirResource = "/dirs"
bytesResource = "/bytes" bytesResource = "/bytes"
chunksResource = "/chunks" chunksResource = "/chunks"
tagsResource = "/tags" tagsResource = "/tags"
...@@ -47,6 +46,7 @@ func TestTags(t *testing.T) { ...@@ -47,6 +46,7 @@ func TestTags(t *testing.T) {
client, _, _ = newTestServer(t, testServerOptions{ client, _, _ = newTestServer(t, testServerOptions{
Storer: mock.NewStorer(), Storer: mock.NewStorer(),
Tags: tag, Tags: tag,
Logger: logger,
}) })
) )
...@@ -263,10 +263,11 @@ func TestTags(t *testing.T) { ...@@ -263,10 +263,11 @@ func TestTags(t *testing.T) {
t.Run("file tags", func(t *testing.T) { t.Run("file tags", func(t *testing.T) {
// upload a file without supplying tag // upload a file without supplying tag
expectedHash := swarm.MustParseHexAddress("8e27bb803ff049e8c2f4650357026723220170c15ebf9b635a7026539879a1a8") expectedHash := swarm.MustParseHexAddress("40e739ebdfd18292925bba4138cd097db9aa18c1b57e74042f48469b48da33a8")
expectedResponse := api.FileUploadResponse{Reference: expectedHash} expectedResponse := api.BzzUploadResponse{Reference: expectedHash}
respHeaders := jsonhttptest.Request(t, client, http.MethodPost, filesResource, http.StatusOK, respHeaders := jsonhttptest.Request(t, client, http.MethodPost,
bzzResource+"?name=somefile", http.StatusOK,
jsonhttptest.WithRequestBody(bytes.NewReader([]byte("some data"))), jsonhttptest.WithRequestBody(bytes.NewReader([]byte("some data"))),
jsonhttptest.WithExpectedJSONResponse(expectedResponse), jsonhttptest.WithExpectedJSONResponse(expectedResponse),
jsonhttptest.WithRequestHeader("Content-Type", "application/octet-stream"), jsonhttptest.WithRequestHeader("Content-Type", "application/octet-stream"),
...@@ -276,7 +277,7 @@ func TestTags(t *testing.T) { ...@@ -276,7 +277,7 @@ func TestTags(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
tagValueTest(t, uint32(tagId), 3, 3, 0, 0, 0, 3, expectedHash, client) tagValueTest(t, uint32(tagId), 4, 4, 0, 0, 0, 4, expectedHash, client)
}) })
t.Run("dir tags", func(t *testing.T) { t.Run("dir tags", func(t *testing.T) {
...@@ -285,11 +286,12 @@ func TestTags(t *testing.T) { ...@@ -285,11 +286,12 @@ func TestTags(t *testing.T) {
data: []byte("some dir data"), data: []byte("some dir data"),
name: "binary-file", name: "binary-file",
}}) }})
expectedHash := swarm.MustParseHexAddress("3dc643abeb3db60a4dfb72008b577dd9a573abaa74c6afe37a75c63ceea829f6") expectedHash := swarm.MustParseHexAddress("42bc27c9137c93705ffbc2945fa1aab0e8e1826f1500b7f06f6e3f86f617213b")
expectedResponse := api.FileUploadResponse{Reference: expectedHash} expectedResponse := api.BzzUploadResponse{Reference: expectedHash}
respHeaders := jsonhttptest.Request(t, client, http.MethodPost, dirResource, http.StatusOK, respHeaders := jsonhttptest.Request(t, client, http.MethodPost, bzzResource, http.StatusOK,
jsonhttptest.WithRequestBody(tarReader), jsonhttptest.WithRequestBody(tarReader),
jsonhttptest.WithRequestHeader(api.SwarmCollectionHeader, "True"),
jsonhttptest.WithExpectedJSONResponse(expectedResponse), jsonhttptest.WithExpectedJSONResponse(expectedResponse),
jsonhttptest.WithRequestHeader("Content-Type", api.ContentTypeTar), jsonhttptest.WithRequestHeader("Content-Type", api.ContentTypeTar),
) )
...@@ -298,7 +300,7 @@ func TestTags(t *testing.T) { ...@@ -298,7 +300,7 @@ func TestTags(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
tagValueTest(t, uint32(tagId), 7, 7, 0, 0, 0, 7, expectedHash, client) tagValueTest(t, uint32(tagId), 3, 3, 0, 0, 0, 3, expectedHash, client)
}) })
t.Run("bytes tags", func(t *testing.T) { t.Run("bytes tags", func(t *testing.T) {
......
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package collection provides high-level abstractions for collections of files
package collection
import (
"github.com/ethersphere/bee/pkg/swarm"
)
// Collection provides a specific ordering of a collection of binary data vectors
// stored in bee.
type Collection interface {
	// Addresses returns the swarm addresses of the collection's members,
	// in the collection's defined order.
	Addresses() []swarm.Address
}
// Entry encapsulates data defining a single file entry.
// It may contain any number of data blobs providing context to the
// given data vector concealed by Reference.
type Entry interface {
	// Reference returns the swarm address of the entry's data payload.
	Reference() swarm.Address
	// Metadata returns the swarm address of the entry's metadata blob.
	Metadata() swarm.Address
}
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package entry
import (
"errors"
"math"
"github.com/ethersphere/bee/pkg/collection"
"github.com/ethersphere/bee/pkg/encryption"
"github.com/ethersphere/bee/pkg/swarm"
)
var (
	// Compile-time assertion that *Entry satisfies collection.Entry.
	_ = collection.Entry(&Entry{})
	// serializedDataSize is the byte length of a marshaled Entry:
	// two plain swarm section references back to back.
	serializedDataSize = swarm.SectionSize * 2
	// encryptedSerializedDataSize is the byte length of a marshaled Entry
	// whose two references are encrypted references.
	encryptedSerializedDataSize = encryption.ReferenceSize * 2
)
// Entry provides addition of metadata to a data reference.
// Implements collection.Entry.
type Entry struct {
	reference swarm.Address // address of the data payload
	metadata swarm.Address // address of the serialized metadata
}
// New creates a new Entry pairing a data reference with its
// metadata reference.
func New(reference, metadata swarm.Address) *Entry {
	e := &Entry{}
	e.reference = reference
	e.metadata = metadata
	return e
}
// CanUnmarshal reports whether data of the given size could hold a
// serialized Entry, in either its plain or its encrypted form.
func CanUnmarshal(size int64) bool {
	// Guard against overflow before narrowing to int.
	if size >= math.MaxInt32 {
		return false
	}
	n := int(size)
	return n == serializedDataSize || n == encryptedSerializedDataSize
}
// Reference implements collection.Entry.
// It returns the address of the entry's data payload.
func (e *Entry) Reference() swarm.Address {
	return e.reference
}
// Metadata implements collection.Entry.
// It returns the address of the entry's metadata blob.
func (e *Entry) Metadata() swarm.Address {
	return e.metadata
}
// MarshalBinary implements encoding.BinaryMarshaler.
//
// The result is the reference address bytes followed by the metadata
// address bytes. A fresh slice is allocated for the result: the original
// append(br, bm...) would write into the backing array returned by
// e.reference.Bytes() whenever it had spare capacity, potentially
// clobbering the address's underlying storage.
func (e *Entry) MarshalBinary() ([]byte, error) {
	br := e.reference.Bytes()
	bm := e.metadata.Bytes()
	b := make([]byte, 0, len(br)+len(bm))
	b = append(b, br...)
	b = append(b, bm...)
	return b, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
// The input must be exactly two references long (plain or encrypted);
// the first half becomes the reference, the second half the metadata.
func (e *Entry) UnmarshalBinary(b []byte) error {
	var size int
	switch len(b) {
	case serializedDataSize:
		size = serializedDataSize
	case encryptedSerializedDataSize:
		size = encryptedSerializedDataSize
	default:
		return errors.New("invalid data length")
	}
	half := size / 2
	e.reference = swarm.NewAddress(b[:half])
	e.metadata = swarm.NewAddress(b[half:])
	return nil
}
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package entry_test
import (
"testing"
"github.com/ethersphere/bee/pkg/collection/entry"
"github.com/ethersphere/bee/pkg/swarm/test"
)
// TestEntrySerialize verifies integrity of serialization.
func TestEntrySerialize(t *testing.T) {
	ref := test.RandomAddress()
	mtdt := test.RandomAddress()

	serialized, err := entry.New(ref, mtdt).MarshalBinary()
	if err != nil {
		t.Fatal(err)
	}

	recovered := &entry.Entry{}
	if err := recovered.UnmarshalBinary(serialized); err != nil {
		t.Fatal(err)
	}

	if !ref.Equal(recovered.Reference()) {
		t.Fatalf("expected reference %s, got %s", ref, recovered.Reference())
	}
	if gotMtdt := recovered.Metadata(); !mtdt.Equal(gotMtdt) {
		t.Fatalf("expected metadata %s, got %s", mtdt, gotMtdt)
	}
}
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package entry
import (
"encoding/json"
)
// Metadata provides mime type and filename to file entry.
type Metadata struct {
	MimeType string `json:"mimetype"`
	Filename string `json:"filename"`
}

// NewMetadata creates a new Metadata carrying the given filename and
// an empty mime type.
func NewMetadata(fileName string) *Metadata {
	md := new(Metadata)
	md.Filename = fileName
	return md
}

// String renders the metadata as its JSON encoding; on a marshaling
// error it yields the empty string.
func (m *Metadata) String() string {
	if b, err := json.Marshal(m); err == nil {
		return string(b)
	}
	return ""
}
...@@ -11,17 +11,22 @@ import ( ...@@ -11,17 +11,22 @@ import (
"github.com/ethersphere/bee/pkg/swarm" "github.com/ethersphere/bee/pkg/swarm"
) )
type PutGetter interface {
storage.Putter
storage.Getter
}
// loadSave is needed for manifest operations and provides // loadSave is needed for manifest operations and provides
// simple wrapping over load and save operations using file // simple wrapping over load and save operations using file
// package abstractions. use with caution since Loader will // package abstractions. use with caution since Loader will
// load all of the subtrie of a given hash in memory. // load all of the subtrie of a given hash in memory.
type loadSave struct { type loadSave struct {
storer storage.Storer storer PutGetter
mode storage.ModePut mode storage.ModePut
encrypted bool encrypted bool
} }
func New(storer storage.Storer, mode storage.ModePut, enc bool) file.LoadSaver { func New(storer PutGetter, mode storage.ModePut, enc bool) file.LoadSaver {
return &loadSave{ return &loadSave{
storer: storer, storer: storer,
mode: mode, mode: mode,
......
...@@ -16,6 +16,14 @@ import ( ...@@ -16,6 +16,14 @@ import (
const DefaultManifestType = ManifestMantarayContentType const DefaultManifestType = ManifestMantarayContentType
const (
RootPath = "/"
WebsiteIndexDocumentSuffixKey = "website-index-document"
WebsiteErrorDocumentPathKey = "website-error-document"
EntryMetadataContentTypeKey = "Content-Type"
EntryMetadataFilenameKey = "Filename"
)
var ( var (
// ErrNotFound is returned when an Entry is not found in the manifest. // ErrNotFound is returned when an Entry is not found in the manifest.
ErrNotFound = errors.New("manifest: not found") ErrNotFound = errors.New("manifest: not found")
...@@ -68,6 +76,14 @@ func NewDefaultManifest( ...@@ -68,6 +76,14 @@ func NewDefaultManifest(
return NewManifest(DefaultManifestType, ls, encrypted) return NewManifest(DefaultManifestType, ls, encrypted)
} }
// NewDefaultManifestReference loads an existing manifest with the default type from the given reference.
func NewDefaultManifestReference(
reference swarm.Address,
ls file.LoadSaver,
) (Interface, error) {
return NewManifestReference(DefaultManifestType, reference, ls)
}
// NewManifest creates a new manifest. // NewManifest creates a new manifest.
func NewManifest( func NewManifest(
manifestType string, manifestType string,
......
...@@ -149,7 +149,7 @@ func (m *mantarayManifest) IterateAddresses(ctx context.Context, fn swarm.Addres ...@@ -149,7 +149,7 @@ func (m *mantarayManifest) IterateAddresses(ctx context.Context, fn swarm.Addres
} }
} }
if node.IsValueType() && node.Entry() != nil { if node.IsValueType() && len(node.Entry()) > 0 {
entry := swarm.NewAddress(node.Entry()) entry := swarm.NewAddress(node.Entry())
err = fn(entry) err = fn(entry)
if err != nil { if err != nil {
......
...@@ -270,6 +270,13 @@ func (n *Node) UnmarshalBinary(data []byte) error { ...@@ -270,6 +270,13 @@ func (n *Node) UnmarshalBinary(data []byte) error {
n.entry = append([]byte{}, data[nodeHeaderSize:nodeHeaderSize+refBytesSize]...) n.entry = append([]byte{}, data[nodeHeaderSize:nodeHeaderSize+refBytesSize]...)
offset := nodeHeaderSize + refBytesSize // skip entry offset := nodeHeaderSize + refBytesSize // skip entry
// Currently we don't persist the root nodeType when we marshal the manifest, as a result
// the root nodeType information is lost on Unmarshal. This causes issues when we want to
// perform a path 'Walk' on the root. If there is more than 1 fork, the root node type
// is an edge, so we will deduce this information from index byte array
if !bytes.Equal(data[offset:offset+32], make([]byte, 32)) {
n.nodeType = nodeTypeEdge
}
n.forks = make(map[byte]*fork) n.forks = make(map[byte]*fork)
bb := &bitsForBytes{} bb := &bitsForBytes{}
bb.fromBytes(data[offset:]) bb.fromBytes(data[offset:])
......
...@@ -9,14 +9,10 @@ ...@@ -9,14 +9,10 @@
package traversal package traversal
import ( import (
"bytes"
"context" "context"
"encoding/json"
"errors" "errors"
"fmt" "fmt"
"github.com/ethersphere/bee/pkg/collection/entry"
"github.com/ethersphere/bee/pkg/file"
"github.com/ethersphere/bee/pkg/file/joiner" "github.com/ethersphere/bee/pkg/file/joiner"
"github.com/ethersphere/bee/pkg/file/loadsave" "github.com/ethersphere/bee/pkg/file/loadsave"
"github.com/ethersphere/bee/pkg/manifest" "github.com/ethersphere/bee/pkg/manifest"
...@@ -37,8 +33,6 @@ type Service interface { ...@@ -37,8 +33,6 @@ type Service interface {
// TraverseBytesAddresses iterates through each address of a bytes. // TraverseBytesAddresses iterates through each address of a bytes.
TraverseBytesAddresses(context.Context, swarm.Address, swarm.AddressIterFunc) error TraverseBytesAddresses(context.Context, swarm.Address, swarm.AddressIterFunc) error
// TraverseFileAddresses iterates through each address of a file.
TraverseFileAddresses(context.Context, swarm.Address, swarm.AddressIterFunc) error
// TraverseManifestAddresses iterates through each address of a manifest, // TraverseManifestAddresses iterates through each address of a manifest,
// as well as each entry found in it. // as well as each entry found in it.
TraverseManifestAddresses(context.Context, swarm.Address, swarm.AddressIterFunc) error TraverseManifestAddresses(context.Context, swarm.Address, swarm.AddressIterFunc) error
...@@ -60,57 +54,17 @@ func (s *traversalService) TraverseAddresses( ...@@ -60,57 +54,17 @@ func (s *traversalService) TraverseAddresses(
chunkAddressFunc swarm.AddressIterFunc, chunkAddressFunc swarm.AddressIterFunc,
) error { ) error {
isFile, e, metadata, err := s.checkIsFile(ctx, reference) isManifest, m, err := s.checkIsManifest(ctx, reference)
if err != nil { if err != nil {
return err return err
} }
// reference address could be missrepresented as file when: if isManifest {
// - content size is 64 bytes (or 128 for encrypted reference) return m.IterateAddresses(ctx, func(manifestNodeAddr swarm.Address) error {
// - second reference exists and is JSON (and not actually file metadata) return s.processBytes(ctx, manifestNodeAddr, chunkAddressFunc)
})
if isFile {
isManifest, m, err := s.checkIsManifest(ctx, reference, e, metadata)
if err != nil {
return err
}
// reference address could be missrepresented as manifest when:
// - file content type is actually on of manifest type (manually set)
// - content was unmarshalled
//
// even though content could be unmarshaled in some case, iteration
// through addresses will not be possible
if isManifest {
// process as manifest
err = m.IterateAddresses(ctx, func(manifestNodeAddr swarm.Address) error {
return s.traverseChunkAddressesFromManifest(ctx, manifestNodeAddr, chunkAddressFunc)
})
if err != nil {
return fmt.Errorf("traversal: iterate chunks: %s: %w", reference, err)
}
metadataReference := e.Metadata()
err = s.processBytes(ctx, metadataReference, chunkAddressFunc)
if err != nil {
return err
}
_ = chunkAddressFunc(reference)
} else {
return s.traverseChunkAddressesAsFile(ctx, reference, chunkAddressFunc, e)
}
} else {
return s.processBytes(ctx, reference, chunkAddressFunc)
} }
return s.processBytes(ctx, reference, chunkAddressFunc)
return nil
} }
func (s *traversalService) TraverseBytesAddresses( func (s *traversalService) TraverseBytesAddresses(
...@@ -121,213 +75,39 @@ func (s *traversalService) TraverseBytesAddresses( ...@@ -121,213 +75,39 @@ func (s *traversalService) TraverseBytesAddresses(
return s.processBytes(ctx, reference, chunkAddressFunc) return s.processBytes(ctx, reference, chunkAddressFunc)
} }
func (s *traversalService) TraverseFileAddresses(
ctx context.Context,
reference swarm.Address,
chunkAddressFunc swarm.AddressIterFunc,
) error {
isFile, e, _, err := s.checkIsFile(ctx, reference)
if err != nil {
return err
}
// reference address could be missrepresented as file when:
// - content size is 64 bytes (or 128 for encrypted reference)
// - second reference exists and is JSON (and not actually file metadata)
if !isFile {
return ErrInvalidType
}
return s.traverseChunkAddressesAsFile(ctx, reference, chunkAddressFunc, e)
}
func (s *traversalService) TraverseManifestAddresses( func (s *traversalService) TraverseManifestAddresses(
ctx context.Context, ctx context.Context,
reference swarm.Address, reference swarm.Address,
chunkAddressFunc swarm.AddressIterFunc, chunkAddressFunc swarm.AddressIterFunc,
) error { ) error {
isFile, e, metadata, err := s.checkIsFile(ctx, reference) isManifest, m, err := s.checkIsManifest(ctx, reference)
if err != nil { if err != nil {
return err return err
} }
if !isFile {
return ErrInvalidType
}
isManifest, m, err := s.checkIsManifest(ctx, reference, e, metadata)
if err != nil {
return err
}
// reference address could be missrepresented as manifest when:
// - file content type is actually on of manifest type (manually set)
// - content was unmarshalled
//
// even though content could be unmarshaled in some case, iteration
// through addresses will not be possible
if !isManifest { if !isManifest {
return ErrInvalidType return ErrInvalidType
} }
err = m.IterateAddresses(ctx, func(manifestNodeAddr swarm.Address) error { err = m.IterateAddresses(ctx, func(manifestNodeAddr swarm.Address) error {
return s.traverseChunkAddressesFromManifest(ctx, manifestNodeAddr, chunkAddressFunc) return s.processBytes(ctx, manifestNodeAddr, chunkAddressFunc)
}) })
if err != nil { if err != nil {
return fmt.Errorf("traversal: iterate chunks: %s: %w", reference, err) return fmt.Errorf("traversal: iterate chunks: %s: %w", reference, err)
} }
metadataReference := e.Metadata()
err = s.processBytes(ctx, metadataReference, chunkAddressFunc)
if err != nil {
return err
}
_ = chunkAddressFunc(reference)
return nil return nil
} }
func (s *traversalService) traverseChunkAddressesFromManifest(
ctx context.Context,
reference swarm.Address,
chunkAddressFunc swarm.AddressIterFunc,
) error {
isFile, e, _, err := s.checkIsFile(ctx, reference)
if err != nil {
return err
}
if isFile {
return s.traverseChunkAddressesAsFile(ctx, reference, chunkAddressFunc, e)
}
return s.processBytes(ctx, reference, chunkAddressFunc)
}
func (s *traversalService) traverseChunkAddressesAsFile(
ctx context.Context,
reference swarm.Address,
chunkAddressFunc swarm.AddressIterFunc,
e *entry.Entry,
) (err error) {
bytesReference := e.Reference()
err = s.processBytes(ctx, bytesReference, chunkAddressFunc)
if err != nil {
// possible it was custom JSON bytes, which matches entry JSON
// but in fact is not file, and does not contain reference to
// existing address, which is why it was not found in storage
if !errors.Is(err, storage.ErrNotFound) {
return nil
}
// ignore
}
metadataReference := e.Metadata()
err = s.processBytes(ctx, metadataReference, chunkAddressFunc)
if err != nil {
return
}
_ = chunkAddressFunc(reference)
return nil
}
// checkIsFile checks if the content is file.
func (s *traversalService) checkIsFile(
ctx context.Context,
reference swarm.Address,
) (isFile bool, e *entry.Entry, metadata *entry.Metadata, err error) {
var (
j file.Joiner
span int64
)
j, span, err = joiner.New(ctx, s.storer, reference)
if err != nil {
err = fmt.Errorf("traversal: joiner: %s: %w", reference, err)
return
}
maybeIsFile := entry.CanUnmarshal(span)
if maybeIsFile {
buf := bytes.NewBuffer(nil)
_, err = file.JoinReadAll(ctx, j, buf)
if err != nil {
err = fmt.Errorf("traversal: read entry: %s: %w", reference, err)
return
}
e = &entry.Entry{}
err = e.UnmarshalBinary(buf.Bytes())
if err != nil {
err = fmt.Errorf("traversal: unmarshal entry: %s: %w", reference, err)
return
}
// address sizes must match
if len(reference.Bytes()) != len(e.Reference().Bytes()) {
return
}
// NOTE: any bytes will unmarshall to addresses; we need to check metadata
// read metadata
j, _, err = joiner.New(ctx, s.storer, e.Metadata())
if err != nil {
// ignore
err = nil
return
}
buf = bytes.NewBuffer(nil)
_, err = file.JoinReadAll(ctx, j, buf)
if err != nil {
err = fmt.Errorf("traversal: read metadata: %s: %w", reference, err)
return
}
metadata = &entry.Metadata{}
dec := json.NewDecoder(buf)
dec.DisallowUnknownFields()
err = dec.Decode(metadata)
if err != nil {
// may not be metadata JSON
err = nil
return
}
isFile = true
}
return
}
// checkIsManifest checks if the content is manifest. // checkIsManifest checks if the content is manifest.
func (s *traversalService) checkIsManifest( func (s *traversalService) checkIsManifest(
ctx context.Context, ctx context.Context,
reference swarm.Address, reference swarm.Address,
e *entry.Entry,
metadata *entry.Metadata,
) (isManifest bool, m manifest.Interface, err error) { ) (isManifest bool, m manifest.Interface, err error) {
// NOTE: 'encrypted' parameter only used for saving manifest // NOTE: 'encrypted' parameter only used for saving manifest
m, err = manifest.NewManifestReference( m, err = manifest.NewDefaultManifestReference(
metadata.MimeType, reference,
e.Reference(),
loadsave.New(s.storer, storage.ModePutRequest, false), loadsave.New(s.storer, storage.ModePutRequest, false),
) )
if err != nil { if err != nil {
...@@ -339,9 +119,7 @@ func (s *traversalService) checkIsManifest( ...@@ -339,9 +119,7 @@ func (s *traversalService) checkIsManifest(
err = fmt.Errorf("traversal: read manifest: %s: %w", reference, err) err = fmt.Errorf("traversal: read manifest: %s: %w", reference, err)
return return
} }
isManifest = true isManifest = true
return return
} }
......
...@@ -7,18 +7,14 @@ package traversal_test ...@@ -7,18 +7,14 @@ package traversal_test
import ( import (
"bytes" "bytes"
"context" "context"
"encoding/json"
"fmt" "fmt"
"math" "math"
"mime"
"path" "path"
"sort" "sort"
"strings"
"sync" "sync"
"testing" "testing"
"time" "time"
"github.com/ethersphere/bee/pkg/collection/entry"
"github.com/ethersphere/bee/pkg/file/loadsave" "github.com/ethersphere/bee/pkg/file/loadsave"
"github.com/ethersphere/bee/pkg/file/pipeline/builder" "github.com/ethersphere/bee/pkg/file/pipeline/builder"
"github.com/ethersphere/bee/pkg/manifest" "github.com/ethersphere/bee/pkg/manifest"
...@@ -29,7 +25,8 @@ import ( ...@@ -29,7 +25,8 @@ import (
) )
var ( var (
simpleData = []byte("hello test world") // fixed, 16 bytes simpleData = []byte("hello test world") // fixed, 16 bytes
defaultMediaType = "bzz-manifest-mantaray"
) )
func generateSampleData(size int) (b []byte) { func generateSampleData(size int) (b []byte) {
...@@ -151,12 +148,13 @@ func TestTraversalBytes(t *testing.T) { ...@@ -151,12 +148,13 @@ func TestTraversalBytes(t *testing.T) {
func TestTraversalFiles(t *testing.T) { func TestTraversalFiles(t *testing.T) {
traverseFn := func(traversalService traversal.Service) func(context.Context, swarm.Address, swarm.AddressIterFunc) error { traverseFn := func(traversalService traversal.Service) func(context.Context, swarm.Address, swarm.AddressIterFunc) error {
return traversalService.TraverseFileAddresses return traversalService.TraverseAddresses
} }
testCases := []struct { testCases := []struct {
filesSize int filesSize int
contentType string contentType string
filename string
expectedHashesCount int expectedHashesCount int
expectedHashes []string expectedHashes []string
ignoreDuplicateHash bool ignoreDuplicateHash bool
...@@ -164,31 +162,38 @@ func TestTraversalFiles(t *testing.T) { ...@@ -164,31 +162,38 @@ func TestTraversalFiles(t *testing.T) {
{ {
filesSize: len(simpleData), filesSize: len(simpleData),
contentType: "text/plain; charset=utf-8", contentType: "text/plain; charset=utf-8",
expectedHashesCount: 3, filename: "simple.txt",
expectedHashesCount: 4,
expectedHashes: []string{ expectedHashes: []string{
"06e50210b6bcebca15cfc8bc9ee3aa51ad8fa9cac41340f9f6396ada74fec78f", // root "ae16fb27474b41273c0deb355e4405d3cd0a6639f834285f97c75636c9e29df7", // root manifest
"999a9f2e1fd29a6691a3b8e437cbb36e34a1f67decc973dfc70928d1e7de3c3b", // metadata "0cc878d32c96126d47f63fbe391114ee1438cd521146fc975dea1546d302b6c0", // mainifest root metadata
"05e34f11a0967e8c09968b69c4f486f569ef58a31a197992e01304a1e59f8e75", // manifest file entry
"e94a5aadf259f008b7d5039420c65d692901846523f503d97d24e2f077786d9a", // bytes "e94a5aadf259f008b7d5039420c65d692901846523f503d97d24e2f077786d9a", // bytes
}, },
}, },
{ {
filesSize: swarm.ChunkSize, filesSize: swarm.ChunkSize,
contentType: "text/plain; charset=utf-8", contentType: "text/plain; charset=utf-8",
expectedHashesCount: 3, expectedHashesCount: 6,
expectedHashes: []string{ expectedHashes: []string{
"29ae87fda18bee4255ef19faabe901e2cf9c1c5c4648083383255670492e814e", // root "7e0a4b6cd542eb501f372438cbbbcd8a82c444740f00bdd54f4981f487bcf8b7", // root manifest
"e7d4d4a897cd69f5759621044402e40a3d5c903cf1e225864eef5d1f77d97680", // metadata "0cc878d32c96126d47f63fbe391114ee1438cd521146fc975dea1546d302b6c0", // manifest root metadata
"f833c17be12d68aec95eca7f9d993f7d7aaa7a9c282eb2c3d79ab26a5aeaf384", // bytes (4096) "3f538c3b5225111a79b3b1dbb5e269ca2115f2a7caf0e6925b773457cdef7be5", // manifest file entry (Edge)
"2f09e41846a24201758db3535dc6c42d738180c8874d4d40d4f2924d0091521f", // manifest file entry (Edge)
"b2662d17d51ce734695d993b44c0e2df34c3f50d5889e5bc3b8718838658e6b0", // manifest file entry (Value)
"f833c17be12d68aec95eca7f9d993f7d7aaa7a9c282eb2c3d79ab26a5aeaf384", // bytes
}, },
}, },
{ {
filesSize: swarm.ChunkSize + 1, filesSize: swarm.ChunkSize + 1,
contentType: "text/plain; charset=utf-8", contentType: "text/plain; charset=utf-8",
expectedHashesCount: 5, filename: "simple.txt",
expectedHashesCount: 6,
expectedHashes: []string{ expectedHashes: []string{
"aa4a46bfbdff91c8db555edcfa4ba18371a083fdec67120db58d7ef177815ff0", // root "ea58761906f98bd88204efbbab5c690329af02548afec37d7a556a47ca78ac62", // manifest root
"be1f048819e744886803fbe44cf16205949b196640665077bfcacf68c323aa49", // metadata "0cc878d32c96126d47f63fbe391114ee1438cd521146fc975dea1546d302b6c0", // manifest root metadata
"a1c4483d15167aeb406017942c9625464574cf70bf7e42f237094acbccdb6834", // bytes (joiner) "85617df0249a12649b56d09cf7f21e8642627b4fb9c0c9e03e2d25340cf60499", // manifest file entry
"a1c4483d15167aeb406017942c9625464574cf70bf7e42f237094acbccdb6834", // manifest file entry
"f833c17be12d68aec95eca7f9d993f7d7aaa7a9c282eb2c3d79ab26a5aeaf384", // bytes (4096) "f833c17be12d68aec95eca7f9d993f7d7aaa7a9c282eb2c3d79ab26a5aeaf384", // bytes (4096)
"dcbfb467950a28f8c5023b86d31de4ff3a337993e921ae623ae62c7190d60329", // bytes (1) "dcbfb467950a28f8c5023b86d31de4ff3a337993e921ae623ae62c7190d60329", // bytes (1)
}, },
...@@ -215,29 +220,34 @@ func TestTraversalFiles(t *testing.T) { ...@@ -215,29 +220,34 @@ func TestTraversalFiles(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
fileName := fr.String() ls := loadsave.New(mockStorer, storage.ModePutRequest, false)
fManifest, err := manifest.NewDefaultManifest(ls, false)
m := entry.NewMetadata(fileName)
m.MimeType = tc.contentType
metadataBytes, err := json.Marshal(m)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
filename := tc.filename
if filename == "" {
filename = fr.String()
}
pipe = builder.NewPipelineBuilder(ctx, mockStorer, storage.ModePutUpload, false) rootMtdt := map[string]string{
mr, err := builder.FeedPipeline(ctx, pipe, bytes.NewReader(metadataBytes), int64(len(metadataBytes))) manifest.WebsiteIndexDocumentSuffixKey: filename,
}
err = fManifest.Add(ctx, "/", manifest.NewEntry(swarm.ZeroAddress, rootMtdt))
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
entrie := entry.New(fr, mr) fileMtdt := map[string]string{
fileEntryBytes, err := entrie.MarshalBinary() manifest.EntryMetadataFilenameKey: filename,
manifest.EntryMetadataContentTypeKey: tc.contentType,
}
err = fManifest.Add(ctx, filename, manifest.NewEntry(fr, fileMtdt))
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
pipe = builder.NewPipelineBuilder(ctx, mockStorer, storage.ModePutUpload, false) reference, err := fManifest.Store(ctx)
reference, err := builder.FeedPipeline(ctx, pipe, bytes.NewReader(fileEntryBytes), int64(len(fileEntryBytes)))
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
...@@ -249,16 +259,14 @@ func TestTraversalFiles(t *testing.T) { ...@@ -249,16 +259,14 @@ func TestTraversalFiles(t *testing.T) {
} }
type file struct { type file struct {
size int size int
dir string dir string
name string name string
reference string chunks fileChunks
chunks fileChunks
} }
type fileChunks struct { type fileChunks struct {
metadata string content []string
content []string
} }
func TestTraversalManifest(t *testing.T) { func TestTraversalManifest(t *testing.T) {
...@@ -267,93 +275,18 @@ func TestTraversalManifest(t *testing.T) { ...@@ -267,93 +275,18 @@ func TestTraversalManifest(t *testing.T) {
} }
testCases := []struct { testCases := []struct {
manifestType string
files []file files []file
manifestHashes []string manifestHashes []string
expectedHashesCount int expectedHashesCount int
ignoreDuplicateHash bool ignoreDuplicateHash bool
}{ }{
{ {
manifestType: manifest.ManifestSimpleContentType,
files: []file{
{
size: len(simpleData),
dir: "",
name: "hello.txt",
reference: "a7c9250614bd2d2529e7bee2e2d0df295661b7185465193dc3b54ffea30c4702",
chunks: fileChunks{
metadata: "af2f73f800821b8ca7f5d2c33d0ba6018734d809389a47993c621cc62245d9e0",
content: []string{
"e94a5aadf259f008b7d5039420c65d692901846523f503d97d24e2f077786d9a",
},
},
},
},
manifestHashes: []string{
"864984d3b0a0401123325ffac8ce696f3eb67ea9ba290a66e8d4e7ddb41fd1dc", // root
"90cca4ac6ec25d8fdae297f65dfa389abd2db77f1b44a623d9fcb96802a935a7", // metadata
"3665a0de7b2a63ba80fd3bb6f7c2d75b633ee4a297a0d7442cecd89c3553a4d2", // bytes
},
expectedHashesCount: 6,
},
{
manifestType: manifest.ManifestSimpleContentType,
files: []file{ files: []file{
{ {
size: len(simpleData), size: len(simpleData),
dir: "", dir: "",
name: "hello.txt", name: "hello.txt",
reference: "a7c9250614bd2d2529e7bee2e2d0df295661b7185465193dc3b54ffea30c4702",
chunks: fileChunks{ chunks: fileChunks{
metadata: "af2f73f800821b8ca7f5d2c33d0ba6018734d809389a47993c621cc62245d9e0",
content: []string{
"e94a5aadf259f008b7d5039420c65d692901846523f503d97d24e2f077786d9a",
},
},
},
{
size: swarm.ChunkSize,
dir: "",
name: "data/1.txt",
reference: "5241139a93e4c8735b62414c4a3be8d10e83c6644af320f8892cbac0bc869cab",
chunks: fileChunks{
metadata: "ec35ef758093abaeaabc3956c8eeb9739cf6e6168ce44ae912b9b4777b0e9420",
content: []string{
"f833c17be12d68aec95eca7f9d993f7d7aaa7a9c282eb2c3d79ab26a5aeaf384", // bytes (4096)
},
},
},
{
size: swarm.ChunkSize,
dir: "",
name: "data/2.txt",
reference: "940d67638f577ad36701b7ed380ed8e1c4c14e6bb6e19c6a74b0d5ac7cb0fb55",
chunks: fileChunks{
metadata: "a05586fb3c4625e21377ce2043c362835d3eb95bd9970d84db414a0f6164f822",
content: []string{
"f833c17be12d68aec95eca7f9d993f7d7aaa7a9c282eb2c3d79ab26a5aeaf384", // bytes (4096)
},
},
},
},
manifestHashes: []string{
"d2c4586f8791058153464064aa9b90059ad8ab9afe068df37d97f5711a0a197f", // root
"39745d382da0c21042290c59d43840a5685f461bd7da49c36a120136f49869cb", // metadata
"dc763a70a578970c001cb9c59c90615d3e5c19eb4147cc45757481e32bf72ec7", // bytes
},
expectedHashesCount: 12,
ignoreDuplicateHash: true,
},
{
manifestType: manifest.ManifestMantarayContentType,
files: []file{
{
size: len(simpleData),
dir: "",
name: "hello.txt",
reference: "a7c9250614bd2d2529e7bee2e2d0df295661b7185465193dc3b54ffea30c4702",
chunks: fileChunks{
metadata: "af2f73f800821b8ca7f5d2c33d0ba6018734d809389a47993c621cc62245d9e0",
content: []string{ content: []string{
"e94a5aadf259f008b7d5039420c65d692901846523f503d97d24e2f077786d9a", "e94a5aadf259f008b7d5039420c65d692901846523f503d97d24e2f077786d9a",
}, },
...@@ -362,47 +295,38 @@ func TestTraversalManifest(t *testing.T) { ...@@ -362,47 +295,38 @@ func TestTraversalManifest(t *testing.T) {
}, },
manifestHashes: []string{ manifestHashes: []string{
// NOTE: references will be fixed, due to custom obfuscation key function // NOTE: references will be fixed, due to custom obfuscation key function
"596c29bd00b241cb38aba10ca7005bf124baed90b613c2ff11ee891165a487fd", // root "f81ac8ceb2db7e55b718eca35f05233dc523022e36e11f934dbfd5f0cafde198", // root
"70501ac2caed16fc5f929977172a631ac540a5efd567cf1447bf7ee4aae4eb9f", // metadata "05e34f11a0967e8c09968b69c4f486f569ef58a31a197992e01304a1e59f8e75", // metadata
"486914d1449e482ff248268e99c5d7d2772281f033c07f2f74aa4cc1ce3a8fe0", // bytes - root node
"3d6a9e4eec6ebaf6ca6c6412dae6a23c76bc0c0672d259d98562368915d16b88", // bytes - node [h]
}, },
expectedHashesCount: 7, expectedHashesCount: 3,
}, },
{ {
manifestType: manifest.ManifestMantarayContentType,
files: []file{ files: []file{
{ {
size: len(simpleData), size: len(simpleData),
dir: "", dir: "",
name: "hello.txt", name: "hello.txt",
reference: "a7c9250614bd2d2529e7bee2e2d0df295661b7185465193dc3b54ffea30c4702",
chunks: fileChunks{ chunks: fileChunks{
metadata: "af2f73f800821b8ca7f5d2c33d0ba6018734d809389a47993c621cc62245d9e0",
content: []string{ content: []string{
"e94a5aadf259f008b7d5039420c65d692901846523f503d97d24e2f077786d9a", "e94a5aadf259f008b7d5039420c65d692901846523f503d97d24e2f077786d9a",
}, },
}, },
}, },
{ {
size: swarm.ChunkSize, size: swarm.ChunkSize,
dir: "", dir: "",
name: "data/1.txt", name: "data/1.txt",
reference: "5241139a93e4c8735b62414c4a3be8d10e83c6644af320f8892cbac0bc869cab",
chunks: fileChunks{ chunks: fileChunks{
metadata: "ec35ef758093abaeaabc3956c8eeb9739cf6e6168ce44ae912b9b4777b0e9420",
content: []string{ content: []string{
"f833c17be12d68aec95eca7f9d993f7d7aaa7a9c282eb2c3d79ab26a5aeaf384", // bytes (4096) "f833c17be12d68aec95eca7f9d993f7d7aaa7a9c282eb2c3d79ab26a5aeaf384", // bytes (4096)
}, },
}, },
}, },
{ {
size: swarm.ChunkSize, size: swarm.ChunkSize,
dir: "", dir: "",
name: "data/2.txt", name: "data/2.txt",
reference: "940d67638f577ad36701b7ed380ed8e1c4c14e6bb6e19c6a74b0d5ac7cb0fb55",
chunks: fileChunks{ chunks: fileChunks{
metadata: "a05586fb3c4625e21377ce2043c362835d3eb95bd9970d84db414a0f6164f822",
content: []string{ content: []string{
"f833c17be12d68aec95eca7f9d993f7d7aaa7a9c282eb2c3d79ab26a5aeaf384", // bytes (4096) "f833c17be12d68aec95eca7f9d993f7d7aaa7a9c282eb2c3d79ab26a5aeaf384", // bytes (4096)
}, },
...@@ -411,29 +335,20 @@ func TestTraversalManifest(t *testing.T) { ...@@ -411,29 +335,20 @@ func TestTraversalManifest(t *testing.T) {
}, },
manifestHashes: []string{ manifestHashes: []string{
// NOTE: references will be fixed, due to custom obfuscation key function // NOTE: references will be fixed, due to custom obfuscation key function
"10a70b3a0102b94e909d08b91b98a2d8ca22c762ad7286d5451de2dd6432c218", // root "d182df1cb214167d085256fafa657f38a191efe51af16834f6288ef23416fd25", // root
"fb2c46942a3b2148e856d778731de9c173a26bec027aa27897f32e423eb14458", // metadata "05e34f11a0967e8c09968b69c4f486f569ef58a31a197992e01304a1e59f8e75", // manifest entry
"39caaed3c9e42ea3ad9a374d37181e21c9a686367e0ae42d66c20465538d9789", // bytes - root node "7e6bc53ca11bff459f77892563d04e09b440c63ce2f7d5fe8a8b0f0ba9eeefcf", // manifest entry (Edge PathSeparator)
"735aee067bdc02e1c1e8e88eea8b5b0535bfc9d0d36bf3a4d6fbac94a03bc233", // bytes - node [d] "b2662d17d51ce734695d993b44c0e2df34c3f50d5889e5bc3b8718838658e6b0", // manifest file entry (1.txt)
"3d6a9e4eec6ebaf6ca6c6412dae6a23c76bc0c0672d259d98562368915d16b88", // bytes - node [h] "b2662d17d51ce734695d993b44c0e2df34c3f50d5889e5bc3b8718838658e6b0", // manifest file entry (2.txt)
"ddb31ae6a74caf5df03e5d8bf6056e589229b4cae3087433db64a4768923f73b", // bytes - node [d]/[2]
"281dc7467f647abbfbaaf259a95ab60df8bf76ec3fbc525bfbca794d6360fa46", // bytes - node [d]/[1]
}, },
expectedHashesCount: 16, expectedHashesCount: 8,
ignoreDuplicateHash: true, ignoreDuplicateHash: true,
}, },
} }
for _, tc := range testCases { for _, tc := range testCases {
mediatype, _, err := mime.ParseMediaType(tc.manifestType)
if err != nil {
t.Fatal(err)
}
mediatype = strings.Split(mediatype, "/")[1]
mediatype = strings.Split(mediatype, "+")[0]
testName := fmt.Sprintf("%s-%d-files-%d-chunks", mediatype, len(tc.files), tc.expectedHashesCount) testName := fmt.Sprintf("%s-%d-files-%d-chunks", defaultMediaType, len(tc.files), tc.expectedHashesCount)
t.Run(testName, func(t *testing.T) { t.Run(testName, func(t *testing.T) {
var ( var (
...@@ -444,7 +359,6 @@ func TestTraversalManifest(t *testing.T) { ...@@ -444,7 +359,6 @@ func TestTraversalManifest(t *testing.T) {
// add hashes for files // add hashes for files
for _, f := range tc.files { for _, f := range tc.files {
expectedHashes = append(expectedHashes, f.reference, f.chunks.metadata)
// add hash for each content // add hash for each content
expectedHashes = append(expectedHashes, f.chunks.content...) expectedHashes = append(expectedHashes, f.chunks.content...)
} }
...@@ -454,21 +368,10 @@ func TestTraversalManifest(t *testing.T) { ...@@ -454,21 +368,10 @@ func TestTraversalManifest(t *testing.T) {
ctx := context.Background() ctx := context.Background()
var dirManifest manifest.Interface
ls := loadsave.New(mockStorer, storage.ModePutRequest, false) ls := loadsave.New(mockStorer, storage.ModePutRequest, false)
switch tc.manifestType { dirManifest, err := manifest.NewMantarayManifest(ls, false)
case manifest.ManifestSimpleContentType: if err != nil {
dirManifest, err = manifest.NewSimpleManifest(ls) t.Fatal(err)
if err != nil {
t.Fatal(err)
}
case manifest.ManifestMantarayContentType:
dirManifest, err = manifest.NewMantarayManifest(ls, false)
if err != nil {
t.Fatal(err)
}
default:
t.Fatalf("manifest: invalid type: %s", tc.manifestType)
} }
// add files to manifest // add files to manifest
...@@ -485,73 +388,20 @@ func TestTraversalManifest(t *testing.T) { ...@@ -485,73 +388,20 @@ func TestTraversalManifest(t *testing.T) {
if fileName == "" { if fileName == "" {
fileName = fr.String() fileName = fr.String()
} }
m := entry.NewMetadata(fileName)
metadataBytes, err := json.Marshal(m)
if err != nil {
t.Fatal(err)
}
pipe = builder.NewPipelineBuilder(ctx, mockStorer, storage.ModePutUpload, false)
mr, err := builder.FeedPipeline(ctx, pipe, bytes.NewReader(metadataBytes), int64(len(metadataBytes)))
if err != nil {
t.Fatal(err)
}
entrie := entry.New(fr, mr)
fileEntryBytes, err := entrie.MarshalBinary()
if err != nil {
t.Fatal(err)
}
pipe = builder.NewPipelineBuilder(ctx, mockStorer, storage.ModePutUpload, false)
reference, err := builder.FeedPipeline(ctx, pipe, bytes.NewReader(fileEntryBytes), int64(len(fileEntryBytes)))
if err != nil {
t.Fatal(err)
}
filePath := path.Join(f.dir, fileName) filePath := path.Join(f.dir, fileName)
err = dirManifest.Add(ctx, filePath, manifest.NewEntry(reference, nil)) err = dirManifest.Add(ctx, filePath, manifest.NewEntry(fr, nil))
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
} }
// save manifest // save manifest
manifestBytesReference, err := dirManifest.Store(ctx) manifestReference, err := dirManifest.Store(ctx)
if err != nil {
t.Fatal(err)
}
// store the manifest metadata and get its reference
m := entry.NewMetadata(manifestBytesReference.String())
m.MimeType = dirManifest.Type()
metadataBytes, err := json.Marshal(m)
if err != nil {
t.Fatal(err)
}
pipe := builder.NewPipelineBuilder(ctx, mockStorer, storage.ModePutUpload, false)
mr, err := builder.FeedPipeline(ctx, pipe, bytes.NewReader(metadataBytes), int64(len(metadataBytes)))
if err != nil {
t.Fatal(err)
}
// now join both references (fr, mr) to create an entry and store it
e := entry.New(manifestBytesReference, mr)
fileEntryBytes, err := e.MarshalBinary()
if err != nil {
t.Fatal(err)
}
pipe = builder.NewPipelineBuilder(ctx, mockStorer, storage.ModePutUpload, false)
manifestFileReference, err := builder.FeedPipeline(ctx, pipe, bytes.NewReader(fileEntryBytes), int64(len(fileEntryBytes)))
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
traversalCheck(t, mockStorer, traverseFn, manifestFileReference, tc.expectedHashesCount, expectedHashes, tc.ignoreDuplicateHash) traversalCheck(t, mockStorer, traverseFn, manifestReference, tc.expectedHashesCount, expectedHashes, tc.ignoreDuplicateHash)
}) })
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment