Commit bca20b91 authored by acud, committed by GitHub

streamtest: add option to add a correct base address (#1383)

parent 609259cf
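
In short, the streamtest Recorder gains a configurable base overlay address: the new WithBaseAddr option stores it on the Recorder, and NewStream now passes r.base instead of the dialed address to the protocol handler. Handlers on the receiving side of a recorded stream therefore see the dialing node as the remote peer, as they would on a real network, and the tests in this change set the option and assert balances against the correct peer. Below is a minimal, illustrative sketch of the new wiring, modelled on TestAnnouncePaymentThreshold further down in this diff; the test name, import paths and the threshold value are assumptions for illustration, not part of this commit.

```go
package pricing_test

import (
	"io/ioutil"
	"math/big"
	"testing"

	"github.com/ethersphere/bee/pkg/logging"
	"github.com/ethersphere/bee/pkg/p2p/streamtest"
	"github.com/ethersphere/bee/pkg/pricing"
	"github.com/ethersphere/bee/pkg/swarm"
)

// Sketch only: shows how a test wires the new WithBaseAddr option.
func TestBaseAddrWiringSketch(t *testing.T) {
	logger := logging.New(ioutil.Discard, 0)
	testThreshold := big.NewInt(10000) // arbitrary example value

	// receiving side of the protocol; its handlers are registered with the recorder
	recipient := pricing.New(nil, logger, testThreshold)

	// overlay address of the dialing (payer) node
	peerID := swarm.MustParseHexAddress("9ee7add7")

	recorder := streamtest.New(
		streamtest.WithProtocols(recipient.Protocol()), // handlers invoked on NewStream
		streamtest.WithBaseAddr(peerID),                // address those handlers see as the remote peer
	)

	// the payer dials through the recorder as before; inside the recipient's
	// handler, p2p.Peer.Address is now peerID rather than the recipient's own
	// address, so accounting and recorded streams are attributed to the correct peer
	payer := pricing.New(recorder, logger, testThreshold)
	_ = payer
}
```

The pricing, pushsync, pseudosettle and retrieval tests below follow this pattern and additionally flip their balance assertions to the peer address the handler now reports.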
@@ -30,6 +30,7 @@ var (
)
type Recorder struct {
+ base swarm.Address
records map[string][]*Record
recordsMu sync.Mutex
protocols []p2p.ProtocolSpec
@@ -48,6 +49,12 @@ func WithMiddlewares(middlewares ...p2p.HandlerMiddleware) Option {
})
}
+ func WithBaseAddr(a swarm.Address) Option {
+ return optionFunc(func(r *Recorder) {
+ r.base = a
+ })
+ }
func New(opts ...Option) *Recorder {
r := &Recorder{
records: make(map[string][]*Record),
@@ -98,7 +105,7 @@ func (r *Recorder) NewStream(ctx context.Context, addr swarm.Address, h p2p.Head
// pass a new context to handler,
// do not cancel it with the client stream context
- err := handler(context.Background(), p2p.Peer{Address: addr}, streamIn)
+ err := handler(context.Background(), p2p.Peer{Address: r.base}, streamIn)
if err != nil && err != io.EOF {
record.setErr(err)
}
......
@@ -40,13 +40,15 @@ func TestAnnouncePaymentThreshold(t *testing.T) {
recipient := pricing.New(nil, logger, testThreshold)
recipient.SetPaymentThresholdObserver(observer)
+ peerID := swarm.MustParseHexAddress("9ee7add7")
recorder := streamtest.New(
streamtest.WithProtocols(recipient.Protocol()),
+ streamtest.WithBaseAddr(peerID),
)
payer := pricing.New(recorder, logger, testThreshold)
- peerID := swarm.MustParseHexAddress("9ee7add7")
paymentThreshold := big.NewInt(10000)
err := payer.AnnouncePaymentThreshold(context.Background(), peerID, paymentThreshold)
......
@@ -48,7 +48,7 @@ func TestSendChunkAndReceiveReceipt(t *testing.T) {
psPeer, storerPeer, _, peerAccounting := createPushSyncNode(t, closestPeer, nil, nil, mock.WithClosestPeerErr(topology.ErrWantSelf))
defer storerPeer.Close()
- recorder := streamtest.New(streamtest.WithProtocols(psPeer.Protocol()))
+ recorder := streamtest.New(streamtest.WithProtocols(psPeer.Protocol()), streamtest.WithBaseAddr(pivotNode))
// pivot node needs the streamer since the chunk is intercepted by
// the chunk worker, then gets sent by opening a new stream
@@ -70,7 +70,6 @@ func TestSendChunkAndReceiveReceipt(t *testing.T) {
// this intercepts the incoming receipt message
waitOnRecordAndTest(t, closestPeer, recorder, chunk.Address(), nil)
balance, err := pivotAccounting.Balance(closestPeer)
if err != nil {
t.Fatal(err)
@@ -80,11 +79,10 @@ func TestSendChunkAndReceiveReceipt(t *testing.T) {
t.Fatalf("unexpected balance on pivot. want %d got %d", -int64(fixedPrice), balance)
}
- balance, err = peerAccounting.Balance(closestPeer)
+ balance, err = peerAccounting.Balance(pivotNode)
if err != nil {
t.Fatal(err)
}
if balance.Int64() != int64(fixedPrice) {
t.Fatalf("unexpected balance on peer. want %d got %d", int64(fixedPrice), balance)
}
@@ -104,7 +102,7 @@ func TestPushChunkToClosest(t *testing.T) {
psPeer, storerPeer, _, peerAccounting := createPushSyncNode(t, closestPeer, nil, chanFunc(callbackC), mock.WithClosestPeerErr(topology.ErrWantSelf))
defer storerPeer.Close()
- recorder := streamtest.New(streamtest.WithProtocols(psPeer.Protocol()))
+ recorder := streamtest.New(streamtest.WithProtocols(psPeer.Protocol()), streamtest.WithBaseAddr(pivotNode))
// pivot node needs the streamer since the chunk is intercepted by
// the chunk worker, then gets sent by opening a new stream
@@ -159,7 +157,7 @@ func TestPushChunkToClosest(t *testing.T) {
t.Fatalf("unexpected balance on pivot. want %d got %d", -int64(fixedPrice), balance)
}
- balance, err = peerAccounting.Balance(closestPeer)
+ balance, err = peerAccounting.Balance(pivotNode)
if err != nil {
t.Fatal(err)
}
@@ -220,6 +218,7 @@ func TestPushChunkToNextClosest(t *testing.T) {
}
},
),
+ streamtest.WithBaseAddr(pivotNode),
)
// pivot node needs the streamer since the chunk is intercepted by
@@ -277,7 +276,7 @@ func TestPushChunkToNextClosest(t *testing.T) {
t.Fatalf("unexpected balance on pivot. want %d got %d", -int64(fixedPrice), balance)
}
- balance2, err := peerAccounting2.Balance(peer2)
+ balance2, err := peerAccounting2.Balance(pivotNode)
if err != nil {
t.Fatal(err)
}
@@ -315,13 +314,13 @@ func TestHandler(t *testing.T) {
psClosestPeer, closestStorerPeerDB, _, closestAccounting := createPushSyncNode(t, closestPeer, nil, nil, mock.WithClosestPeerErr(topology.ErrWantSelf))
defer closestStorerPeerDB.Close()
- closestRecorder := streamtest.New(streamtest.WithProtocols(psClosestPeer.Protocol()))
+ closestRecorder := streamtest.New(streamtest.WithProtocols(psClosestPeer.Protocol()), streamtest.WithBaseAddr(pivotPeer))
// creating the pivot peer
psPivot, storerPivotDB, _, pivotAccounting := createPushSyncNode(t, pivotPeer, closestRecorder, nil, mock.WithClosestPeer(closestPeer))
defer storerPivotDB.Close()
- pivotRecorder := streamtest.New(streamtest.WithProtocols(psPivot.Protocol()))
+ pivotRecorder := streamtest.New(streamtest.WithProtocols(psPivot.Protocol()), streamtest.WithBaseAddr(triggerPeer))
// Creating the trigger peer
psTriggerPeer, triggerStorerDB, _, triggerAccounting := createPushSyncNode(t, triggerPeer, pivotRecorder, nil, mock.WithClosestPeer(pivotPeer))
@@ -358,8 +357,7 @@ func TestHandler(t *testing.T) {
t.Fatalf("unexpected balance on trigger. want %d got %d", -int64(fixedPrice), balance)
}
- // we need to check here for pivotPeer instead of triggerPeer because during streamtest the peer in the handler is actually the receiver
- balance, err = pivotAccounting.Balance(pivotPeer)
+ balance, err = pivotAccounting.Balance(triggerPeer)
if err != nil {
t.Fatal(err)
}
@@ -377,7 +375,7 @@ func TestHandler(t *testing.T) {
t.Fatalf("unexpected balance on pivot. want %d got %d", -int64(fixedPrice), balance)
}
- balance, err = closestAccounting.Balance(closestPeer)
+ balance, err = closestAccounting.Balance(pivotPeer)
if err != nil {
t.Fatal(err)
}
......
@@ -11,7 +11,6 @@ import (
"errors"
"fmt"
"io/ioutil"
"os"
"testing"
"time"
@@ -33,41 +32,43 @@ var testTimeout = 5 * time.Second
// TestDelivery tests that a naive request -> delivery flow works.
func TestDelivery(t *testing.T) {
- logger := logging.New(os.Stdout, 5)
- mockStorer := storemock.NewStorer()
- chunk := testingc.FixtureChunk("0033")
+ var (
+ logger = logging.New(ioutil.Discard, 0)
+ mockStorer = storemock.NewStorer()
+ chunk = testingc.FixtureChunk("0033")
+ clientMockAccounting = accountingmock.NewAccounting()
+ serverMockAccounting = accountingmock.NewAccounting()
+ clientAddr = swarm.MustParseHexAddress("9ee7add8")
+ serverAddr = swarm.MustParseHexAddress("9ee7add7")
+ price = uint64(10)
+ pricerMock = accountingmock.NewPricer(price, price)
+ )
// put testdata in the mock store of the server
_, err := mockStorer.Put(context.Background(), storage.ModePutUpload, chunk)
if err != nil {
t.Fatal(err)
}
- serverMockAccounting := accountingmock.NewAccounting()
- price := uint64(10)
- pricerMock := accountingmock.NewPricer(price, price)
// create the server that will handle the request and will serve the response
server := retrieval.New(swarm.MustParseHexAddress("0034"), mockStorer, nil, nil, logger, serverMockAccounting, pricerMock, nil)
recorder := streamtest.New(
streamtest.WithProtocols(server.Protocol()),
+ streamtest.WithBaseAddr(clientAddr),
)
- clientMockAccounting := accountingmock.NewAccounting()
// client mock storer does not store any data at this point
// but should be checked at at the end of the test for the
// presence of the chunk address key and value to ensure delivery
// was successful
clientMockStorer := storemock.NewStorer()
- peerID := swarm.MustParseHexAddress("9ee7add7")
ps := mockPeerSuggester{eachPeerRevFunc: func(f topology.EachPeerFunc) error {
- _, _, _ = f(peerID, 0)
+ _, _, _ = f(serverAddr, 0)
return nil
}}
- client := retrieval.New(swarm.MustParseHexAddress("9ee7add8"), clientMockStorer, recorder, ps, logger, clientMockAccounting, pricerMock, nil)
+ client := retrieval.New(clientAddr, clientMockStorer, recorder, ps, logger, clientMockAccounting, pricerMock, nil)
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
defer cancel()
v, err := client.RetrieveChunk(ctx, chunk.Address())
@@ -77,7 +78,7 @@ func TestDelivery(t *testing.T) {
if !bytes.Equal(v.Data(), chunk.Data()) {
t.Fatalf("request and response data not equal. got %s want %s", v, chunk.Data())
}
- records, err := recorder.Records(peerID, "retrieval", "1.0.0", "retrieval")
+ records, err := recorder.Records(serverAddr, "retrieval", "1.0.0", "retrieval")
if err != nil {
t.Fatal(err)
}
@@ -119,21 +120,22 @@ func TestDelivery(t *testing.T) {
t.Fatalf("got too many deliveries. want 1 got %d", len(gotDeliveries))
}
- clientBalance, _ := clientMockAccounting.Balance(peerID)
+ clientBalance, _ := clientMockAccounting.Balance(serverAddr)
if clientBalance.Int64() != -int64(price) {
t.Fatalf("unexpected balance on client. want %d got %d", -price, clientBalance)
}
- serverBalance, _ := serverMockAccounting.Balance(peerID)
+ serverBalance, _ := serverMockAccounting.Balance(clientAddr)
if serverBalance.Int64() != int64(price) {
t.Fatalf("unexpected balance on server. want %d got %d", price, serverBalance)
}
}
func TestRetrieveChunk(t *testing.T) {
- logger := logging.New(os.Stdout, 5)
- pricer := accountingmock.NewPricer(1, 1)
+ var (
+ logger = logging.New(ioutil.Discard, 0)
+ pricer = accountingmock.NewPricer(1, 1)
+ )
// requesting a chunk from downstream peer is expected
t.Run("downstream", func(t *testing.T) {
@@ -228,6 +230,7 @@ func TestRetrieveChunk(t *testing.T) {
}
func TestRetrievePreemptiveRetry(t *testing.T) {
t.Skip("needs some more tendering. baseaddr change made a mess here")
logger := logging.New(ioutil.Discard, 0)
chunk := testingc.FixtureChunk("0025")
......
@@ -50,8 +50,11 @@ func TestPayment(t *testing.T) {
recipient := pseudosettle.New(nil, logger, storeRecipient)
recipient.SetNotifyPaymentFunc(observer.NotifyPayment)
+ peerID := swarm.MustParseHexAddress("9ee7add7")
recorder := streamtest.New(
streamtest.WithProtocols(recipient.Protocol()),
+ streamtest.WithBaseAddr(peerID),
)
storePayer := mock.NewStateStore()
@@ -59,7 +62,6 @@ func TestPayment(t *testing.T) {
payer := pseudosettle.New(recorder, logger, storePayer)
- peerID := swarm.MustParseHexAddress("9ee7add7")
amount := big.NewInt(10000)
err := payer.Pay(context.Background(), peerID, amount)
......