package torrent

import (
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"math/rand"
	"net"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"testing"
	"time"

	_ "github.com/anacrolix/envpprof"
	"github.com/anacrolix/missinggo"
	"github.com/anacrolix/missinggo/filecache"
	"github.com/anacrolix/missinggo/pubsub"
	"github.com/bradfitz/iter"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"golang.org/x/time/rate"

	"github.com/anacrolix/torrent/bencode"
	"github.com/anacrolix/torrent/internal/testutil"
	"github.com/anacrolix/torrent/iplist"
	"github.com/anacrolix/torrent/metainfo"
	"github.com/anacrolix/torrent/storage"
)

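// TestingConfig returns a Config suitable for tests: a random localhost
// port, a temporary data directory, and DHT and trackers disabled so tests
// stay off the network.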
func TestingConfig() *Config {
	return &Config{
		ListenAddr:      "localhost:0",
		NoDHT:           true,
		DataDir:         tempDir(),
		DisableTrackers: true,
		// Debug: true,
	}
}

func TestClientDefault(t *testing.T) {
	cl, err := NewClient(TestingConfig())
	require.NoError(t, err)
	cl.Close()
}

func TestBoltPieceCompletionClosedWhenClientClosed(t *testing.T) {
	cfg := TestingConfig()
	pc, err := storage.NewBoltPieceCompletion(cfg.DataDir)
	require.NoError(t, err)
	ci := storage.NewFileWithCompletion(cfg.DataDir, pc)
	defer ci.Close()
	cfg.DefaultStorage = ci
	cl, err := NewClient(cfg)
	require.NoError(t, err)
	cl.Close()
	// And again, https://github.com/anacrolix/torrent/issues/158
	cl, err = NewClient(cfg)
	require.NoError(t, err)
	cl.Close()
}

func TestAddDropTorrent(t *testing.T) {
	cl, err := NewClient(TestingConfig())
	require.NoError(t, err)
	defer cl.Close()
	dir, mi := testutil.GreetingTestTorrent()
	defer os.RemoveAll(dir)
	tt, new, err := cl.AddTorrentSpec(TorrentSpecFromMetaInfo(mi))
	require.NoError(t, err)
	assert.True(t, new)
	tt.SetMaxEstablishedConns(0)
	tt.SetMaxEstablishedConns(1)
	tt.Drop()
}

func TestAddTorrentNoSupportedTrackerSchemes(t *testing.T) {
	t.SkipNow()
}

func TestAddTorrentNoUsableURLs(t *testing.T) {
	t.SkipNow()
}

func TestAddPeersToUnknownTorrent(t *testing.T) {
	t.SkipNow()
}

func TestPieceHashSize(t *testing.T) {
	if pieceHash.Size() != 20 {
		t.FailNow()
	}
}

func TestTorrentInitialState(t *testing.T) {
	dir, mi := testutil.GreetingTestTorrent()
	defer os.RemoveAll(dir)
	tor := &Torrent{
		infoHash:          mi.HashInfoBytes(),
		pieceStateChanges: pubsub.NewPubSub(),
	}
	tor.chunkSize = 2
	tor.storageOpener = storage.NewClient(storage.NewFileWithCompletion(tempDir(), storage.NewMapPieceCompletion()))
	// Needed to lock for asynchronous piece verification.
	tor.cl = new(Client)
	tor.cl.mu.Lock()
	err := tor.setInfoBytes(mi.InfoBytes)
	tor.cl.mu.Unlock()
	require.NoError(t, err)
	require.Len(t, tor.pieces, 3)
	tor.pendAllChunkSpecs(0)
	tor.cl.mu.Lock()
	assert.EqualValues(t, 3, tor.pieceNumPendingChunks(0))
	tor.cl.mu.Unlock()
	assert.EqualValues(t, chunkSpec{4, 1}, chunkIndexSpec(2, tor.pieceLength(0), tor.chunkSize))
}

func TestUnmarshalPEXMsg(t *testing.T) {
	var m peerExchangeMessage
	if err := bencode.Unmarshal([]byte("d5:added12:\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0ce"), &m); err != nil {
		t.Fatal(err)
	}
	if len(m.Added) != 2 {
		t.FailNow()
	}
	if m.Added[0].Port != 0x506 {
		t.FailNow()
	}
}

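// Table-driven check that reducedDialTimeout scales the nominal timeout down
// as the number of pending peers exceeds the half-open connection limit, and
// that the result is never below the configured minimum.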
func TestReducedDialTimeout(t *testing.T) {
	cfg := &Config{}
	cfg.setDefaults()
	for _, _case := range []struct {
		Max             time.Duration
		HalfOpenLimit   int
		PendingPeers    int
		ExpectedReduced time.Duration
	}{
		{cfg.NominalDialTimeout, 40, 0, cfg.NominalDialTimeout},
		{cfg.NominalDialTimeout, 40, 1, cfg.NominalDialTimeout},
		{cfg.NominalDialTimeout, 40, 39, cfg.NominalDialTimeout},
		{cfg.NominalDialTimeout, 40, 40, cfg.NominalDialTimeout / 2},
		{cfg.NominalDialTimeout, 40, 80, cfg.NominalDialTimeout / 3},
		{cfg.NominalDialTimeout, 40, 4000, cfg.NominalDialTimeout / 101},
	} {
		reduced := reducedDialTimeout(cfg.MinDialTimeout, _case.Max, _case.HalfOpenLimit, _case.PendingPeers)
		expected := _case.ExpectedReduced
		if expected < cfg.MinDialTimeout {
			expected = cfg.MinDialTimeout
		}
		if reduced != expected {
			t.Fatalf("expected %s, got %s", _case.ExpectedReduced, reduced)
		}
	}
}

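// Check that the uTP socket still delivers raw, non-uTP UDP packets via
// ReadFrom while it is also accepting uTP connections on the same port.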
func TestUTPRawConn(t *testing.T) {
	l, err := NewUtpSocket("udp", "")
	require.NoError(t, err)
	defer l.Close()
	go func() {
		for {
			_, err := l.Accept()
			if err != nil {
				break
			}
		}
	}()
	// Connect a UTP peer to see if the RawConn will still work.
	s, err := NewUtpSocket("udp", "")
	require.NoError(t, err)
	defer s.Close()
	utpPeer, err := s.Dial(fmt.Sprintf("localhost:%d", missinggo.AddrPort(l.Addr())))
	require.NoError(t, err)
	defer utpPeer.Close()
	peer, err := net.ListenPacket("udp", ":0")
	require.NoError(t, err)
	defer peer.Close()

	msgsReceived := 0
	// How many messages to send. I've set this to double the channel buffer
	// size in the raw packetConn.
	const N = 200
	readerStopped := make(chan struct{})
	// The reader goroutine.
	go func() {
		defer close(readerStopped)
		b := make([]byte, 500)
		for i := 0; i < N; i++ {
			n, _, err := l.ReadFrom(b)
			require.NoError(t, err)
			msgsReceived++
			var d int
			fmt.Sscan(string(b[:n]), &d)
			assert.Equal(t, i, d)
		}
	}()
	udpAddr, err := net.ResolveUDPAddr("udp", fmt.Sprintf("localhost:%d", missinggo.AddrPort(l.Addr())))
	require.NoError(t, err)
	for i := 0; i < N; i++ {
		_, err := peer.WriteTo([]byte(fmt.Sprintf("%d", i)), udpAddr)
		require.NoError(t, err)
		time.Sleep(time.Millisecond)
	}
	select {
	case <-readerStopped:
	case <-time.After(time.Second):
		t.Fatal("reader timed out")
	}
	if msgsReceived != N {
		t.Fatalf("messages received: %d", msgsReceived)
	}
}

func TestTwoClientsArbitraryPorts(t *testing.T) {
	for i := 0; i < 2; i++ {
		cl, err := NewClient(TestingConfig())
		if err != nil {
			t.Fatal(err)
		}
		defer cl.Close()
	}
}

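// Add and then drop a large number of torrents, each with a distinct
// synthetic infohash.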
func TestAddDropManyTorrents(t *testing.T) {
	cl, err := NewClient(TestingConfig())
	require.NoError(t, err)
	defer cl.Close()
	for i := range iter.N(1000) {
		var spec TorrentSpec
		binary.PutVarint(spec.InfoHash[:], int64(i))
		tt, new, err := cl.AddTorrentSpec(&spec)
		assert.NoError(t, err)
		assert.True(t, new)
		defer tt.Drop()
	}
}

type FileCacheClientStorageFactoryParams struct {
	Capacity    int64
	SetCapacity bool
	Wrapper     func(*filecache.Cache) storage.ClientImpl
}

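// NewFileCacheClientStorageFactory returns a storageFactory that backs piece
// storage with a filecache.Cache rooted at the data directory, optionally
// capping the cache capacity.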
func NewFileCacheClientStorageFactory(ps FileCacheClientStorageFactoryParams) storageFactory {
	return func(dataDir string) storage.ClientImpl {
		fc, err := filecache.NewCache(dataDir)
		if err != nil {
			panic(err)
		}
		if ps.SetCapacity {
			fc.SetCapacity(ps.Capacity)
		}
		return ps.Wrapper(fc)
	}
}

type storageFactory func(string) storage.ClientImpl

func TestClientTransferDefault(t *testing.T) {
	testClientTransfer(t, testClientTransferParams{
		ExportClientStatus: true,
		LeecherStorage: NewFileCacheClientStorageFactory(FileCacheClientStorageFactoryParams{
			Wrapper: fileCachePieceResourceStorage,
		}),
	})
}

func TestClientTransferRateLimitedUpload(t *testing.T) {
	started := time.Now()
	testClientTransfer(t, testClientTransferParams{
		// We are uploading 13 bytes (the length of the greeting torrent). The
		// chunks are 2 bytes in length. Then the smallest burst we can run
		// with is 2. Time taken is (13-burst)/rate.
		SeederUploadRateLimiter: rate.NewLimiter(11, 2),
	})
	require.True(t, time.Since(started) > time.Second)
}

func TestClientTransferRateLimitedDownload(t *testing.T) {
	testClientTransfer(t, testClientTransferParams{
		LeecherDownloadRateLimiter: rate.NewLimiter(512, 512),
	})
}

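// fileCachePieceResourceStorage adapts a filecache.Cache to the
// resource-pieces storage implementation.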
func fileCachePieceResourceStorage(fc *filecache.Cache) storage.ClientImpl {
	return storage.NewResourcePieces(fc.AsResourceProvider())
}

func TestClientTransferSmallCache(t *testing.T) {
	testClientTransfer(t, testClientTransferParams{
		LeecherStorage: NewFileCacheClientStorageFactory(FileCacheClientStorageFactoryParams{
			SetCapacity: true,
			// Going below the piece length means a complete piece can never
			// be cached, so it can never be hashed.
			Capacity: 5,
			Wrapper:  fileCachePieceResourceStorage,
		}),
		SetReadahead: true,
		// Can't readahead too far or the cache will thrash and drop data we
		// thought we had.
		Readahead:          0,
		ExportClientStatus: true,
	})
}

func TestClientTransferVarious(t *testing.T) {
	// Leecher storage
	for _, ls := range []storageFactory{
		NewFileCacheClientStorageFactory(FileCacheClientStorageFactoryParams{
			Wrapper: fileCachePieceResourceStorage,
		}),
		storage.NewBoltDB,
	} {
		// Seeder storage
		for _, ss := range []func(string) storage.ClientImpl{
			storage.NewFile,
			storage.NewMMap,
		} {
			for _, responsive := range []bool{false, true} {
				testClientTransfer(t, testClientTransferParams{
					Responsive:     responsive,
					SeederStorage:  ss,
					LeecherStorage: ls,
				})
				for _, readahead := range []int64{-1, 0, 1, 2, 3, 4, 5, 6, 9, 10, 11, 12, 13, 14, 15, 20} {
					testClientTransfer(t, testClientTransferParams{
						SeederStorage:  ss,
						Responsive:     responsive,
						SetReadahead:   true,
						Readahead:      readahead,
						LeecherStorage: ls,
					})
				}
			}
		}
	}
}

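// testClientTransferParams selects the storage, rate limiting and reader
// behaviour exercised by testClientTransfer.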
type testClientTransferParams struct {
	Responsive                 bool
	Readahead                  int64
	SetReadahead               bool
	ExportClientStatus         bool
	LeecherStorage             func(string) storage.ClientImpl
	SeederStorage              func(string) storage.ClientImpl
	SeederUploadRateLimiter    *rate.Limiter
	LeecherDownloadRateLimiter *rate.Limiter
}

// Creates a seeder and a leecher, and ensures the data transfers when a read
// is attempted on the leecher.
func testClientTransfer(t *testing.T, ps testClientTransferParams) {
	greetingTempDir, mi := testutil.GreetingTestTorrent()
	defer os.RemoveAll(greetingTempDir)
	// Create seeder and a Torrent.
	cfg := TestingConfig()
	cfg.Seed = true
	cfg.UploadRateLimiter = ps.SeederUploadRateLimiter
	// cfg.ListenAddr = "localhost:4000"
	if ps.SeederStorage != nil {
		cfg.DefaultStorage = ps.SeederStorage(greetingTempDir)
		defer cfg.DefaultStorage.Close()
	} else {
		cfg.DataDir = greetingTempDir
	}
	seeder, err := NewClient(cfg)
	require.NoError(t, err)
	if ps.ExportClientStatus {
		testutil.ExportStatusWriter(seeder, "s")
	}
	seederTorrent, _, _ := seeder.AddTorrentSpec(TorrentSpecFromMetaInfo(mi))
	// Run a Stats right after Closing the Client. This will trigger the Stats
	// panic in #214 caused by RemoteAddr on Closed uTP sockets.
	defer seederTorrent.Stats()
	defer seeder.Close()
	seederTorrent.VerifyData()
	// Create leecher and a Torrent.
	leecherDataDir, err := ioutil.TempDir("", "")
	require.NoError(t, err)
	defer os.RemoveAll(leecherDataDir)
	if ps.LeecherStorage == nil {
		cfg.DataDir = leecherDataDir
	} else {
		cfg.DefaultStorage = ps.LeecherStorage(leecherDataDir)
	}
	cfg.DownloadRateLimiter = ps.LeecherDownloadRateLimiter
	// cfg.ListenAddr = "localhost:4001"
	leecher, err := NewClient(cfg)
	require.NoError(t, err)
	defer leecher.Close()
	if ps.ExportClientStatus {
		testutil.ExportStatusWriter(leecher, "l")
	}
	leecherGreeting, new, err := leecher.AddTorrentSpec(func() (ret *TorrentSpec) {
		ret = TorrentSpecFromMetaInfo(mi)
		ret.ChunkSize = 2
		return
	}())
	require.NoError(t, err)
	assert.True(t, new)
	// Now do some things with leecher and seeder.
	addClientPeer(leecherGreeting, seeder)
	r := leecherGreeting.NewReader()
	defer r.Close()
	if ps.Responsive {
		r.SetResponsive()
	}
	if ps.SetReadahead {
		r.SetReadahead(ps.Readahead)
	}
	assertReadAllGreeting(t, r)
	// After one read through, we can assume certain torrent statistics.
	// These are not a strict requirement. It is however interesting to
	// follow.
	// t.Logf("%#v", seederTorrent.Stats())
	// assert.EqualValues(t, 13, seederTorrent.Stats().DataBytesWritten)
	// assert.EqualValues(t, 8, seederTorrent.Stats().ChunksWritten)
	// assert.EqualValues(t, 13, leecherGreeting.Stats().DataBytesRead)
	// assert.EqualValues(t, 8, leecherGreeting.Stats().ChunksRead)
	// Read through again for the cases where the torrent data size exceeds
	// the size of the cache.
	assertReadAllGreeting(t, r)
}

func assertReadAllGreeting(t *testing.T, r io.ReadSeeker) {
	pos, err := r.Seek(0, io.SeekStart)
	assert.NoError(t, err)
	assert.EqualValues(t, 0, pos)
	_greeting, err := ioutil.ReadAll(r)
	assert.NoError(t, err)
	assert.EqualValues(t, testutil.GreetingFileContents, _greeting)
}

// Check that after completing leeching, a leecher transitions to seeding
// correctly. Connected in a chain like so: Seeder <-> Leecher <-> LeecherLeecher.
func TestSeedAfterDownloading(t *testing.T) {
	greetingTempDir, mi := testutil.GreetingTestTorrent()
	defer os.RemoveAll(greetingTempDir)
	cfg := TestingConfig()
	cfg.Seed = true
	cfg.DataDir = greetingTempDir
	seeder, err := NewClient(cfg)
	require.NoError(t, err)
	defer seeder.Close()
	testutil.ExportStatusWriter(seeder, "s")
	seederTorrent, _, _ := seeder.AddTorrentSpec(TorrentSpecFromMetaInfo(mi))
	seederTorrent.VerifyData()
	cfg.DataDir, err = ioutil.TempDir("", "")
	require.NoError(t, err)
	defer os.RemoveAll(cfg.DataDir)
	leecher, err := NewClient(cfg)
	require.NoError(t, err)
	defer leecher.Close()
	testutil.ExportStatusWriter(leecher, "l")
	cfg.Seed = false
	// cfg.TorrentDataOpener = nil
	cfg.DataDir, err = ioutil.TempDir("", "")
	require.NoError(t, err)
	defer os.RemoveAll(cfg.DataDir)
	leecherLeecher, _ := NewClient(cfg)
	defer leecherLeecher.Close()
	testutil.ExportStatusWriter(leecherLeecher, "ll")
	leecherGreeting, _, _ := leecher.AddTorrentSpec(func() (ret *TorrentSpec) {
		ret = TorrentSpecFromMetaInfo(mi)
		ret.ChunkSize = 2
		return
	}())
	llg, _, _ := leecherLeecher.AddTorrentSpec(func() (ret *TorrentSpec) {
		ret = TorrentSpecFromMetaInfo(mi)
		ret.ChunkSize = 3
		return
	}())
	// Simultaneously DownloadAll in Leecher, and read the contents
	// consecutively in LeecherLeecher. This non-deterministically triggered a
	// case where the leecher wouldn't unchoke the LeecherLeecher.
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		r := llg.NewReader()
		defer r.Close()
		b, err := ioutil.ReadAll(r)
		require.NoError(t, err)
		assert.EqualValues(t, testutil.GreetingFileContents, b)
	}()
	addClientPeer(leecherGreeting, seeder)
	addClientPeer(leecherGreeting, leecherLeecher)
	wg.Add(1)
	go func() {
		defer wg.Done()
		leecherGreeting.DownloadAll()
		leecher.WaitAll()
	}()
	wg.Wait()
}

func TestMergingTrackersByAddingSpecs(t *testing.T) {
	cl, err := NewClient(TestingConfig())
	require.NoError(t, err)
	defer cl.Close()
	spec := TorrentSpec{}
	T, new, _ := cl.AddTorrentSpec(&spec)
	if !new {
		t.FailNow()
	}
	spec.Trackers = [][]string{{"http://a"}, {"udp://b"}}
	_, new, _ = cl.AddTorrentSpec(&spec)
	assert.False(t, new)
	assert.EqualValues(t, [][]string{{"http://a"}, {"udp://b"}}, T.metainfo.AnnounceList)
	// Because trackers are disabled in TestingConfig.
	assert.EqualValues(t, 0, len(T.trackerAnnouncers))
}

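// badStorage is a storage.ClientImpl whose pieces claim to be complete,
// return randomly truncated data, and refuse to be marked complete or
// incomplete.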
type badStorage struct{}

var _ storage.ClientImpl = badStorage{}

func (bs badStorage) OpenTorrent(*metainfo.Info, metainfo.Hash) (storage.TorrentImpl, error) {
	return bs, nil
}

func (bs badStorage) Close() error {
	return nil
}

func (bs badStorage) Piece(p metainfo.Piece) storage.PieceImpl {
	return badStoragePiece{p}
}

type badStoragePiece struct {
	p metainfo.Piece
}

var _ storage.PieceImpl = badStoragePiece{}

func (p badStoragePiece) WriteAt(b []byte, off int64) (int, error) {
	return 0, nil
}

func (p badStoragePiece) Completion() storage.Completion {
	return storage.Completion{Complete: true, Ok: true}
}

func (p badStoragePiece) MarkComplete() error {
	return errors.New("psyyyyyyyche")
}

func (p badStoragePiece) MarkNotComplete() error {
	return errors.New("psyyyyyyyche")
}

func (p badStoragePiece) randomlyTruncatedDataString() string {
	return "hello, world\n"[:rand.Intn(14)]
}

func (p badStoragePiece) ReadAt(b []byte, off int64) (n int, err error) {
	r := strings.NewReader(p.randomlyTruncatedDataString())
	return r.ReadAt(b, off+p.p.Offset())
}

// We read from a piece which is marked completed, but is missing data.
func TestCompletedPieceWrongSize(t *testing.T) {
	cfg := TestingConfig()
	cfg.DefaultStorage = badStorage{}
	cl, err := NewClient(cfg)
	require.NoError(t, err)
	defer cl.Close()
	info := metainfo.Info{
		PieceLength: 15,
		Pieces:      make([]byte, 20),
		Files: []metainfo.FileInfo{
			{Path: []string{"greeting"}, Length: 13},
		},
	}
	b, err := bencode.Marshal(info)
	require.NoError(t, err)
	tt, new, err := cl.AddTorrentSpec(&TorrentSpec{
		InfoBytes: b,
		InfoHash:  metainfo.HashBytes(b),
	})
	require.NoError(t, err)
	defer tt.Drop()
	assert.True(t, new)
	r := tt.NewReader()
	defer r.Close()
	b, err = ioutil.ReadAll(r)
	assert.Len(t, b, 13)
	assert.NoError(t, err)
}

func BenchmarkAddLargeTorrent(b *testing.B) {
	cfg := TestingConfig()
	cfg.DisableTCP = true
	cfg.DisableUTP = true
	cfg.ListenAddr = "redonk"
	cl, err := NewClient(cfg)
	require.NoError(b, err)
	defer cl.Close()
	for range iter.N(b.N) {
		t, err := cl.AddTorrentFromFile("testdata/bootstrap.dat.torrent")
		if err != nil {
			b.Fatal(err)
		}
		t.Drop()
	}
}

func TestResponsive(t *testing.T) {
	seederDataDir, mi := testutil.GreetingTestTorrent()
	defer os.RemoveAll(seederDataDir)
	cfg := TestingConfig()
	cfg.Seed = true
	cfg.DataDir = seederDataDir
	seeder, err := NewClient(cfg)
	require.Nil(t, err)
	defer seeder.Close()
	seederTorrent, _, _ := seeder.AddTorrentSpec(TorrentSpecFromMetaInfo(mi))
	seederTorrent.VerifyData()
	leecherDataDir, err := ioutil.TempDir("", "")
	require.Nil(t, err)
	defer os.RemoveAll(leecherDataDir)
	cfg = TestingConfig()
	cfg.DataDir = leecherDataDir
	leecher, err := NewClient(cfg)
	require.Nil(t, err)
	defer leecher.Close()
	leecherTorrent, _, _ := leecher.AddTorrentSpec(func() (ret *TorrentSpec) {
		ret = TorrentSpecFromMetaInfo(mi)
		ret.ChunkSize = 2
		return
	}())
	addClientPeer(leecherTorrent, seeder)
	reader := leecherTorrent.NewReader()
	defer reader.Close()
	reader.SetReadahead(0)
	reader.SetResponsive()
	b := make([]byte, 2)
	_, err = reader.Seek(3, io.SeekStart)
	require.NoError(t, err)
	_, err = io.ReadFull(reader, b)
	assert.Nil(t, err)
	assert.EqualValues(t, "lo", string(b))
	_, err = reader.Seek(11, io.SeekStart)
	require.NoError(t, err)
	n, err := io.ReadFull(reader, b)
	assert.Nil(t, err)
	assert.EqualValues(t, 2, n)
	assert.EqualValues(t, "d\n", string(b))
}

func TestTorrentDroppedDuringResponsiveRead(t *testing.T) {
	seederDataDir, mi := testutil.GreetingTestTorrent()
	defer os.RemoveAll(seederDataDir)
	cfg := TestingConfig()
	cfg.Seed = true
	cfg.DataDir = seederDataDir
	seeder, err := NewClient(cfg)
	require.Nil(t, err)
	defer seeder.Close()
	seederTorrent, _, _ := seeder.AddTorrentSpec(TorrentSpecFromMetaInfo(mi))
	seederTorrent.VerifyData()
	leecherDataDir, err := ioutil.TempDir("", "")
	require.Nil(t, err)
	defer os.RemoveAll(leecherDataDir)
	cfg = TestingConfig()
	cfg.DataDir = leecherDataDir
	leecher, err := NewClient(cfg)
	require.Nil(t, err)
	defer leecher.Close()
	leecherTorrent, _, _ := leecher.AddTorrentSpec(func() (ret *TorrentSpec) {
		ret = TorrentSpecFromMetaInfo(mi)
		ret.ChunkSize = 2
		return
	}())
	addClientPeer(leecherTorrent, seeder)
	reader := leecherTorrent.NewReader()
	defer reader.Close()
	reader.SetReadahead(0)
	reader.SetResponsive()
	b := make([]byte, 2)
	_, err = reader.Seek(3, io.SeekStart)
	require.NoError(t, err)
	_, err = io.ReadFull(reader, b)
	assert.Nil(t, err)
	assert.EqualValues(t, "lo", string(b))
	go leecherTorrent.Drop()
	_, err = reader.Seek(11, io.SeekStart)
	require.NoError(t, err)
	n, err := reader.Read(b)
	assert.EqualError(t, err, "torrent closed")
	assert.EqualValues(t, 0, n)
}

func TestDHTInheritBlocklist(t *testing.T) {
	ipl := iplist.New(nil)
	require.NotNil(t, ipl)
	cfg := TestingConfig()
	cfg.IPBlocklist = ipl
	cfg.NoDHT = false
	cl, err := NewClient(cfg)
	require.NoError(t, err)
	defer cl.Close()
	require.Equal(t, ipl, cl.DHT().IPBlocklist())
}

// Check that stuff is merged in subsequent AddTorrentSpec for the same
// infohash.
func TestAddTorrentSpecMerging(t *testing.T) {
	cl, err := NewClient(TestingConfig())
	require.NoError(t, err)
	defer cl.Close()
	dir, mi := testutil.GreetingTestTorrent()
	defer os.RemoveAll(dir)
	tt, new, err := cl.AddTorrentSpec(&TorrentSpec{
		InfoHash: mi.HashInfoBytes(),
	})
	require.NoError(t, err)
	require.True(t, new)
	require.Nil(t, tt.Info())
	_, new, err = cl.AddTorrentSpec(TorrentSpecFromMetaInfo(mi))
	require.NoError(t, err)
	require.False(t, new)
	require.NotNil(t, tt.Info())
}

func TestTorrentDroppedBeforeGotInfo(t *testing.T) {
	dir, mi := testutil.GreetingTestTorrent()
	os.RemoveAll(dir)
	cl, _ := NewClient(TestingConfig())
	defer cl.Close()
	tt, _, _ := cl.AddTorrentSpec(&TorrentSpec{
		InfoHash: mi.HashInfoBytes(),
	})
	tt.Drop()
	assert.EqualValues(t, 0, len(cl.Torrents()))
	select {
	case <-tt.GotInfo():
		t.FailNow()
	default:
	}
}

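// writeTorrentData writes b into ts piece by piece, following the piece
// boundaries described by info.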
func writeTorrentData(ts *storage.Torrent, info metainfo.Info, b []byte) {
	for i := range iter.N(info.NumPieces()) {
		p := info.Piece(i)
		ts.Piece(p).WriteAt(b[p.Offset():p.Offset()+p.Length()], 0)
	}
}

func testAddTorrentPriorPieceCompletion(t *testing.T, alreadyCompleted bool, csf func(*filecache.Cache) storage.ClientImpl) {
	fileCacheDir, err := ioutil.TempDir("", "")
	require.NoError(t, err)
	defer os.RemoveAll(fileCacheDir)
	fileCache, err := filecache.NewCache(fileCacheDir)
	require.NoError(t, err)
	greetingDataTempDir, greetingMetainfo := testutil.GreetingTestTorrent()
	defer os.RemoveAll(greetingDataTempDir)
	filePieceStore := csf(fileCache)
	defer filePieceStore.Close()
	info, err := greetingMetainfo.UnmarshalInfo()
	require.NoError(t, err)
	ih := greetingMetainfo.HashInfoBytes()
	greetingData, err := storage.NewClient(filePieceStore).OpenTorrent(&info, ih)
	require.NoError(t, err)
	writeTorrentData(greetingData, info, []byte(testutil.GreetingFileContents))
	// require.Equal(t, len(testutil.GreetingFileContents), written)
	// require.NoError(t, err)
	for i := 0; i < info.NumPieces(); i++ {
		p := info.Piece(i)
		if alreadyCompleted {
			require.NoError(t, greetingData.Piece(p).MarkComplete())
		}
	}
	cfg := TestingConfig()
	// TODO: Disable network option?
	cfg.DisableTCP = true
	cfg.DisableUTP = true
	cfg.DefaultStorage = filePieceStore
	cl, err := NewClient(cfg)
	require.NoError(t, err)
	defer cl.Close()
	tt, err := cl.AddTorrent(greetingMetainfo)
	require.NoError(t, err)
	psrs := tt.PieceStateRuns()
	assert.Len(t, psrs, 1)
	assert.EqualValues(t, 3, psrs[0].Length)
	assert.Equal(t, alreadyCompleted, psrs[0].Complete)
	if alreadyCompleted {
		r := tt.NewReader()
		b, err := ioutil.ReadAll(r)
		assert.NoError(t, err)
		assert.EqualValues(t, testutil.GreetingFileContents, b)
	}
}

func TestAddTorrentPiecesAlreadyCompleted(t *testing.T) {
	testAddTorrentPriorPieceCompletion(t, true, fileCachePieceResourceStorage)
}

func TestAddTorrentPiecesNotAlreadyCompleted(t *testing.T) {
	testAddTorrentPriorPieceCompletion(t, false, fileCachePieceResourceStorage)
}

func TestAddMetainfoWithNodes(t *testing.T) {
	cfg := TestingConfig()
	cfg.ListenAddr = ":0"
	cfg.NoDHT = false
	// For now, we want to just jam the nodes into the table, without
	// verifying them first. Also the DHT code doesn't support mixing secure
	// and insecure nodes if security is enabled (yet).
	cfg.DHTConfig.NoSecurity = true
	cl, err := NewClient(cfg)
	require.NoError(t, err)
	defer cl.Close()
	assert.EqualValues(t, 0, cl.DHT().NumNodes()+cl.DHT().Stats().OutstandingTransactions)
	tt, err := cl.AddTorrentFromFile("metainfo/testdata/issue_65a.torrent")
	require.NoError(t, err)
	// Nodes are not added or exposed in Torrent's metainfo. We just check
	// the announce-list here instead. TODO: Add nodes.
	assert.Len(t, tt.metainfo.AnnounceList, 5)
	// There are 6 nodes in the torrent file.
	assert.EqualValues(t, 6, cl.DHT().NumNodes()+cl.DHT().Stats().OutstandingTransactions)
}

type testDownloadCancelParams struct {
	ExportClientStatus        bool
	SetLeecherStorageCapacity bool
	LeecherStorageCapacity    int64
	Cancel                    bool
}

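// testDownloadCancel runs a seeder and a leecher, starts DownloadAll on the
// leecher, optionally cancels every piece straight away, and then checks
// which pieces end up complete.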
func testDownloadCancel(t *testing.T, ps testDownloadCancelParams) {
	greetingTempDir, mi := testutil.GreetingTestTorrent()
	defer os.RemoveAll(greetingTempDir)
	cfg := TestingConfig()
	cfg.Seed = true
	cfg.DataDir = greetingTempDir
	seeder, err := NewClient(cfg)
	require.NoError(t, err)
	defer seeder.Close()
	if ps.ExportClientStatus {
		testutil.ExportStatusWriter(seeder, "s")
	}
	seederTorrent, _, _ := seeder.AddTorrentSpec(TorrentSpecFromMetaInfo(mi))
	seederTorrent.VerifyData()
	leecherDataDir, err := ioutil.TempDir("", "")
	require.NoError(t, err)
	defer os.RemoveAll(leecherDataDir)
	fc, err := filecache.NewCache(leecherDataDir)
	require.NoError(t, err)
	if ps.SetLeecherStorageCapacity {
		fc.SetCapacity(ps.LeecherStorageCapacity)
	}
	cfg.DefaultStorage = storage.NewResourcePieces(fc.AsResourceProvider())
	cfg.DataDir = leecherDataDir
	leecher, _ := NewClient(cfg)
	defer leecher.Close()
	if ps.ExportClientStatus {
		testutil.ExportStatusWriter(leecher, "l")
	}
	leecherGreeting, new, err := leecher.AddTorrentSpec(func() (ret *TorrentSpec) {
		ret = TorrentSpecFromMetaInfo(mi)
		ret.ChunkSize = 2
		return
	}())
	require.NoError(t, err)
	assert.True(t, new)
	psc := leecherGreeting.SubscribePieceStateChanges()
	defer psc.Close()
	leecherGreeting.DownloadAll()
	if ps.Cancel {
		leecherGreeting.CancelPieces(0, leecherGreeting.NumPieces())
	}
	addClientPeer(leecherGreeting, seeder)
	completes := make(map[int]bool, 3)
values:
	for {
		// started := time.Now()
		select {
		case _v := <-psc.Values:
			// log.Print(time.Since(started))
			v := _v.(PieceStateChange)
			completes[v.Index] = v.Complete
		case <-time.After(100 * time.Millisecond):
			break values
		}
	}
	if ps.Cancel {
		assert.EqualValues(t, map[int]bool{0: false, 1: false, 2: false}, completes)
	} else {
		assert.EqualValues(t, map[int]bool{0: true, 1: true, 2: true}, completes)
	}
}

func TestTorrentDownloadAll(t *testing.T) {
	testDownloadCancel(t, testDownloadCancelParams{})
}

func TestTorrentDownloadAllThenCancel(t *testing.T) {
	testDownloadCancel(t, testDownloadCancelParams{
		Cancel: true,
	})
}

// Ensure that it's an error for a peer to send an invalid have message.
func TestPeerInvalidHave(t *testing.T) {
	cl, err := NewClient(TestingConfig())
	require.NoError(t, err)
	defer cl.Close()
	info := metainfo.Info{
		PieceLength: 1,
		Pieces:      make([]byte, 20),
		Files:       []metainfo.FileInfo{{Length: 1}},
	}
	infoBytes, err := bencode.Marshal(info)
	require.NoError(t, err)
	tt, _new, err := cl.AddTorrentSpec(&TorrentSpec{
		InfoBytes: infoBytes,
		InfoHash:  metainfo.HashBytes(infoBytes),
		Storage:   badStorage{},
	})
	require.NoError(t, err)
	assert.True(t, _new)
	defer tt.Drop()
	cn := &connection{
		t: tt,
	}
	assert.NoError(t, cn.peerSentHave(0))
	assert.Error(t, cn.peerSentHave(1))
}

func TestPieceCompletedInStorageButNotClient(t *testing.T) {
	greetingTempDir, greetingMetainfo := testutil.GreetingTestTorrent()
	defer os.RemoveAll(greetingTempDir)
	cfg := TestingConfig()
	cfg.DataDir = greetingTempDir
	// Use the configured cfg, whose DataDir points at the greeting data.
	seeder, err := NewClient(cfg)
	require.NoError(t, err)
	seeder.AddTorrentSpec(&TorrentSpec{
		InfoBytes: greetingMetainfo.InfoBytes,
	})
}

func TestPrepareTrackerAnnounce(t *testing.T) {
	cl := &Client{}
	blocked, urlToUse, host, err := cl.prepareTrackerAnnounceUnlocked("http://localhost:1234/announce?herp")
	require.NoError(t, err)
	assert.False(t, blocked)
	assert.EqualValues(t, "localhost:1234", host)
	assert.EqualValues(t, "http://127.0.0.1:1234/announce?herp", urlToUse)
}

// Check that when the listen port is 0, all the protocols listened on have
// the same port, and it isn't zero.
func TestClientDynamicListenPortAllProtocols(t *testing.T) {
	cl, err := NewClient(TestingConfig())
	require.NoError(t, err)
	defer cl.Close()
	assert.NotEqual(t, 0, missinggo.AddrPort(cl.ListenAddr()))
	assert.Equal(t, missinggo.AddrPort(cl.utpSock.Addr()), missinggo.AddrPort(cl.tcpListener.Addr()))
}

func TestClientDynamicListenTCPOnly(t *testing.T) {
	cfg := TestingConfig()
	cfg.DisableUTP = true
	cl, err := NewClient(cfg)
	require.NoError(t, err)
	defer cl.Close()
	assert.NotEqual(t, 0, missinggo.AddrPort(cl.ListenAddr()))
	assert.Nil(t, cl.utpSock)
}

func TestClientDynamicListenUTPOnly(t *testing.T) {
	cfg := TestingConfig()
	cfg.DisableTCP = true
	cl, err := NewClient(cfg)
	require.NoError(t, err)
	defer cl.Close()
	assert.NotEqual(t, 0, missinggo.AddrPort(cl.ListenAddr()))
	assert.Nil(t, cl.tcpListener)
}

func TestClientDynamicListenPortNoProtocols(t *testing.T) {
	cfg := TestingConfig()
	cfg.DisableTCP = true
	cfg.DisableUTP = true
	cl, err := NewClient(cfg)
	require.NoError(t, err)
	defer cl.Close()
	assert.Nil(t, cl.ListenAddr())
}

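// addClientPeer feeds cl's listen address to t as a peer, so t connects to
// cl directly.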
func addClientPeer(t *Torrent, cl *Client) {
	t.AddPeers([]Peer{
		{
			IP:   missinggo.AddrIP(cl.ListenAddr()),
			Port: missinggo.AddrPort(cl.ListenAddr()),
		},
	})
}

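// totalConns counts established connections across the given torrents,
// taking each client's lock while reading the connection list.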
func totalConns(tts []*Torrent) (ret int) {
	for _, tt := range tts {
		tt.cl.mu.Lock()
		ret += len(tt.conns)
		tt.cl.mu.Unlock()
	}
	return
}

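// Check that raising and lowering SetMaxEstablishedConns on one torrent
// grows and prunes its established connections accordingly.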
func TestSetMaxEstablishedConn(t *testing.T) {
	var tts []*Torrent
	ih := testutil.GreetingMetaInfo().HashInfoBytes()
	for i := range iter.N(3) {
		cl, err := NewClient(TestingConfig())
		require.NoError(t, err)
		defer cl.Close()
		tt, _ := cl.AddTorrentInfoHash(ih)
		tt.SetMaxEstablishedConns(2)
		testutil.ExportStatusWriter(cl, fmt.Sprintf("%d", i))
		tts = append(tts, tt)
	}
	addPeers := func() {
		for i, tt := range tts {
			for _, _tt := range tts[:i] {
				addClientPeer(tt, _tt.cl)
			}
		}
	}
	waitTotalConns := func(num int) {
		for totalConns(tts) != num {
			time.Sleep(time.Millisecond)
		}
	}
	addPeers()
	waitTotalConns(6)
	tts[0].SetMaxEstablishedConns(1)
	waitTotalConns(4)
	tts[0].SetMaxEstablishedConns(0)
	waitTotalConns(2)
	tts[0].SetMaxEstablishedConns(1)
	addPeers()
	waitTotalConns(4)
	tts[0].SetMaxEstablishedConns(2)
	addPeers()
	waitTotalConns(6)
}

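// makeMagnet writes a small file named name under dir, builds a torrent for
// it, adds it to cl for seeding, and returns the magnet link.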
func makeMagnet(t *testing.T, cl *Client, dir string, name string) string {
	os.MkdirAll(dir, 0770)
	file, err := os.Create(filepath.Join(dir, name))
	require.NoError(t, err)
	file.Write([]byte(name))
	file.Close()
	mi := metainfo.MetaInfo{}
	mi.SetDefaults()
	info := metainfo.Info{PieceLength: 256 * 1024}
	err = info.BuildFromFilePath(filepath.Join(dir, name))
	require.NoError(t, err)
	mi.InfoBytes, err = bencode.Marshal(info)
	require.NoError(t, err)
	magnet := mi.Magnet(name, mi.HashInfoBytes()).String()
	tr, err := cl.AddTorrent(&mi)
	require.NoError(t, err)
	require.True(t, tr.Seeding())
	tr.VerifyData()
	return magnet
}

// https://github.com/anacrolix/torrent/issues/114
func TestMultipleTorrentsWithEncryption(t *testing.T) {
	cfg := TestingConfig()
	cfg.DisableUTP = true
	cfg.Seed = true
	cfg.DataDir = filepath.Join(cfg.DataDir, "server")
	cfg.Debug = true
	cfg.ForceEncryption = true
	os.Mkdir(cfg.DataDir, 0755)
	server, err := NewClient(cfg)
	require.NoError(t, err)
	defer server.Close()
	testutil.ExportStatusWriter(server, "s")
	magnet1 := makeMagnet(t, server, cfg.DataDir, "test1")
	makeMagnet(t, server, cfg.DataDir, "test2")
	cfg = TestingConfig()
	cfg.DisableUTP = true
	cfg.DataDir = filepath.Join(cfg.DataDir, "client")
	cfg.Debug = true
	cfg.ForceEncryption = true
	client, err := NewClient(cfg)
	require.NoError(t, err)
	defer client.Close()
	testutil.ExportStatusWriter(client, "c")
	tr, err := client.AddMagnet(magnet1)
	require.NoError(t, err)
	tr.AddPeers([]Peer{{
		IP:   missinggo.AddrIP(server.ListenAddr()),
		Port: missinggo.AddrPort(server.ListenAddr()),
	}})
	<-tr.GotInfo()
	tr.DownloadAll()
	client.WaitAll()
}

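// Occupy a fixed UDP port with a uTP socket first, then check that NewClient
// fails cleanly when asked to listen on the same address.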
func TestClientAddressInUse(t *testing.T) {
	s, _ := NewUtpSocket("udp", ":50007")
	if s != nil {
		defer s.Close()
	}
	cfg := TestingConfig()
	cfg.ListenAddr = ":50007"
	cl, err := NewClient(cfg)
	require.Error(t, err)
	require.Nil(t, cl)
}