package torrent

import (
"context"
"encoding/binary"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"path/filepath"
"sync"
"testing"
"time"

"github.com/anacrolix/dht"
_ "github.com/anacrolix/envpprof"
"github.com/anacrolix/missinggo"
"github.com/anacrolix/missinggo/filecache"
"github.com/bradfitz/iter"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/time/rate"

"github.com/anacrolix/torrent/bencode"
"github.com/anacrolix/torrent/internal/testutil"
"github.com/anacrolix/torrent/iplist"
"github.com/anacrolix/torrent/metainfo"
"github.com/anacrolix/torrent/storage"
)
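
// TestingConfig returns a Config suited to tests: it listens on an ephemeral
// localhost port and disables DHT, trackers and port forwarding, with data
// kept in a temporary directory.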
func TestingConfig() *Config {
return &Config{
ListenAddr: "localhost:0",
NoDHT: true,
DataDir: tempDir(),
DisableTrackers: true,
NoDefaultPortForwarding: true,
// Debug: true,
}
}
func TestClientDefault(t *testing.T) {
cl, err := NewClient(TestingConfig())
require.NoError(t, err)
cl.Close()
}
func TestBoltPieceCompletionClosedWhenClientClosed(t *testing.T) {
cfg := TestingConfig()
pc, err := storage.NewBoltPieceCompletion(cfg.DataDir)
require.NoError(t, err)
ci := storage.NewFileWithCompletion(cfg.DataDir, pc)
defer ci.Close()
cfg.DefaultStorage = ci
cl, err := NewClient(cfg)
require.NoError(t, err)
cl.Close()
// And again, https://github.com/anacrolix/torrent/issues/158
cl, err = NewClient(cfg)
require.NoError(t, err)
cl.Close()
}
func TestAddDropTorrent(t *testing.T) {
cl, err := NewClient(TestingConfig())
require.NoError(t, err)
defer cl.Close()
dir, mi := testutil.GreetingTestTorrent()
defer os.RemoveAll(dir)
tt, new, err := cl.AddTorrentSpec(TorrentSpecFromMetaInfo(mi))
require.NoError(t, err)
assert.True(t, new)
tt.SetMaxEstablishedConns(0)
tt.SetMaxEstablishedConns(1)
tt.Drop()
}
func TestAddTorrentNoSupportedTrackerSchemes(t *testing.T) {
// TODO?
t.SkipNow()
}
func TestAddTorrentNoUsableURLs(t *testing.T) {
// TODO?
t.SkipNow()
}
func TestAddPeersToUnknownTorrent(t *testing.T) {
// TODO?
t.SkipNow()
}
func TestPieceHashSize(t *testing.T) {
assert.Equal(t, 20, pieceHash.Size())
}
func TestTorrentInitialState(t *testing.T) {
dir, mi := testutil.GreetingTestTorrent()
defer os.RemoveAll(dir)
cl := &Client{}
cl.initLogger()
tor := cl.newTorrent(
mi.HashInfoBytes(),
storage.NewFileWithCompletion(tempDir(), storage.NewMapPieceCompletion()),
)
tor.setChunkSize(2)
tor.cl.mu.Lock()
err := tor.setInfoBytes(mi.InfoBytes)
tor.cl.mu.Unlock()
require.NoError(t, err)
require.Len(t, tor.pieces, 3)
tor.pendAllChunkSpecs(0)
tor.cl.mu.Lock()
assert.EqualValues(t, 3, tor.pieceNumPendingChunks(0))
tor.cl.mu.Unlock()
assert.EqualValues(t, chunkSpec{4, 1}, chunkIndexSpec(2, tor.pieceLength(0), tor.chunkSize))
}
func TestUnmarshalPEXMsg(t *testing.T) {
var m peerExchangeMessage
if err := bencode.Unmarshal([]byte("d5:added12:\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0ce"), &m); err != nil {
t.Fatal(err)
}
if len(m.Added) != 2 {
t.FailNow()
}
if m.Added[0].Port != 0x506 {
t.FailNow()
}
}
func TestReducedDialTimeout(t *testing.T) {
cfg := &Config{}
cfg.setDefaults()
for _, _case := range []struct {
Max time.Duration
HalfOpenLimit int
PendingPeers int
ExpectedReduced time.Duration
}{
{cfg.NominalDialTimeout, 40, 0, cfg.NominalDialTimeout},
{cfg.NominalDialTimeout, 40, 1, cfg.NominalDialTimeout},
{cfg.NominalDialTimeout, 40, 39, cfg.NominalDialTimeout},
{cfg.NominalDialTimeout, 40, 40, cfg.NominalDialTimeout / 2},
{cfg.NominalDialTimeout, 40, 80, cfg.NominalDialTimeout / 3},
{cfg.NominalDialTimeout, 40, 4000, cfg.NominalDialTimeout / 101},
} {
reduced := reducedDialTimeout(cfg.MinDialTimeout, _case.Max, _case.HalfOpenLimit, _case.PendingPeers)
expected := _case.ExpectedReduced
if expected < cfg.MinDialTimeout {
expected = cfg.MinDialTimeout
}
if reduced != expected {
t.Fatalf("expected %s, got %s", expected, reduced)
}
}
}
func TestUTPRawConn(t *testing.T) {
l, err := NewUtpSocket("udp", "")
require.NoError(t, err)
defer l.Close()
go func() {
for {
_, err := l.Accept()
if err != nil {
break
}
}
}()
// Connect a UTP peer to see if the RawConn will still work.
s, err := NewUtpSocket("udp", "")
require.NoError(t, err)
defer s.Close()
utpPeer, err := s.DialContext(context.Background(), "", fmt.Sprintf("localhost:%d", missinggo.AddrPort(l.Addr())))
require.NoError(t, err)
defer utpPeer.Close()
peer, err := net.ListenPacket("udp", ":0")
require.NoError(t, err)
defer peer.Close()
msgsReceived := 0
// How many messages to send. I've set this to double the channel buffer
// size in the raw packetConn.
const N = 200
readerStopped := make(chan struct{})
// The reader goroutine.
go func() {
defer close(readerStopped)
b := make([]byte, 500)
for i := 0; i < N; i++ {
n, _, err := l.ReadFrom(b)
require.NoError(t, err)
msgsReceived++
var d int
fmt.Sscan(string(b[:n]), &d)
assert.Equal(t, i, d)
}
}()
udpAddr, err := net.ResolveUDPAddr("udp", fmt.Sprintf("localhost:%d", missinggo.AddrPort(l.Addr())))
require.NoError(t, err)
for i := 0; i < N; i++ {
_, err := peer.WriteTo([]byte(fmt.Sprintf("%d", i)), udpAddr)
require.NoError(t, err)
time.Sleep(time.Millisecond)
}
select {
case <-readerStopped:
case <-time.After(time.Second):
t.Fatal("reader timed out")
}
if msgsReceived != N {
t.Fatalf("messages received: %d", msgsReceived)
}
}
func TestAddDropManyTorrents(t *testing.T) {
cl, err := NewClient(TestingConfig())
require.NoError(t, err)
defer cl.Close()
for i := range iter.N(1000) {
var spec TorrentSpec
binary.PutVarint(spec.InfoHash[:], int64(i))
tt, new, err := cl.AddTorrentSpec(&spec)
assert.NoError(t, err)
assert.True(t, new)
defer tt.Drop()
}
}
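// FileCacheClientStorageFactoryParams configures NewFileCacheClientStorageFactory.
// Wrapper adapts the file cache into a storage.ClientImpl, and Capacity bounds
// the cache size when SetCapacity is true.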
type FileCacheClientStorageFactoryParams struct {
Capacity int64
SetCapacity bool
Wrapper func(*filecache.Cache) storage.ClientImpl
}
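// NewFileCacheClientStorageFactory returns a storageFactory that creates a
// filecache.Cache in the given data directory, optionally caps its capacity,
// and wraps it with ps.Wrapper.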
func NewFileCacheClientStorageFactory(ps FileCacheClientStorageFactoryParams) storageFactory {
return func(dataDir string) storage.ClientImpl {
fc, err := filecache.NewCache(dataDir)
if err != nil {
panic(err)
}
if ps.SetCapacity {
fc.SetCapacity(ps.Capacity)
}
return ps.Wrapper(fc)
}
}
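// storageFactory constructs a storage.ClientImpl rooted at the given data directory.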
type storageFactory func(string) storage.ClientImpl
func TestClientTransferDefault(t *testing.T) {
testClientTransfer(t, testClientTransferParams{
ExportClientStatus: true,
LeecherStorage: NewFileCacheClientStorageFactory(FileCacheClientStorageFactoryParams{
Wrapper: fileCachePieceResourceStorage,
}),
})
}
func TestClientTransferRateLimitedUpload(t *testing.T) {
started := time.Now()
testClientTransfer(t, testClientTransferParams{
// We're uploading the 13-byte greeting torrent in 2-byte chunks, so the
// smallest burst we can run with is 2 bytes. The remaining bytes then take
// (13-2)/11 = 1s at 11 bytes/s, which the duration assertion below relies on.
SeederUploadRateLimiter: rate.NewLimiter(11, 2),
ExportClientStatus: true,
})
require.True(t, time.Since(started) > time.Second)
}
func TestClientTransferRateLimitedDownload(t *testing.T) {
testClientTransfer(t, testClientTransferParams{
LeecherDownloadRateLimiter: rate.NewLimiter(512, 512),
})
}
func fileCachePieceResourceStorage(fc *filecache.Cache) storage.ClientImpl {
return storage.NewResourcePieces(fc.AsResourceProvider())
}
func TestClientTransferSmallCache(t *testing.T) {
testClientTransfer(t, testClientTransferParams{
LeecherStorage: NewFileCacheClientStorageFactory(FileCacheClientStorageFactoryParams{
SetCapacity: true,
// Keep the capacity at least the piece length: going below it means a
// piece can never be fully stored, so it can never be hashed and completed.
Capacity: 5,
Wrapper: fileCachePieceResourceStorage,
}),
SetReadahead: true,
// Can't readahead too far or the cache will thrash and drop data we
// thought we had.
Readahead: 0,
ExportClientStatus: true,
})
}
func TestClientTransferVarious(t *testing.T) {
// Leecher storage
for _, ls := range []storageFactory{
NewFileCacheClientStorageFactory(FileCacheClientStorageFactoryParams{
Wrapper: fileCachePieceResourceStorage,
}),
storage.NewBoltDB,
} {
// Seeder storage
for _, ss := range []func(string) storage.ClientImpl{
storage.NewFile,
storage.NewMMap,
} {
for _, responsive := range []bool{false, true} {
testClientTransfer(t, testClientTransferParams{
Responsive: responsive,
SeederStorage: ss,
LeecherStorage: ls,
})
for _, readahead := range []int64{-1, 0, 1, 2, 3, 4, 5, 6, 9, 10, 11, 12, 13, 14, 15, 20} {
testClientTransfer(t, testClientTransferParams{
SeederStorage: ss,
Responsive: responsive,
SetReadahead: true,
Readahead: readahead,
LeecherStorage: ls,
})
}
}
}
}
}
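// testClientTransferParams controls how testClientTransfer configures its
// seeder and leecher clients.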
type testClientTransferParams struct {
Responsive bool
Readahead int64
SetReadahead bool
ExportClientStatus bool
LeecherStorage func(string) storage.ClientImpl
SeederStorage func(string) storage.ClientImpl
SeederUploadRateLimiter *rate.Limiter
LeecherDownloadRateLimiter *rate.Limiter
}
// Creates a seeder and a leecher, and ensures the data transfers when a read
// is attempted on the leecher.
func testClientTransfer(t *testing.T, ps testClientTransferParams) {
greetingTempDir, mi := testutil.GreetingTestTorrent()
defer os.RemoveAll(greetingTempDir)
// Create seeder and a Torrent.
cfg := TestingConfig()
cfg.Seed = true
cfg.UploadRateLimiter = ps.SeederUploadRateLimiter
// cfg.ListenAddr = "localhost:4000"
if ps.SeederStorage != nil {
cfg.DefaultStorage = ps.SeederStorage(greetingTempDir)
defer cfg.DefaultStorage.Close()
} else {
cfg.DataDir = greetingTempDir
}
seeder, err := NewClient(cfg)
require.NoError(t, err)
if ps.ExportClientStatus {
testutil.ExportStatusWriter(seeder, "s")
}
seederTorrent, _, _ := seeder.AddTorrentSpec(TorrentSpecFromMetaInfo(mi))
// Run Stats right after closing the Client. This would trigger the panic
// from #214, caused by calling RemoteAddr on closed uTP sockets.
defer seederTorrent.Stats()
defer seeder.Close()
seederTorrent.VerifyData()
// Create leecher and a Torrent.
leecherDataDir, err := ioutil.TempDir("", "")
require.NoError(t, err)
defer os.RemoveAll(leecherDataDir)
if ps.LeecherStorage == nil {
cfg.DataDir = leecherDataDir
} else {
cfg.DefaultStorage = ps.LeecherStorage(leecherDataDir)
}
cfg.DownloadRateLimiter = ps.LeecherDownloadRateLimiter
cfg.Seed = false
leecher, err := NewClient(cfg)
require.NoError(t, err)
defer leecher.Close()
if ps.ExportClientStatus {
testutil.ExportStatusWriter(leecher, "l")
}
leecherTorrent, new, err := leecher.AddTorrentSpec(func() (ret *TorrentSpec) {
ret = TorrentSpecFromMetaInfo(mi)
ret.ChunkSize = 2
return
}())
require.NoError(t, err)
assert.True(t, new)
// Now do some things with leecher and seeder.
leecherTorrent.AddClientPeer(seeder)
// The Torrent should not be interested in obtaining peers, so the one we
// just added should be the only one.
assert.False(t, leecherTorrent.Seeding())
assert.EqualValues(t, 1, leecherTorrent.Stats().PendingPeers)
r := leecherTorrent.NewReader()
defer r.Close()
if ps.Responsive {
r.SetResponsive()
}
if ps.SetReadahead {
r.SetReadahead(ps.Readahead)
}
assertReadAllGreeting(t, r)
assert.True(t, 13 <= seederTorrent.Stats().BytesWrittenData)
assert.True(t, 8 <= seederTorrent.Stats().ChunksWritten)
assert.True(t, 13 <= leecherTorrent.Stats().BytesReadData)
assert.True(t, 8 <= leecherTorrent.Stats().ChunksRead)
// Try reading through again for the cases where the torrent data size
// exceeds the size of the cache.
assertReadAllGreeting(t, r)
}
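// assertReadAllGreeting seeks r back to the start and checks that reading it
// yields the greeting file contents.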
func assertReadAllGreeting(t *testing.T, r io.ReadSeeker) {
pos, err := r.Seek(0, io.SeekStart)
assert.NoError(t, err)
assert.EqualValues(t, 0, pos)
_greeting, err := ioutil.ReadAll(r)
assert.NoError(t, err)
assert.EqualValues(t, testutil.GreetingFileContents, _greeting)
}
// Check that after completing leeching, a leecher transitions to seeding
// correctly. The clients are connected in a chain: Seeder <-> Leecher <-> LeecherLeecher.
func TestSeedAfterDownloading(t *testing.T) {
greetingTempDir, mi := testutil.GreetingTestTorrent()
defer os.RemoveAll(greetingTempDir)
cfg := TestingConfig()
cfg.Seed = true
cfg.DataDir = greetingTempDir
seeder, err := NewClient(cfg)
require.NoError(t, err)
defer seeder.Close()
testutil.ExportStatusWriter(seeder, "s")
seederTorrent, _, _ := seeder.AddTorrentSpec(TorrentSpecFromMetaInfo(mi))
seederTorrent.VerifyData()
cfg.DataDir, err = ioutil.TempDir("", "")
require.NoError(t, err)
defer os.RemoveAll(cfg.DataDir)
leecher, err := NewClient(cfg)
require.NoError(t, err)
defer leecher.Close()
testutil.ExportStatusWriter(leecher, "l")
cfg.Seed = false
cfg.DataDir, err = ioutil.TempDir("", "")
require.NoError(t, err)
defer os.RemoveAll(cfg.DataDir)
leecherLeecher, err := NewClient(cfg)
require.NoError(t, err)
defer leecherLeecher.Close()
testutil.ExportStatusWriter(leecherLeecher, "ll")
leecherGreeting, _, _ := leecher.AddTorrentSpec(func() (ret *TorrentSpec) {
ret = TorrentSpecFromMetaInfo(mi)
ret.ChunkSize = 2
return
}())
llg, _, _ := leecherLeecher.AddTorrentSpec(func() (ret *TorrentSpec) {
ret = TorrentSpecFromMetaInfo(mi)
ret.ChunkSize = 3
return
}())
// Simultaneously DownloadAll in Leecher, and read the contents
// consecutively in LeecherLeecher. This non-deterministically triggered a
// case where the leecher wouldn't unchoke the LeecherLeecher.
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
r := llg.NewReader()
defer r.Close()
b, err := ioutil.ReadAll(r)
require.NoError(t, err)
assert.EqualValues(t, testutil.GreetingFileContents, b)
}()
leecherGreeting.AddClientPeer(seeder)
leecherGreeting.AddClientPeer(leecherLeecher)
wg.Add(1)
go func() {
defer wg.Done()
leecherGreeting.DownloadAll()
leecher.WaitAll()
}()
wg.Wait()
}
func TestMergingTrackersByAddingSpecs(t *testing.T) {
cl, err := NewClient(TestingConfig())
require.NoError(t, err)
defer cl.Close()
spec := TorrentSpec{}
T, new, _ := cl.AddTorrentSpec(&spec)
if !new {
t.FailNow()
}
spec.Trackers = [][]string{{"http://a"}, {"udp://b"}}
_, new, _ = cl.AddTorrentSpec(&spec)
assert.False(t, new)
assert.EqualValues(t, [][]string{{"http://a"}, {"udp://b"}}, T.metainfo.AnnounceList)
// Because trackers are disabled in TestingConfig.
assert.EqualValues(t, 0, len(T.trackerAnnouncers))
}
// We read from a piece which is marked completed, but is missing data.
func TestCompletedPieceWrongSize(t *testing.T) {
cfg := TestingConfig()
cfg.DefaultStorage = badStorage{}
cl, err := NewClient(cfg)
require.NoError(t, err)
defer cl.Close()
info := metainfo.Info{
PieceLength: 15,
Pieces: make([]byte, 20),
Files: []metainfo.FileInfo{
{Path: []string{"greeting"}, Length: 13},
},
}
b, err := bencode.Marshal(info)
require.NoError(t, err)
tt, new, err := cl.AddTorrentSpec(&TorrentSpec{
InfoBytes: b,
InfoHash: metainfo.HashBytes(b),
})
require.NoError(t, err)
defer tt.Drop()
assert.True(t, new)
r := tt.NewReader()
defer r.Close()
b, err = ioutil.ReadAll(r)
assert.Len(t, b, 13)
assert.NoError(t, err)
}
func BenchmarkAddLargeTorrent(b *testing.B) {
cfg := TestingConfig()
cfg.DisableTCP = true
cfg.DisableUTP = true
cfg.ListenAddr = "redonk"
cl, err := NewClient(cfg)
require.NoError(b, err)
defer cl.Close()
for range iter.N(b.N) {
t, err := cl.AddTorrentFromFile("testdata/bootstrap.dat.torrent")
if err != nil {
b.Fatal(err)
}
t.Drop()
}
}
func TestResponsive(t *testing.T) {
seederDataDir, mi := testutil.GreetingTestTorrent()
defer os.RemoveAll(seederDataDir)
cfg := TestingConfig()
cfg.Seed = true
cfg.DataDir = seederDataDir
seeder, err := NewClient(cfg)
require.Nil(t, err)
defer seeder.Close()
seederTorrent, _, _ := seeder.AddTorrentSpec(TorrentSpecFromMetaInfo(mi))
seederTorrent.VerifyData()
leecherDataDir, err := ioutil.TempDir("", "")
require.Nil(t, err)
defer os.RemoveAll(leecherDataDir)
cfg = TestingConfig()
cfg.DataDir = leecherDataDir
leecher, err := NewClient(cfg)
require.Nil(t, err)
defer leecher.Close()
leecherTorrent, _, _ := leecher.AddTorrentSpec(func() (ret *TorrentSpec) {
ret = TorrentSpecFromMetaInfo(mi)
ret.ChunkSize = 2
return
}())
leecherTorrent.AddClientPeer(seeder)
reader := leecherTorrent.NewReader()
defer reader.Close()
reader.SetReadahead(0)
reader.SetResponsive()
b := make([]byte, 2)
_, err = reader.Seek(3, io.SeekStart)
require.NoError(t, err)
_, err = io.ReadFull(reader, b)
assert.Nil(t, err)
assert.EqualValues(t, "lo", string(b))
_, err = reader.Seek(11, io.SeekStart)
require.NoError(t, err)
n, err := io.ReadFull(reader, b)
assert.Nil(t, err)
assert.EqualValues(t, 2, n)
assert.EqualValues(t, "d\n", string(b))
}
func TestTorrentDroppedDuringResponsiveRead(t *testing.T) {
seederDataDir, mi := testutil.GreetingTestTorrent()
defer os.RemoveAll(seederDataDir)
cfg := TestingConfig()
cfg.Seed = true
cfg.DataDir = seederDataDir
seeder, err := NewClient(cfg)
require.Nil(t, err)
defer seeder.Close()
seederTorrent, _, _ := seeder.AddTorrentSpec(TorrentSpecFromMetaInfo(mi))
seederTorrent.VerifyData()
leecherDataDir, err := ioutil.TempDir("", "")
require.Nil(t, err)
defer os.RemoveAll(leecherDataDir)
cfg = TestingConfig()
cfg.DataDir = leecherDataDir
leecher, err := NewClient(cfg)
require.Nil(t, err)
defer leecher.Close()
leecherTorrent, _, _ := leecher.AddTorrentSpec(func() (ret *TorrentSpec) {
ret = TorrentSpecFromMetaInfo(mi)
ret.ChunkSize = 2
return
}())
leecherTorrent.AddClientPeer(seeder)
reader := leecherTorrent.NewReader()
defer reader.Close()
reader.SetReadahead(0)
reader.SetResponsive()
b := make([]byte, 2)
_, err = reader.Seek(3, io.SeekStart)
require.NoError(t, err)
_, err = io.ReadFull(reader, b)
assert.Nil(t, err)
assert.EqualValues(t, "lo", string(b))
go leecherTorrent.Drop()
_, err = reader.Seek(11, io.SeekStart)
require.NoError(t, err)
n, err := reader.Read(b)
assert.EqualError(t, err, "torrent closed")
assert.EqualValues(t, 0, n)
}
func TestDHTInheritBlocklist(t *testing.T) {
ipl := iplist.New(nil)
require.NotNil(t, ipl)
cfg := TestingConfig()
cfg.IPBlocklist = ipl
cfg.NoDHT = false
cl, err := NewClient(cfg)
require.NoError(t, err)
defer cl.Close()
numServers := 0
cl.eachDhtServer(func(s *dht.Server) {
assert.Equal(t, ipl, s.IPBlocklist())
numServers++
})
assert.EqualValues(t, 2, numServers)
}
// Check that torrent fields, such as the info, are merged in by a subsequent
// AddTorrentSpec for the same infohash.
func TestAddTorrentSpecMerging(t *testing.T) {
cl, err := NewClient(TestingConfig())
require.NoError(t, err)
defer cl.Close()
dir, mi := testutil.GreetingTestTorrent()
defer os.RemoveAll(dir)
tt, new, err := cl.AddTorrentSpec(&TorrentSpec{
InfoHash: mi.HashInfoBytes(),
})
require.NoError(t, err)
require.True(t, new)
require.Nil(t, tt.Info())
_, new, err = cl.AddTorrentSpec(TorrentSpecFromMetaInfo(mi))
require.NoError(t, err)
require.False(t, new)
require.NotNil(t, tt.Info())
}
func TestTorrentDroppedBeforeGotInfo(t *testing.T) {
dir, mi := testutil.GreetingTestTorrent()
os.RemoveAll(dir)
cl, _ := NewClient(TestingConfig())
defer cl.Close()
tt, _, _ := cl.AddTorrentSpec(&TorrentSpec{
InfoHash: mi.HashInfoBytes(),
})
tt.Drop()
assert.EqualValues(t, 0, len(cl.Torrents()))
select {
case <-tt.GotInfo():
t.FailNow()
default:
}
}
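// writeTorrentData writes b into ts piece by piece so that the storage holds
// the complete torrent data.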
func writeTorrentData(ts *storage.Torrent, info metainfo.Info, b []byte) {
for i := range iter.N(info.NumPieces()) {
p := info.Piece(i)
ts.Piece(p).WriteAt(b[p.Offset():p.Offset()+p.Length()], 0)
}
}
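// testAddTorrentPriorPieceCompletion seeds a file-cache-backed storage with the
// greeting data, optionally marks every piece complete, then verifies that
// AddTorrent reports the corresponding piece completion state.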
func testAddTorrentPriorPieceCompletion(t *testing.T, alreadyCompleted bool, csf func(*filecache.Cache) storage.ClientImpl) {
fileCacheDir, err := ioutil.TempDir("", "")
require.NoError(t, err)
defer os.RemoveAll(fileCacheDir)
fileCache, err := filecache.NewCache(fileCacheDir)
require.NoError(t, err)
greetingDataTempDir, greetingMetainfo := testutil.GreetingTestTorrent()
defer os.RemoveAll(greetingDataTempDir)
filePieceStore := csf(fileCache)
defer filePieceStore.Close()
info, err := greetingMetainfo.UnmarshalInfo()
require.NoError(t, err)
ih := greetingMetainfo.HashInfoBytes()
greetingData, err := storage.NewClient(filePieceStore).OpenTorrent(&info, ih)
require.NoError(t, err)
writeTorrentData(greetingData, info, []byte(testutil.GreetingFileContents))
// require.Equal(t, len(testutil.GreetingFileContents), written)
// require.NoError(t, err)
for i := 0; i < info.NumPieces(); i++ {
p := info.Piece(i)
if alreadyCompleted {
require.NoError(t, greetingData.Piece(p).MarkComplete())
}
}
cfg := TestingConfig()
// TODO: Disable network option?
cfg.DisableTCP = true
cfg.DisableUTP = true
cfg.DefaultStorage = filePieceStore
cl, err := NewClient(cfg)
require.NoError(t, err)
defer cl.Close()
tt, err := cl.AddTorrent(greetingMetainfo)
require.NoError(t, err)
psrs := tt.PieceStateRuns()
assert.Len(t, psrs, 1)
assert.EqualValues(t, 3, psrs[0].Length)
assert.Equal(t, alreadyCompleted, psrs[0].Complete)
if alreadyCompleted {
r := tt.NewReader()
b, err := ioutil.ReadAll(r)
assert.NoError(t, err)
assert.EqualValues(t, testutil.GreetingFileContents, b)
}
}
func TestAddTorrentPiecesAlreadyCompleted(t *testing.T) {
testAddTorrentPriorPieceCompletion(t, true, fileCachePieceResourceStorage)
}
func TestAddTorrentPiecesNotAlreadyCompleted(t *testing.T) {
testAddTorrentPriorPieceCompletion(t, false, fileCachePieceResourceStorage)
}
func TestAddMetainfoWithNodes(t *testing.T) {
cfg := TestingConfig()
cfg.ListenAddr = ":0"
cfg.NoDHT = false
cfg.DhtStartingNodes = func() ([]dht.Addr, error) { return nil, nil }
// For now, we want to just jam the nodes into the table, without
// verifying them first. Also the DHT code doesn't support mixing secure
// and insecure nodes if security is enabled (yet).
// cfg.DHTConfig.NoSecurity = true
cl, err := NewClient(cfg)
require.NoError(t, err)
defer cl.Close()
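// sum tallies the nodes and outstanding transactions across all DHT servers.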
sum := func() (ret int) {
cl.eachDhtServer(func(s *dht.Server) {
ret += s.NumNodes()
ret += s.Stats().OutstandingTransactions
})
return
}
assert.EqualValues(t, 0, sum())
tt, err := cl.AddTorrentFromFile("metainfo/testdata/issue_65a.torrent")
require.NoError(t, err)
// Nodes are not added to or exposed in the Torrent's metainfo, so as a rough
// check we verify the announce-list instead. TODO: Add nodes.
assert.Len(t, tt.metainfo.AnnounceList, 5)
// There are 6 nodes in the torrent file.
assert.EqualValues(t, 6*len(cl.dhtServers), sum())
}
type testDownloadCancelParams struct {
ExportClientStatus bool
SetLeecherStorageCapacity bool
LeecherStorageCapacity int64
Cancel bool
}
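// testDownloadCancel requests every piece on a leecher, optionally cancels the
// requests straight away, and then checks which pieces complete within a short
// window.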
func testDownloadCancel(t *testing.T, ps testDownloadCancelParams) {
greetingTempDir, mi := testutil.GreetingTestTorrent()
defer os.RemoveAll(greetingTempDir)
cfg := TestingConfig()
cfg.Seed = true
cfg.DataDir = greetingTempDir
seeder, err := NewClient(cfg)
require.NoError(t, err)
defer seeder.Close()
if ps.ExportClientStatus {
testutil.ExportStatusWriter(seeder, "s")
}
seederTorrent, _, _ := seeder.AddTorrentSpec(TorrentSpecFromMetaInfo(mi))
seederTorrent.VerifyData()
leecherDataDir, err := ioutil.TempDir("", "")
require.NoError(t, err)
defer os.RemoveAll(leecherDataDir)
fc, err := filecache.NewCache(leecherDataDir)
require.NoError(t, err)
if ps.SetLeecherStorageCapacity {
fc.SetCapacity(ps.LeecherStorageCapacity)
}
cfg.DefaultStorage = storage.NewResourcePieces(fc.AsResourceProvider())
cfg.DataDir = leecherDataDir
leecher, _ := NewClient(cfg)
defer leecher.Close()
if ps.ExportClientStatus {
testutil.ExportStatusWriter(leecher, "l")
}
leecherGreeting, new, err := leecher.AddTorrentSpec(func() (ret *TorrentSpec) {
ret = TorrentSpecFromMetaInfo(mi)
ret.ChunkSize = 2
return
}())
require.NoError(t, err)
assert.True(t, new)
psc := leecherGreeting.SubscribePieceStateChanges()
defer psc.Close()
leecherGreeting.cl.mu.Lock()
leecherGreeting.downloadPiecesLocked(0, leecherGreeting.numPieces())
2016-02-26 19:10:29 +08:00
if ps.Cancel {
leecherGreeting.cancelPiecesLocked(0, leecherGreeting.NumPieces())
2016-02-26 19:10:29 +08:00
}
leecherGreeting.cl.mu.Unlock()
leecherGreeting.AddClientPeer(seeder)
completes := make(map[int]bool, 3)
values:
for {
// started := time.Now()
select {
case _v := <-psc.Values:
// log.Print(time.Since(started))
v := _v.(PieceStateChange)
completes[v.Index] = v.Complete
case <-time.After(100 * time.Millisecond):
break values
}
}
if ps.Cancel {
assert.EqualValues(t, map[int]bool{0: false, 1: false, 2: false}, completes)
} else {
assert.EqualValues(t, map[int]bool{0: true, 1: true, 2: true}, completes)
}
}
func TestTorrentDownloadAll(t *testing.T) {
testDownloadCancel(t, testDownloadCancelParams{})
}
func TestTorrentDownloadAllThenCancel(t *testing.T) {
testDownloadCancel(t, testDownloadCancelParams{
Cancel: true,
})
}
// Ensure that it's an error for a peer to send an invalid have message.
func TestPeerInvalidHave(t *testing.T) {
cl, err := NewClient(TestingConfig())
require.NoError(t, err)
defer cl.Close()
info := metainfo.Info{
PieceLength: 1,
Pieces: make([]byte, 20),
Files: []metainfo.FileInfo{{Length: 1}},
}
infoBytes, err := bencode.Marshal(info)
require.NoError(t, err)
tt, _new, err := cl.AddTorrentSpec(&TorrentSpec{
InfoBytes: infoBytes,
InfoHash: metainfo.HashBytes(infoBytes),
Storage: badStorage{},
})
require.NoError(t, err)
assert.True(t, _new)
defer tt.Drop()
cn := &connection{
t: tt,
}
assert.NoError(t, cn.peerSentHave(0))
assert.Error(t, cn.peerSentHave(1))
}
func TestPieceCompletedInStorageButNotClient(t *testing.T) {
greetingTempDir, greetingMetainfo := testutil.GreetingTestTorrent()
defer os.RemoveAll(greetingTempDir)
cfg := TestingConfig()
cfg.DataDir = greetingTempDir
seeder, err := NewClient(cfg)
require.NoError(t, err)
seeder.AddTorrentSpec(&TorrentSpec{
InfoBytes: greetingMetainfo.InfoBytes,
})
}
// Check that when the listen port is 0, all the protocols listened on have
// the same port, and it isn't zero.
func TestClientDynamicListenPortAllProtocols(t *testing.T) {
cl, err := NewClient(TestingConfig())
require.NoError(t, err)
defer cl.Close()
port := cl.LocalPort()
assert.NotEqual(t, 0, port)
cl.eachListener(func(s socket) bool {
assert.Equal(t, port, missinggo.AddrPort(s.Addr()))
return true
})
}
func TestClientDynamicListenTCPOnly(t *testing.T) {
cfg := TestingConfig()
cfg.DisableUTP = true
cl, err := NewClient(cfg)
require.NoError(t, err)
defer cl.Close()
assert.NotEqual(t, 0, cl.LocalPort())
cl.eachListener(func(s socket) bool {
assert.True(t, isTcpNetwork(s.Addr().Network()))
return true
})
}
func TestClientDynamicListenUTPOnly(t *testing.T) {
cfg := TestingConfig()
cfg.DisableTCP = true
cl, err := NewClient(cfg)
require.NoError(t, err)
defer cl.Close()
assert.NotEqual(t, 0, cl.LocalPort())
cl.eachListener(func(s socket) bool {
assert.True(t, isUtpNetwork(s.Addr().Network()))
return true
})
}
func TestClientDynamicListenPortNoProtocols(t *testing.T) {
cfg := TestingConfig()
cfg.DisableTCP = true
cfg.DisableUTP = true
cl, err := NewClient(cfg)
require.NoError(t, err)
defer cl.Close()
assert.Equal(t, 0, cl.LocalPort())
}
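// totalConns counts the established connections across all the given torrents.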
func totalConns(tts []*Torrent) (ret int) {
for _, tt := range tts {
tt.cl.mu.Lock()
ret += len(tt.conns)
tt.cl.mu.Unlock()
}
return
}
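// TestSetMaxEstablishedConn wires three clients to each other and checks that
// raising and lowering the connection limit on one torrent changes the total
// number of established connections accordingly.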
func TestSetMaxEstablishedConn(t *testing.T) {
ss := testutil.NewStatusServer(t)
defer ss.Close()
var tts []*Torrent
ih := testutil.GreetingMetaInfo().HashInfoBytes()
for i := range iter.N(3) {
cl, err := NewClient(TestingConfig())
require.NoError(t, err)
defer cl.Close()
tt, _ := cl.AddTorrentInfoHash(ih)
tt.SetMaxEstablishedConns(2)
ss.HandleStatusWriter(cl, fmt.Sprintf("/%d", i))
tts = append(tts, tt)
}
addPeers := func() {
for _, tt := range tts {
for _, _tt := range tts {
// if tt != _tt {
tt.AddClientPeer(_tt.cl)
// }
}
}
}
waitTotalConns := func(num int) {
for totalConns(tts) != num {
addPeers()
time.Sleep(time.Millisecond)
}
}
addPeers()
waitTotalConns(6)
tts[0].SetMaxEstablishedConns(1)
waitTotalConns(4)
tts[0].SetMaxEstablishedConns(0)
waitTotalConns(2)
tts[0].SetMaxEstablishedConns(1)
addPeers()
waitTotalConns(4)
tts[0].SetMaxEstablishedConns(2)
addPeers()
waitTotalConns(6)
}
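// makeMagnet creates a single-file torrent under dir, adds it to cl as a
// seeding torrent, verifies its data, and returns the magnet URI.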
func makeMagnet(t *testing.T, cl *Client, dir string, name string) string {
os.MkdirAll(dir, 0770)
file, err := os.Create(filepath.Join(dir, name))
require.NoError(t, err)
file.Write([]byte(name))
file.Close()
mi := metainfo.MetaInfo{}
mi.SetDefaults()
info := metainfo.Info{PieceLength: 256 * 1024}
err = info.BuildFromFilePath(filepath.Join(dir, name))
require.NoError(t, err)
mi.InfoBytes, err = bencode.Marshal(info)
require.NoError(t, err)
magnet := mi.Magnet(name, mi.HashInfoBytes()).String()
tr, err := cl.AddTorrent(&mi)
require.NoError(t, err)
require.True(t, tr.Seeding())
tr.VerifyData()
return magnet
}
// https://github.com/anacrolix/torrent/issues/114
func TestMultipleTorrentsWithEncryption(t *testing.T) {
cfg := TestingConfig()
cfg.DisableUTP = true
cfg.Seed = true
cfg.DataDir = filepath.Join(cfg.DataDir, "server")
cfg.ForceEncryption = true
os.Mkdir(cfg.DataDir, 0755)
server, err := NewClient(cfg)
require.NoError(t, err)
defer server.Close()
testutil.ExportStatusWriter(server, "s")
magnet1 := makeMagnet(t, server, cfg.DataDir, "test1")
makeMagnet(t, server, cfg.DataDir, "test2")
cfg = TestingConfig()
cfg.DisableUTP = true
cfg.DataDir = filepath.Join(cfg.DataDir, "client")
cfg.ForceEncryption = true
client, err := NewClient(cfg)
require.NoError(t, err)
defer client.Close()
testutil.ExportStatusWriter(client, "c")
tr, err := client.AddMagnet(magnet1)
require.NoError(t, err)
tr.AddClientPeer(server)
<-tr.GotInfo()
tr.DownloadAll()
client.WaitAll()
}
func TestClientAddressInUse(t *testing.T) {
s, _ := NewUtpSocket("udp", ":50007")
if s != nil {
defer s.Close()
}
cfg := TestingConfig()
cfg.ListenAddr = ":50007"
cl, err := NewClient(cfg)
require.Error(t, err)
require.Nil(t, cl)
}