2014-04-08 14:45:33 +08:00
|
|
|
package torrent
|
|
|
|
|
|
|
|
import (
|
2018-04-12 09:41:07 +08:00
|
|
|
"fmt"
|
2017-11-08 16:31:10 +08:00
|
|
|
"net"
|
2016-09-12 14:53:20 +08:00
|
|
|
"os"
|
|
|
|
"path/filepath"
|
2014-04-08 14:45:33 +08:00
|
|
|
"testing"
|
2014-08-28 06:03:55 +08:00
|
|
|
|
2016-09-12 14:53:20 +08:00
|
|
|
"github.com/anacrolix/missinggo"
|
2019-08-21 18:58:40 +08:00
|
|
|
"github.com/bradfitz/iter"
|
|
|
|
"github.com/stretchr/testify/assert"
|
|
|
|
"github.com/stretchr/testify/require"
|
|
|
|
|
2016-09-12 14:53:20 +08:00
|
|
|
"github.com/anacrolix/torrent/bencode"
|
2016-12-06 12:41:08 +08:00
|
|
|
"github.com/anacrolix/torrent/internal/testutil"
|
2016-08-30 12:21:50 +08:00
|
|
|
"github.com/anacrolix/torrent/metainfo"
|
2017-11-08 16:31:10 +08:00
|
|
|
pp "github.com/anacrolix/torrent/peer_protocol"
|
2016-09-12 15:11:32 +08:00
|
|
|
"github.com/anacrolix/torrent/storage"
|
2014-04-08 14:45:33 +08:00
|
|
|
)
|
|
|
|
|
2017-11-08 16:31:10 +08:00
|
|
|
func r(i, b, l pp.Integer) request {
|
2014-04-16 19:13:44 +08:00
|
|
|
return request{i, chunkSpec{b, l}}
|
2014-04-08 14:45:33 +08:00
|
|
|
}
|
|
|
|
|
2020-06-02 15:41:59 +08:00
|
|
|
// Check the given request is correct for various torrent offsets.
|
2014-04-08 14:45:33 +08:00
|
|
|
func TestTorrentRequest(t *testing.T) {
|
|
|
|
const s = 472183431 // Length of torrent.
|
|
|
|
for _, _case := range []struct {
|
|
|
|
off int64 // An offset into the torrent.
|
2020-06-02 15:41:59 +08:00
|
|
|
req request // The expected request. The zero value means !ok.
|
2014-04-08 14:45:33 +08:00
|
|
|
}{
|
|
|
|
// Invalid offset.
|
2014-04-16 19:13:44 +08:00
|
|
|
{-1, request{}},
|
2014-04-08 14:45:33 +08:00
|
|
|
{0, r(0, 0, 16384)},
|
|
|
|
// One before the end of a piece.
|
|
|
|
{1<<18 - 1, r(0, 1<<18-16384, 16384)},
|
|
|
|
// Offset beyond torrent length.
|
2014-04-16 19:13:44 +08:00
|
|
|
{472 * 1 << 20, request{}},
|
2014-04-08 14:45:33 +08:00
|
|
|
// One before the end of the torrent. Complicates the chunk length.
|
|
|
|
{s - 1, r((s-1)/(1<<18), (s-1)%(1<<18)/(16384)*(16384), 12935)},
|
|
|
|
{1, r(0, 0, 16384)},
|
|
|
|
// One before end of chunk.
|
|
|
|
{16383, r(0, 0, 16384)},
|
|
|
|
// Second chunk.
|
|
|
|
{16384, r(0, 16384, 16384)},
|
|
|
|
} {
|
|
|
|
req, ok := torrentOffsetRequest(472183431, 1<<18, 16384, _case.off)
|
2014-04-16 19:13:44 +08:00
|
|
|
if (_case.req == request{}) == ok {
|
2014-04-08 14:45:33 +08:00
|
|
|
t.Fatalf("expected %v, got %v", _case.req, req)
|
|
|
|
}
|
|
|
|
if req != _case.req {
|
|
|
|
t.Fatalf("expected %v, got %v", _case.req, req)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2014-08-28 06:03:55 +08:00
|
|
|
|
2014-09-11 18:31:31 +08:00
|
|
|
// Demonstrates that appending a slice's elements onto an empty slice yields
// an independent copy: mutating the duplicate must leave the source intact.
func TestAppendToCopySlice(t *testing.T) {
	src := []int{1, 2, 3}
	cpy := append([]int{}, src...)
	cpy[0] = 4
	if src[0] != 1 {
		t.FailNow()
	}
}
|
2014-12-02 06:34:45 +08:00
|
|
|
|
|
|
|
func TestTorrentString(t *testing.T) {
|
2016-04-03 16:40:43 +08:00
|
|
|
tor := &Torrent{}
|
|
|
|
s := tor.InfoHash().HexString()
|
2014-12-02 06:34:45 +08:00
|
|
|
if s != "0000000000000000000000000000000000000000" {
|
|
|
|
t.FailNow()
|
|
|
|
}
|
|
|
|
}
|
2016-08-30 12:21:50 +08:00
|
|
|
|
|
|
|
// This benchmark is from the observation that a lot of overlapping Readers on
|
|
|
|
// a large torrent with small pieces had a lot of overhead in recalculating
|
|
|
|
// piece priorities everytime a reader (possibly in another Torrent) changed.
|
|
|
|
func BenchmarkUpdatePiecePriorities(b *testing.B) {
|
2018-01-25 14:10:37 +08:00
|
|
|
const (
|
|
|
|
numPieces = 13410
|
|
|
|
pieceLength = 256 << 10
|
|
|
|
)
|
2020-01-10 14:22:46 +08:00
|
|
|
cl := &Client{config: TestingConfig()}
|
2018-01-29 16:16:55 +08:00
|
|
|
cl.initLogger()
|
2017-03-16 22:24:54 +08:00
|
|
|
t := cl.newTorrent(metainfo.Hash{}, nil)
|
2018-01-25 14:10:37 +08:00
|
|
|
require.NoError(b, t.setInfo(&metainfo.Info{
|
|
|
|
Pieces: make([]byte, metainfo.HashSize*numPieces),
|
|
|
|
PieceLength: pieceLength,
|
|
|
|
Length: pieceLength * numPieces,
|
|
|
|
}))
|
2016-08-30 12:21:50 +08:00
|
|
|
assert.EqualValues(b, 13410, t.numPieces())
|
|
|
|
for range iter.N(7) {
|
|
|
|
r := t.NewReader()
|
|
|
|
r.SetReadahead(32 << 20)
|
|
|
|
r.Seek(3500000, 0)
|
|
|
|
}
|
|
|
|
assert.Len(b, t.readers, 7)
|
2018-07-12 07:15:15 +08:00
|
|
|
for i := 0; i < int(t.numPieces()); i += 3 {
|
2020-01-10 12:09:21 +08:00
|
|
|
t._completedPieces.Set(i, true)
|
2016-08-30 12:21:50 +08:00
|
|
|
}
|
2018-01-25 14:18:36 +08:00
|
|
|
t.DownloadPieces(0, t.numPieces())
|
2016-08-30 12:21:50 +08:00
|
|
|
for range iter.N(b.N) {
|
2016-10-31 16:00:08 +08:00
|
|
|
t.updateAllPiecePriorities()
|
2016-08-30 12:21:50 +08:00
|
|
|
}
|
|
|
|
}
|
2016-09-12 14:53:20 +08:00
|
|
|
|
2016-09-12 15:11:32 +08:00
|
|
|
// Check that a torrent containing zero-length file(s) will start, and that
|
|
|
|
// they're created in the filesystem. The client storage is assumed to be
|
|
|
|
// file-based on the native filesystem based.
|
2018-06-16 15:01:21 +08:00
|
|
|
func testEmptyFilesAndZeroPieceLength(t *testing.T, cfg *ClientConfig) {
|
2017-06-01 20:57:08 +08:00
|
|
|
cl, err := NewClient(cfg)
|
2016-09-12 14:53:20 +08:00
|
|
|
require.NoError(t, err)
|
|
|
|
defer cl.Close()
|
|
|
|
ib, err := bencode.Marshal(metainfo.Info{
|
2016-09-12 15:11:32 +08:00
|
|
|
Name: "empty",
|
|
|
|
Length: 0,
|
|
|
|
PieceLength: 0,
|
2016-09-12 14:53:20 +08:00
|
|
|
})
|
|
|
|
require.NoError(t, err)
|
2017-06-01 20:57:08 +08:00
|
|
|
fp := filepath.Join(cfg.DataDir, "empty")
|
2016-09-12 14:53:20 +08:00
|
|
|
os.Remove(fp)
|
|
|
|
assert.False(t, missinggo.FilePathExists(fp))
|
|
|
|
tt, err := cl.AddTorrent(&metainfo.MetaInfo{
|
|
|
|
InfoBytes: ib,
|
|
|
|
})
|
|
|
|
require.NoError(t, err)
|
|
|
|
defer tt.Drop()
|
|
|
|
tt.DownloadAll()
|
|
|
|
require.True(t, cl.WaitAll())
|
|
|
|
assert.True(t, missinggo.FilePathExists(fp))
|
|
|
|
}
|
2016-09-12 15:11:32 +08:00
|
|
|
|
|
|
|
func TestEmptyFilesAndZeroPieceLengthWithFileStorage(t *testing.T) {
|
2017-06-01 20:57:08 +08:00
|
|
|
cfg := TestingConfig()
|
|
|
|
ci := storage.NewFile(cfg.DataDir)
|
|
|
|
defer ci.Close()
|
|
|
|
cfg.DefaultStorage = ci
|
|
|
|
testEmptyFilesAndZeroPieceLength(t, cfg)
|
2016-09-12 15:11:32 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
func TestEmptyFilesAndZeroPieceLengthWithMMapStorage(t *testing.T) {
|
2017-06-01 20:57:08 +08:00
|
|
|
cfg := TestingConfig()
|
|
|
|
ci := storage.NewMMap(cfg.DataDir)
|
|
|
|
defer ci.Close()
|
|
|
|
cfg.DefaultStorage = ci
|
|
|
|
testEmptyFilesAndZeroPieceLength(t, cfg)
|
2016-09-12 15:11:32 +08:00
|
|
|
}
|
2016-12-06 12:41:08 +08:00
|
|
|
|
|
|
|
func TestPieceHashFailed(t *testing.T) {
|
|
|
|
mi := testutil.GreetingMetaInfo()
|
2018-01-28 13:07:11 +08:00
|
|
|
cl := new(Client)
|
2019-08-21 18:44:12 +08:00
|
|
|
cl.config = TestingConfig()
|
2018-01-29 15:19:53 +08:00
|
|
|
cl.initLogger()
|
2018-01-28 13:07:11 +08:00
|
|
|
tt := cl.newTorrent(mi.HashInfoBytes(), badStorage{})
|
|
|
|
tt.setChunkSize(2)
|
2016-12-06 12:41:08 +08:00
|
|
|
require.NoError(t, tt.setInfoBytes(mi.InfoBytes))
|
2018-07-25 11:41:50 +08:00
|
|
|
tt.cl.lock()
|
2020-01-10 12:09:21 +08:00
|
|
|
tt.pieces[1]._dirtyChunks.AddRange(0, 3)
|
2016-12-06 12:41:08 +08:00
|
|
|
require.True(t, tt.pieceAllDirty(1))
|
2020-01-23 10:54:37 +08:00
|
|
|
tt.pieceHashed(1, false, nil)
|
2016-12-06 12:41:08 +08:00
|
|
|
// Dirty chunks should be cleared so we can try again.
|
|
|
|
require.False(t, tt.pieceAllDirty(1))
|
2018-07-25 11:41:50 +08:00
|
|
|
tt.cl.unlock()
|
2016-12-06 12:41:08 +08:00
|
|
|
}
|
2017-11-08 16:31:10 +08:00
|
|
|
|
|
|
|
// Check the behaviour of Torrent.Metainfo when metadata is not completed.
func TestTorrentMetainfoIncompleteMetadata(t *testing.T) {
	cfg := TestingConfig()
	cfg.Debug = true
	cl, err := NewClient(cfg)
	require.NoError(t, err)
	defer cl.Close()

	mi := testutil.GreetingMetaInfo()
	ih := mi.HashInfoBytes()

	// Add the torrent by infohash only, so the client has no info bytes yet.
	tt, _ := cl.AddTorrentInfoHash(ih)
	assert.Nil(t, tt.Metainfo().InfoBytes)
	assert.False(t, tt.haveAllMetadataPieces())

	// Dial the client's own listening port so we can act as a raw peer.
	nc, err := net.Dial("tcp", fmt.Sprintf(":%d", cl.LocalPort()))
	require.NoError(t, err)
	defer nc.Close()

	// Handshake advertising support for extended messaging (BEP 10).
	var pex PeerExtensionBits
	pex.SetBit(pp.ExtensionBitExtended, true)
	hr, err := pp.Handshake(nc, &ih, [20]byte{}, pex)
	require.NoError(t, err)
	// The client should reciprocate the extension bit and identify itself.
	assert.True(t, hr.PeerExtensionBits.GetBit(pp.ExtensionBitExtended))
	assert.EqualValues(t, cl.PeerID(), hr.PeerID)
	assert.EqualValues(t, ih, hr.Hash)

	// No extended handshake has been exchanged yet, so metadata size is unknown.
	assert.EqualValues(t, 0, tt.metadataSize())

	func() {
		// Hold the client lock while waiting on metadataChanged so the
		// signal cannot be missed between the write and the Wait.
		cl.lock()
		defer cl.unlock()
		// The write happens in a goroutine because this goroutine blocks in
		// Wait below while holding the lock; presumably metadataChanged is a
		// sync.Cond tied to the client lock — TODO confirm.
		go func() {
			// Send an extended handshake announcing the metadata size, which
			// should cause the client to allocate metadata storage.
			_, err = nc.Write(pp.Message{
				Type:       pp.Extended,
				ExtendedID: pp.HandshakeExtendedID,
				ExtendedPayload: func() []byte {
					d := map[string]interface{}{
						"metadata_size": len(mi.InfoBytes),
					}
					b, err := bencode.Marshal(d)
					if err != nil {
						panic(err)
					}
					return b
				}(),
			}.MustMarshalBinary())
			require.NoError(t, err)
		}()
		tt.metadataChanged.Wait()
	}()
	// Metadata buffer is allocated to the announced size but still all zero:
	// no metadata pieces have actually been delivered.
	assert.Equal(t, make([]byte, len(mi.InfoBytes)), tt.metadataBytes)
	assert.False(t, tt.haveAllMetadataPieces())
	assert.Nil(t, tt.Metainfo().InfoBytes)
}
|