2014-05-28 23:27:48 +08:00
|
|
|
package torrent
|
|
|
|
|
|
|
|
import (
|
2022-01-12 12:01:33 +08:00
|
|
|
"encoding/binary"
|
2021-09-30 10:01:34 +08:00
|
|
|
"errors"
|
2022-01-12 12:01:33 +08:00
|
|
|
"fmt"
|
2022-06-25 21:16:58 +08:00
|
|
|
"golang.org/x/time/rate"
|
2016-05-07 16:56:44 +08:00
|
|
|
"io"
|
2018-06-11 10:20:51 +08:00
|
|
|
"net"
|
2016-09-11 13:43:57 +08:00
|
|
|
"sync"
|
2014-05-28 23:27:48 +08:00
|
|
|
"testing"
|
2014-12-26 14:17:00 +08:00
|
|
|
|
2020-05-03 17:31:20 +08:00
|
|
|
"github.com/frankban/quicktest"
|
2021-12-17 16:12:10 +08:00
|
|
|
qt "github.com/frankban/quicktest"
|
2019-08-21 18:58:40 +08:00
|
|
|
"github.com/stretchr/testify/require"
|
|
|
|
|
2016-09-11 13:43:57 +08:00
|
|
|
"github.com/anacrolix/torrent/metainfo"
|
2016-11-22 11:20:48 +08:00
|
|
|
pp "github.com/anacrolix/torrent/peer_protocol"
|
2016-09-11 13:43:57 +08:00
|
|
|
"github.com/anacrolix/torrent/storage"
|
2014-05-28 23:27:48 +08:00
|
|
|
)
|
|
|
|
|
2016-05-11 21:50:21 +08:00
|
|
|
// Ensure that no race exists between sending a bitfield, and a subsequent
|
|
|
|
// Have that would potentially alter it.
|
|
|
|
// TestSendBitfieldThenHave ensures that no race exists between sending a
// bitfield, and a subsequent Have that would potentially alter it: the
// bitfield written to the wire must reflect the completed pieces at the time
// it was posted, with the later piece arriving as a separate Have message.
func TestSendBitfieldThenHave(t *testing.T) {
	var cl Client
	cl.init(TestingConfig(t))
	cl.initLogger()
	c := cl.newConnection(nil, false, nil, "io.Pipe", "")
	c.setTorrent(cl.newTorrent(metainfo.Hash{}, nil))
	// Three pieces' worth of hashes gives the torrent a 3-piece bitfield.
	if err := c.t.setInfo(&metainfo.Info{Pieces: make([]byte, metainfo.HashSize*3)}); err != nil {
		t.Log(err)
	}
	r, w := io.Pipe()
	// c.r = r
	c.w = w
	c.startMessageWriter()
	c.locker().Lock()
	// Piece 1 is complete before the bitfield is posted.
	c.t._completedPieces.Add(1)
	c.postBitfield( /*[]bool{false, true, false}*/ )
	c.locker().Unlock()
	c.locker().Lock()
	// Piece 2 becomes available only after the bitfield was queued.
	c.have(2)
	c.locker().Unlock()
	// 15 bytes = bitfield message (4-byte length prefix, id, 1 payload byte)
	// + have message (4-byte length prefix, id, 4-byte piece index).
	b := make([]byte, 15)
	n, err := io.ReadFull(r, b)
	c.locker().Lock()
	// This will cause connection.writer to terminate.
	c.closed.Set()
	c.locker().Unlock()
	require.NoError(t, err)
	require.EqualValues(t, 15, n)
	// Here we see that the bitfield doesn't have piece 2 set, as that should
	// arrive in the following Have message. The bitfield payload is 0x40
	// (only piece 1 set); the have message carries index 2.
	require.EqualValues(t, "\x00\x00\x00\x02\x05@\x00\x00\x00\x05\x04\x00\x00\x00\x02", string(b))
}
|
2016-09-11 13:43:57 +08:00
|
|
|
|
|
|
|
// torrentStorage is a stub piece-storage implementation used by
// BenchmarkConnectionMainReadLoop. It discards all data and exposes writeSem
// so the benchmark can block until each chunk write has arrived.
type torrentStorage struct {
	// writeSem is used as a binary semaphore rather than a conventional
	// mutex: the benchmark's sender Lock()s it before writing a chunk to the
	// wire, and WriteAt Unlock()s it once the chunk reaches "storage".
	writeSem sync.Mutex
}
|
|
|
|
|
|
|
|
// Close satisfies the storage close hook; the stub holds no resources.
func (me *torrentStorage) Close() error { return nil }
|
|
|
|
|
|
|
|
// Piece returns the stub itself for every piece: all pieces share the same
// no-op implementation and the single writeSem.
func (me *torrentStorage) Piece(mp metainfo.Piece) storage.PieceImpl {
	return me
}
|
|
|
|
|
2017-10-12 13:09:32 +08:00
|
|
|
// Completion always reports the zero storage.Completion, so no piece is ever
// considered complete by the storage layer.
func (me *torrentStorage) Completion() storage.Completion {
	return storage.Completion{}
}
|
|
|
|
|
|
|
|
// MarkComplete is a no-op; the stub tracks no completion state.
func (me *torrentStorage) MarkComplete() error {
	return nil
}
|
|
|
|
|
|
|
|
// MarkNotComplete is a no-op; the stub tracks no completion state.
func (me *torrentStorage) MarkNotComplete() error {
	return nil
}
|
|
|
|
|
|
|
|
// ReadAt panics: the benchmark only ever writes chunks, so any read through
// this stub indicates a bug in the code under test.
func (me *torrentStorage) ReadAt([]byte, int64) (int, error) {
	panic("shouldn't be called")
}
|
|
|
|
|
|
|
|
// WriteAt pretends to persist a chunk. It asserts that writes always arrive
// as whole chunks of defaultChunkSize, then releases writeSem so the
// benchmark's sender goroutine may proceed with the next chunk.
func (me *torrentStorage) WriteAt(b []byte, _ int64) (int, error) {
	if len(b) != defaultChunkSize {
		// A differently-sized write means the read loop mishandled the
		// incoming piece message.
		panic(len(b))
	}
	// Signal the benchmark that this chunk made it all the way to storage.
	me.writeSem.Unlock()
	return len(b), nil
}
|
|
|
|
|
|
|
|
// BenchmarkConnectionMainReadLoop measures PeerConn.mainReadLoop throughput
// by streaming b.N maximum-size piece messages through an in-memory net.Pipe
// into the torrentStorage stub. Bytes/op is the piece payload size.
func BenchmarkConnectionMainReadLoop(b *testing.B) {
	c := quicktest.New(b)
	var cl Client
	cl.init(&ClientConfig{
		DownloadRateLimiter: unlimited,
	})
	cl.initLogger()
	ts := &torrentStorage{}
	t := cl.newTorrent(metainfo.Hash{}, nil)
	t.initialPieceCheckDisabled = true
	// A single 1 MiB piece; the 20 bytes are one piece hash.
	require.NoError(b, t.setInfo(&metainfo.Info{
		Pieces:      make([]byte, 20),
		Length:      1 << 20,
		PieceLength: 1 << 20,
	}))
	// Route piece storage through the stub so writes only toggle writeSem.
	t.storage = &storage.Torrent{TorrentImpl: storage.TorrentImpl{Piece: ts.Piece, Close: ts.Close}}
	t.onSetInfo()
	t._pendingPieces.Add(0)
	r, w := net.Pipe()
	cn := cl.newConnection(r, true, r.RemoteAddr(), r.RemoteAddr().Network(), regularNetConnPeerConnConnString(r))
	cn.setTorrent(t)
	mrlErrChan := make(chan error)
	msg := pp.Message{
		Type:  pp.Piece,
		Piece: make([]byte, defaultChunkSize),
	}
	// Reader goroutine: runs the loop under test until the writer closes the
	// pipe. NOTE(review): mainReadLoop is entered with the client lock held
	// and there is no matching unlock here — presumably it manages the lock
	// itself; confirm against its implementation.
	go func() {
		cl.lock()
		err := cn.mainReadLoop()
		if err != nil {
			mrlErrChan <- err
		}
		close(mrlErrChan)
	}()
	wb := msg.MustMarshalBinary()
	b.SetBytes(int64(len(msg.Piece)))
	// Writer goroutine: feeds b.N copies of the marshalled piece message,
	// waiting for each chunk to reach storage before sending the next.
	go func() {
		ts.writeSem.Lock()
		for i := 0; i < b.N; i += 1 {
			cl.lock()
			// The chunk must be written to storage everytime, to ensure the
			// writeSem is unlocked.
			t.pendAllChunkSpecs(0)
			// Reset expected-chunk accounting so each identical incoming
			// piece message is treated as a valid, requested chunk.
			cn.validReceiveChunks = map[RequestIndex]int{
				t.requestIndexFromRequest(newRequestFromMessage(&msg)): 1,
			}
			cl.unlock()
			n, err := w.Write(wb)
			require.NoError(b, err)
			require.EqualValues(b, len(wb), n)
			// Blocks until torrentStorage.WriteAt releases it.
			ts.writeSem.Lock()
		}
		// Closing the write end makes mainReadLoop observe EOF and exit.
		if err := w.Close(); err != nil {
			panic(err)
		}
	}()
	// Drain the read loop's exit status; EOF is the expected shutdown path.
	mrlErr := <-mrlErrChan
	if mrlErr != nil && !errors.Is(mrlErr, io.EOF) {
		c.Fatal(mrlErr)
	}
	// Every message sent must have been counted as a useful chunk.
	c.Assert(cn._stats.ChunksReadUseful.Int64(), quicktest.Equals, int64(b.N))
}
|
2019-12-11 19:45:04 +08:00
|
|
|
|
2020-04-09 00:03:29 +08:00
|
|
|
func TestConnPexPeerFlags(t *testing.T) {
|
|
|
|
var (
|
|
|
|
tcpAddr = &net.TCPAddr{IP: net.IPv6loopback, Port: 4848}
|
|
|
|
udpAddr = &net.UDPAddr{IP: net.IPv6loopback, Port: 4848}
|
|
|
|
)
|
2021-11-08 11:47:01 +08:00
|
|
|
testcases := []struct {
|
2019-12-11 19:45:04 +08:00
|
|
|
conn *PeerConn
|
|
|
|
f pp.PexPeerFlags
|
|
|
|
}{
|
2021-01-20 10:10:32 +08:00
|
|
|
{&PeerConn{Peer: Peer{outgoing: false, PeerPrefersEncryption: false}}, 0},
|
|
|
|
{&PeerConn{Peer: Peer{outgoing: false, PeerPrefersEncryption: true}}, pp.PexPrefersEncryption},
|
|
|
|
{&PeerConn{Peer: Peer{outgoing: true, PeerPrefersEncryption: false}}, pp.PexOutgoingConn},
|
|
|
|
{&PeerConn{Peer: Peer{outgoing: true, PeerPrefersEncryption: true}}, pp.PexOutgoingConn | pp.PexPrefersEncryption},
|
2021-01-25 12:43:28 +08:00
|
|
|
{&PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network()}}, pp.PexSupportsUtp},
|
|
|
|
{&PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network(), outgoing: true}}, pp.PexOutgoingConn | pp.PexSupportsUtp},
|
|
|
|
{&PeerConn{Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network(), outgoing: true}}, pp.PexOutgoingConn},
|
|
|
|
{&PeerConn{Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network()}}, 0},
|
2019-12-11 19:45:04 +08:00
|
|
|
}
|
|
|
|
for i, tc := range testcases {
|
|
|
|
f := tc.conn.pexPeerFlags()
|
|
|
|
require.EqualValues(t, tc.f, f, i)
|
|
|
|
}
|
|
|
|
}
|
2020-04-09 00:03:29 +08:00
|
|
|
|
|
|
|
func TestConnPexEvent(t *testing.T) {
|
|
|
|
var (
|
|
|
|
udpAddr = &net.UDPAddr{IP: net.IPv6loopback, Port: 4848}
|
|
|
|
tcpAddr = &net.TCPAddr{IP: net.IPv6loopback, Port: 4848}
|
|
|
|
dialTcpAddr = &net.TCPAddr{IP: net.IPv6loopback, Port: 4747}
|
|
|
|
dialUdpAddr = &net.UDPAddr{IP: net.IPv6loopback, Port: 4747}
|
|
|
|
)
|
2021-11-08 11:47:01 +08:00
|
|
|
testcases := []struct {
|
2020-04-09 00:03:29 +08:00
|
|
|
t pexEventType
|
|
|
|
c *PeerConn
|
|
|
|
e pexEvent
|
|
|
|
}{
|
|
|
|
{
|
|
|
|
pexAdd,
|
2021-01-25 12:43:28 +08:00
|
|
|
&PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network()}},
|
2021-12-07 02:46:25 +08:00
|
|
|
pexEvent{pexAdd, udpAddr, pp.PexSupportsUtp, nil},
|
2020-04-09 00:03:29 +08:00
|
|
|
},
|
|
|
|
{
|
|
|
|
pexDrop,
|
2021-01-25 12:43:28 +08:00
|
|
|
&PeerConn{Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network(), outgoing: true, PeerListenPort: dialTcpAddr.Port}},
|
2021-12-07 02:46:25 +08:00
|
|
|
pexEvent{pexDrop, tcpAddr, pp.PexOutgoingConn, nil},
|
2020-04-09 00:03:29 +08:00
|
|
|
},
|
|
|
|
{
|
|
|
|
pexAdd,
|
2021-01-25 12:43:28 +08:00
|
|
|
&PeerConn{Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network(), PeerListenPort: dialTcpAddr.Port}},
|
2021-12-07 02:46:25 +08:00
|
|
|
pexEvent{pexAdd, dialTcpAddr, 0, nil},
|
2020-04-09 00:03:29 +08:00
|
|
|
},
|
|
|
|
{
|
|
|
|
pexDrop,
|
2021-01-25 12:43:28 +08:00
|
|
|
&PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network(), PeerListenPort: dialUdpAddr.Port}},
|
2021-12-07 02:46:25 +08:00
|
|
|
pexEvent{pexDrop, dialUdpAddr, pp.PexSupportsUtp, nil},
|
2020-04-09 00:03:29 +08:00
|
|
|
},
|
|
|
|
}
|
|
|
|
for i, tc := range testcases {
|
|
|
|
e := tc.c.pexEvent(tc.t)
|
|
|
|
require.EqualValues(t, tc.e, e, i)
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 16:12:10 +08:00
|
|
|
|
|
|
|
// TestHaveAllThenBitfield checks that a bitfield received after a have-all
// replaces the peer's piece set: have-all registers the conn as having every
// piece, and a subsequent bitfield demotes it to only the listed pieces.
func TestHaveAllThenBitfield(t *testing.T) {
	c := qt.New(t)
	cl := newTestingClient(t)
	tt := cl.newTorrentForTesting()
	// cl.newConnection()
	pc := PeerConn{
		Peer: Peer{t: tt},
	}
	pc.initRequestState()
	// Wire up peerImpl by hand, since the conn was constructed directly
	// rather than via newConnection (see the commented-out call above).
	pc.peerImpl = &pc
	tt.conns[&pc] = struct{}{}
	c.Assert(pc.onPeerSentHaveAll(), qt.IsNil)
	// After have-all, the conn is tracked as having all pieces.
	c.Check(pc.t.connsWithAllPieces, qt.DeepEquals, map[*Peer]struct{}{&pc.Peer: {}})
	pc.peerSentBitfield([]bool{false, false, true, false, true, true, false, false})
	// Highest set bit is index 5, so the peer must have at least 6 pieces.
	c.Check(pc.peerMinPieces, qt.Equals, 6)
	// The bitfield cleared the have-all state.
	c.Check(pc.t.connsWithAllPieces, qt.HasLen, 0)
	// Only now give the torrent real info: 7 pieces' worth of hashes, one
	// fewer than the 8-bit bitfield above.
	c.Assert(pc.t.setInfo(&metainfo.Info{
		PieceLength: 0,
		Pieces:      make([]byte, pieceHash.Size()*7),
	}), qt.IsNil)
	pc.t.onSetInfo()
	c.Check(tt.numPieces(), qt.Equals, 7)
	c.Check(tt.pieceAvailabilityRuns(), qt.DeepEquals, []pieceAvailabilityRun{
		// The last element of the bitfield is irrelevant, as the Torrent actually only has 7
		// pieces.
		{2, 0}, {1, 1}, {1, 0}, {2, 1}, {1, 0},
	})
}
|
2021-12-24 05:55:57 +08:00
|
|
|
|
|
|
|
func TestApplyRequestStateWriteBufferConstraints(t *testing.T) {
|
|
|
|
c := qt.New(t)
|
|
|
|
c.Check(interestedMsgLen, qt.Equals, 5)
|
|
|
|
c.Check(requestMsgLen, qt.Equals, 17)
|
|
|
|
c.Check(maxLocalToRemoteRequests >= 8, qt.IsTrue)
|
|
|
|
c.Logf("max local to remote requests: %v", maxLocalToRemoteRequests)
|
|
|
|
}
|
2022-01-12 12:01:33 +08:00
|
|
|
|
|
|
|
// peerConnForPreferredNetworkDirection builds a minimal *PeerConn for
// exercising hasPreferredNetworkOver. localPeerId and remotePeerId are
// encoded big-endian into the client's and peer's IDs respectively; outgoing,
// utp and ipv6 select connection direction, network, and address family.
func peerConnForPreferredNetworkDirection(localPeerId, remotePeerId int, outgoing, utp, ipv6 bool) *PeerConn {
	pc := PeerConn{}
	pc.outgoing = outgoing
	if utp {
		pc.Network = "udp"
	}
	if ipv6 {
		// NOTE(review): fmt.Sprintf with a constant format and no arguments
		// is redundant (staticcheck S1039) — net.ParseIP("::420") would do,
		// but this is the file's only fmt use, so inlining it would orphan
		// the fmt import.
		pc.RemoteAddr = &net.TCPAddr{IP: net.ParseIP(fmt.Sprintf("::420"))}
	} else {
		pc.RemoteAddr = &net.TCPAddr{IP: net.IPv4(1, 2, 3, 4)}
	}
	binary.BigEndian.PutUint64(pc.PeerID[:], uint64(remotePeerId))
	cl := Client{}
	binary.BigEndian.PutUint64(cl.peerID[:], uint64(localPeerId))
	// The conn needs a torrent with a client so hasPreferredNetworkOver can
	// compare the local peer ID against the remote's.
	pc.t = &Torrent{cl: &cl}
	return &pc
}
|
|
|
|
|
|
|
|
func TestPreferredNetworkDirection(t *testing.T) {
|
|
|
|
pc := peerConnForPreferredNetworkDirection
|
|
|
|
c := qt.New(t)
|
|
|
|
// Prefer outgoing to higher peer ID
|
|
|
|
c.Assert(pc(1, 2, true, false, false).hasPreferredNetworkOver(pc(1, 2, false, false, false)), qt.IsTrue)
|
|
|
|
c.Assert(pc(1, 2, false, false, false).hasPreferredNetworkOver(pc(1, 2, true, false, false)), qt.IsFalse)
|
|
|
|
c.Assert(pc(2, 1, false, false, false).hasPreferredNetworkOver(pc(2, 1, true, false, false)), qt.IsTrue)
|
|
|
|
// Don't prefer uTP
|
|
|
|
c.Assert(pc(1, 2, false, true, false).hasPreferredNetworkOver(pc(1, 2, false, false, false)), qt.IsFalse)
|
|
|
|
// Prefer IPv6
|
|
|
|
c.Assert(pc(1, 2, false, false, false).hasPreferredNetworkOver(pc(1, 2, false, false, true)), qt.IsFalse)
|
|
|
|
// No difference
|
|
|
|
c.Assert(pc(1, 2, false, false, false).hasPreferredNetworkOver(pc(1, 2, false, false, false)), qt.IsFalse)
|
|
|
|
}
|
2022-06-25 21:16:58 +08:00
|
|
|
|
|
|
|
// TestReceiveLargeRequest exercises onReadRequest with request lengths above
// defaultChunkSize. With the fast extension enabled, both a default-sized and
// a 2 MiB request are accepted; once the upload rate limiter's burst is
// capped at defaultChunkSize, the oversized request leaves only a fixed
// 17-byte message queued (presumably a reject — confirm against
// onReadRequest) instead of a tracked peer request.
func TestReceiveLargeRequest(t *testing.T) {
	c := qt.New(t)
	cl := newTestingClient(t)
	pc := cl.newConnection(nil, false, nil, "test", "")
	tor := cl.newTorrentForTesting()
	// A piece large enough to contain the 2 MiB request below.
	tor.info = &metainfo.Info{PieceLength: 3 << 20}
	pc.setTorrent(tor)
	// Mark piece 0 as completed.
	tor._completedPieces.Add(0)
	pc.PeerExtensionBytes.SetBit(pp.ExtensionBitFast, true)
	pc.choking = false
	pc.initMessageWriter()
	req := Request{}
	req.Length = defaultChunkSize
	c.Assert(pc.fastEnabled(), qt.IsTrue)
	// A default-sized request is accepted and tracked.
	c.Check(pc.onReadRequest(req, false), qt.IsNil)
	c.Check(pc.peerRequests, qt.HasLen, 1)
	req.Length = 2 << 20
	// So is a 2 MiB request, while the rate limiter permits it.
	c.Check(pc.onReadRequest(req, false), qt.IsNil)
	c.Check(pc.peerRequests, qt.HasLen, 2)
	pc.peerRequests = nil
	// Cap the limiter burst at one default chunk: anything larger can never
	// be satisfied from this point on.
	pc.t.cl.config.UploadRateLimiter = rate.NewLimiter(1, defaultChunkSize)
	req.Length = defaultChunkSize
	c.Check(pc.onReadRequest(req, false), qt.IsNil)
	c.Check(pc.peerRequests, qt.HasLen, 1)
	req.Length = 2 << 20
	c.Check(pc.onReadRequest(req, false), qt.IsNil)
	// Only a single 17-byte message was queued for the oversized request;
	// note there is deliberately no peerRequests length check here.
	c.Check(pc.messageWriter.writeBuffer.Len(), qt.Equals, 17)
}
|