package torrent

import (
	"bufio"
	"bytes"
	"crypto/rand"
	"crypto/sha1"
	"encoding/hex"
	"errors"
	"expvar"
	"fmt"
	"io"
	"log"
	"math/big"
	mathRand "math/rand"
	"net"
	"net/url"
	"os"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/anacrolix/missinggo"
	. "github.com/anacrolix/missinggo"
	"github.com/anacrolix/missinggo/perf"
	"github.com/anacrolix/missinggo/pubsub"
	"github.com/anacrolix/sync"
	"github.com/anacrolix/utp"
	"github.com/bradfitz/iter"
	"github.com/edsrzf/mmap-go"

	"github.com/anacrolix/torrent/bencode"
	filePkg "github.com/anacrolix/torrent/data/file"
	"github.com/anacrolix/torrent/dht"
	"github.com/anacrolix/torrent/internal/pieceordering"
	"github.com/anacrolix/torrent/iplist"
	"github.com/anacrolix/torrent/metainfo"
	"github.com/anacrolix/torrent/mse"
	pp "github.com/anacrolix/torrent/peer_protocol"
	"github.com/anacrolix/torrent/tracker"
)

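// Counters and maps published via the expvar package for monitoring client activity.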
var (
	unwantedChunksReceived   = expvar.NewInt("chunksReceivedUnwanted")
	unexpectedChunksReceived = expvar.NewInt("chunksReceivedUnexpected")
	chunksReceived           = expvar.NewInt("chunksReceived")

	peersAddedBySource = expvar.NewMap("peersAddedBySource")

	uploadChunksPosted    = expvar.NewInt("uploadChunksPosted")
	unexpectedCancels     = expvar.NewInt("unexpectedCancels")
	postedCancels         = expvar.NewInt("postedCancels")
	duplicateConnsAvoided = expvar.NewInt("duplicateConnsAvoided")

	pieceHashedCorrect    = expvar.NewInt("pieceHashedCorrect")
	pieceHashedNotCorrect = expvar.NewInt("pieceHashedNotCorrect")

	unsuccessfulDials = expvar.NewInt("dialUnsuccessful")
	successfulDials   = expvar.NewInt("dialSuccessful")

	acceptUTP    = expvar.NewInt("acceptUTP")
	acceptTCP    = expvar.NewInt("acceptTCP")
	acceptReject = expvar.NewInt("acceptReject")

	peerExtensions                    = expvar.NewMap("peerExtensions")
	completedHandshakeConnectionFlags = expvar.NewMap("completedHandshakeConnectionFlags")
	// Count of connections to peer with same client ID.
	connsToSelf = expvar.NewInt("connsToSelf")
	// Number of completed connections to a client we're already connected with.
	duplicateClientConns       = expvar.NewInt("duplicateClientConns")
	receivedMessageTypes       = expvar.NewMap("receivedMessageTypes")
	supportedExtensionMessages = expvar.NewMap("supportedExtensionMessages")
)

const (
	// Justification for set bits follows.
	//
	// Extension protocol ([5]|=0x10):
	// http://www.bittorrent.org/beps/bep_0010.html
	//
	// Fast Extension ([7]|=0x04):
	// http://bittorrent.org/beps/bep_0006.html.
	// Disabled until AllowedFast is implemented.
	//
	// DHT ([7]|=1):
	// http://www.bittorrent.org/beps/bep_0005.html
	defaultExtensionBytes = "\x00\x00\x00\x00\x00\x10\x00\x01"

	socketsPerTorrent     = 80
	torrentPeersHighWater = 200
	torrentPeersLowWater  = 50

	// Limit how long handshake can take. This is to reduce the lingering
	// impact of a few bad apples. 4s loses 1% of successful handshakes that
	// are obtained with 60s timeout, and 5% of unsuccessful handshakes.
	btHandshakeTimeout = 4 * time.Second
	handshakesTimeout  = 20 * time.Second

	// These are our extended message IDs.
	metadataExtendedId = iota + 1 // 0 is reserved for deleting keys
	pexExtendedId

	// Updated occasionally when there have been changes to client behaviour,
	// in case other clients are assuming anything of us. See also `bep20`.
	extendedHandshakeClientVersion = "go.torrent dev 20150624"
)

// Currently doesn't really queue, but should in the future.
func (cl *Client) queuePieceCheck(t *torrent, pieceIndex int) {
	piece := &t.Pieces[pieceIndex]
	if piece.QueuedForHash {
		return
	}
	piece.QueuedForHash = true
	t.publishPieceChange(int(pieceIndex))
	go cl.verifyPiece(t, int(pieceIndex))
}

// Queue a piece check if one isn't already queued, and the piece has never
// been checked before.
func (cl *Client) queueFirstHash(t *torrent, piece int) {
	p := &t.Pieces[piece]
	if p.EverHashed || p.Hashing || p.QueuedForHash || t.pieceComplete(piece) {
		return
	}
	cl.queuePieceCheck(t, piece)
}

// Clients contain zero or more Torrents. A client manages a blocklist, the
// TCP/UDP protocol ports, and DHT as desired.
type Client struct {
	halfOpenLimit  int
	peerID         [20]byte
	listeners      []net.Listener
	utpSock        *utp.Socket
	dHT            *dht.Server
	ipBlockList    iplist.Ranger
	bannedTorrents map[InfoHash]struct{}
	config         Config
	pruneTimer     *time.Timer
	extensionBytes peerExtensionBytes
	// Set of addresses that have our client ID. This intentionally will
	// include ourselves if we end up trying to connect to our own address
	// through legitimate channels.
	dopplegangerAddrs map[string]struct{}

	torrentDataOpener TorrentDataOpener

	mu    sync.RWMutex
	event sync.Cond
	quit  chan struct{}

	torrents map[InfoHash]*torrent
}

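// IPBlockList returns the Client's current IP blocklist.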
func (me *Client) IPBlockList() iplist.Ranger {
	me.mu.Lock()
	defer me.mu.Unlock()
	return me.ipBlockList
}

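// SetIPBlockList replaces the Client's IP blocklist, and passes it on to the
// DHT server if one is running.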
func (me *Client) SetIPBlockList(list iplist.Ranger) {
	me.mu.Lock()
	defer me.mu.Unlock()
	me.ipBlockList = list
	if me.dHT != nil {
		me.dHT.SetIPBlockList(list)
	}
}

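// PeerID returns the 20-byte peer ID in use by this Client, as a string.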
func (me *Client) PeerID() string {
	return string(me.peerID[:])
}

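// ListenAddr returns the address of the first active listener, or nil if the
// Client isn't listening.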
func (me *Client) ListenAddr() (addr net.Addr) {
	for _, l := range me.listeners {
		addr = l.Addr()
		break
	}
	return
}

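// hashSorter orders infohashes by their big-endian integer value, giving a
// stable torrent ordering for status output.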
type hashSorter struct {
	Hashes []InfoHash
}

func (me hashSorter) Len() int {
	return len(me.Hashes)
}

func (me hashSorter) Less(a, b int) bool {
	return (&big.Int{}).SetBytes(me.Hashes[a][:]).Cmp((&big.Int{}).SetBytes(me.Hashes[b][:])) < 0
}

func (me hashSorter) Swap(a, b int) {
	me.Hashes[a], me.Hashes[b] = me.Hashes[b], me.Hashes[a]
}

func (cl *Client) sortedTorrents() (ret []*torrent) {
	var hs hashSorter
	for ih := range cl.torrents {
		hs.Hashes = append(hs.Hashes, ih)
	}
	sort.Sort(hs)
	for _, ih := range hs.Hashes {
		ret = append(ret, cl.torrent(ih))
	}
	return
}

// Writes out a human-readable status of the client, such as for writing to
// an HTTP status page.
func (cl *Client) WriteStatus(_w io.Writer) {
	cl.mu.RLock()
	defer cl.mu.RUnlock()
	w := bufio.NewWriter(_w)
	defer w.Flush()
	if addr := cl.ListenAddr(); addr != nil {
		fmt.Fprintf(w, "Listening on %s\n", cl.ListenAddr())
	} else {
		fmt.Fprintln(w, "Not listening!")
	}
	fmt.Fprintf(w, "Peer ID: %+q\n", cl.peerID)
	if cl.dHT != nil {
		dhtStats := cl.dHT.Stats()
		fmt.Fprintf(w, "DHT nodes: %d (%d good, %d banned)\n", dhtStats.Nodes, dhtStats.GoodNodes, dhtStats.BadNodes)
		fmt.Fprintf(w, "DHT Server ID: %x\n", cl.dHT.ID())
		fmt.Fprintf(w, "DHT port: %d\n", addrPort(cl.dHT.Addr()))
		fmt.Fprintf(w, "DHT announces: %d\n", dhtStats.ConfirmedAnnounces)
		fmt.Fprintf(w, "Outstanding transactions: %d\n", dhtStats.OutstandingTransactions)
	}
	fmt.Fprintf(w, "# Torrents: %d\n", len(cl.torrents))
	fmt.Fprintln(w)
	for _, t := range cl.sortedTorrents() {
		if t.Name() == "" {
			fmt.Fprint(w, "<unknown name>")
		} else {
			fmt.Fprint(w, t.Name())
		}
		fmt.Fprint(w, "\n")
		if t.haveInfo() {
			fmt.Fprintf(w, "%f%% of %d bytes", 100*(1-float32(t.bytesLeft())/float32(t.Length())), t.Length())
		} else {
			w.WriteString("<missing metainfo>")
		}
		fmt.Fprint(w, "\n")
		t.writeStatus(w, cl)
		fmt.Fprintln(w)
	}
}

func dataReadAt(d Data, b []byte, off int64) (n int, err error) {
	// defer func() {
	// 	if err == io.ErrUnexpectedEOF && n != 0 {
	// 		err = nil
	// 	}
	// }()
	// log.Println("data read at", len(b), off)
	return d.ReadAt(b, off)
}

// Calculates the number of pieces to set to Readahead priority, after the
// Now and Next pieces.
func readaheadPieces(readahead, pieceLength int64) (ret int) {
	// Expand the readahead to fit any partial pieces. Subtract 1 for the
	// "next" piece that is assigned.
	ret = int((readahead+pieceLength-1)/pieceLength - 1)
	// Lengthen the "readahead tail" to smooth blockiness that occurs when the
	// piece length is much larger than the readahead.
	if ret < 2 {
		ret++
	}
	return
}

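// Raises piece priorities for a read at the given offset: the piece
// containing the offset gets Now priority, the following piece Next, and
// pieces within the readahead window Readahead.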
func (cl *Client) readRaisePiecePriorities(t *torrent, off, readaheadBytes int64) {
	index := int(off / int64(t.usualPieceSize()))
	cl.raisePiecePriority(t, index, PiecePriorityNow)
	index++
	if index >= t.numPieces() {
		return
	}
	cl.raisePiecePriority(t, index, PiecePriorityNext)
	for range iter.N(readaheadPieces(readaheadBytes, t.Info.PieceLength)) {
		index++
		if index >= t.numPieces() {
			break
		}
		cl.raisePiecePriority(t, index, PiecePriorityReadahead)
	}
}

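// Adds urgent requests for the chunks spanning the byte range [off, off+n),
// queueing initial hash checks for the affected pieces.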
func (cl *Client) addUrgentRequests(t *torrent, off int64, n int) {
	for n > 0 {
		req, ok := t.offsetRequest(off)
		if !ok {
			break
		}
		if _, ok := t.urgent[req]; !ok && !t.haveChunk(req) {
			if t.urgent == nil {
				t.urgent = make(map[request]struct{}, (n+int(t.chunkSize)-1)/int(t.chunkSize))
			}
			t.urgent[req] = struct{}{}
			cl.event.Broadcast() // Why?
			index := int(req.Index)
			cl.queueFirstHash(t, index)
			cl.pieceChanged(t, index)
		}
		reqOff := t.requestOffset(req)
		n1 := req.Length - pp.Integer(off-reqOff)
		off += int64(n1)
		n -= int(n1)
	}
	// log.Print(t.urgent)
}

func (cl *Client) configDir() string {
	if cl.config.ConfigDir == "" {
		return filepath.Join(os.Getenv("HOME"), ".config/torrent")
	}
	return cl.config.ConfigDir
}

// The directory where the Client expects to find and store configuration
// data. Defaults to $HOME/.config/torrent.
func (cl *Client) ConfigDir() string {
	return cl.configDir()
}

func (t *torrent) connPendPiece(c *connection, piece int) {
	c.pendPiece(piece, t.Pieces[piece].Priority, t)
}

func (cl *Client) raisePiecePriority(t *torrent, piece int, priority piecePriority) {
	if t.Pieces[piece].Priority < priority {
		cl.prioritizePiece(t, piece, priority)
	}
}

func (cl *Client) prioritizePiece(t *torrent, piece int, priority piecePriority) {
	if t.havePiece(piece) {
		priority = PiecePriorityNone
	}
	if priority != PiecePriorityNone {
		cl.queueFirstHash(t, piece)
	}
	p := &t.Pieces[piece]
	if p.Priority != priority {
		p.Priority = priority
		cl.pieceChanged(t, piece)
	}
}

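// Loads a packed-format blocklist from the given file via mmap. A missing
// file is not an error.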
func loadPackedBlocklist(filename string) (ret iplist.Ranger, err error) {
	f, err := os.Open(filename)
	if os.IsNotExist(err) {
		err = nil
		return
	}
	if err != nil {
		return
	}
	defer f.Close()
	mm, err := mmap.Map(f, mmap.RDONLY, 0)
	if err != nil {
		return
	}
	ret = iplist.NewFromPacked(mm)
	return
}

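// Sets the Client's blocklist from the file named by TORRENT_BLOCKLIST_FILE,
// falling back to the packed or plain blocklist files in the config
// directory.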
func (cl *Client) setEnvBlocklist() (err error) {
	filename := os.Getenv("TORRENT_BLOCKLIST_FILE")
	defaultBlocklist := filename == ""
	if defaultBlocklist {
		cl.ipBlockList, err = loadPackedBlocklist(filepath.Join(cl.configDir(), "packed-blocklist"))
		if err != nil {
			return
		}
		if cl.ipBlockList != nil {
			return
		}
		filename = filepath.Join(cl.configDir(), "blocklist")
	}
	f, err := os.Open(filename)
	if err != nil {
		if defaultBlocklist {
			err = nil
		}
		return
	}
	defer f.Close()
	cl.ipBlockList, err = iplist.NewFromReader(f)
	return
}

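// Loads banned infohashes, one hex-encoded hash per line, from the
// banned_infohashes file in the config directory.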
func (cl *Client) initBannedTorrents() error {
	f, err := os.Open(filepath.Join(cl.configDir(), "banned_infohashes"))
	if err != nil {
		if os.IsNotExist(err) {
			return nil
		}
		return fmt.Errorf("error opening banned infohashes file: %s", err)
	}
	defer f.Close()
	scanner := bufio.NewScanner(f)
	cl.bannedTorrents = make(map[InfoHash]struct{})
	for scanner.Scan() {
		if strings.HasPrefix(strings.TrimSpace(scanner.Text()), "#") {
			continue
		}
		var ihs string
		n, err := fmt.Sscanf(scanner.Text(), "%x", &ihs)
		if err != nil {
			return fmt.Errorf("error reading infohash: %s", err)
		}
		if n != 1 {
			continue
		}
		if len(ihs) != 20 {
			return errors.New("bad infohash")
		}
		var ih InfoHash
		CopyExact(&ih, ihs)
		cl.bannedTorrents[ih] = struct{}{}
	}
	if err := scanner.Err(); err != nil {
		return fmt.Errorf("error scanning file: %s", err)
	}
	return nil
}

// Creates a new client.
func NewClient(cfg *Config) (cl *Client, err error) {
	if cfg == nil {
		cfg = &Config{}
	}

	defer func() {
		if err != nil {
			cl = nil
		}
	}()
	cl = &Client{
		halfOpenLimit: socketsPerTorrent,
		config:        *cfg,
		torrentDataOpener: func(md *metainfo.Info) Data {
			return filePkg.TorrentData(md, cfg.DataDir)
		},
		dopplegangerAddrs: make(map[string]struct{}),

		quit:     make(chan struct{}),
		torrents: make(map[InfoHash]*torrent),
	}
	CopyExact(&cl.extensionBytes, defaultExtensionBytes)
	cl.event.L = &cl.mu
	if cfg.TorrentDataOpener != nil {
		cl.torrentDataOpener = cfg.TorrentDataOpener
	}

	if cfg.IPBlocklist != nil {
		cl.ipBlockList = cfg.IPBlocklist
	} else if !cfg.NoDefaultBlocklist {
		err = cl.setEnvBlocklist()
		if err != nil {
			return
		}
	}

	if err = cl.initBannedTorrents(); err != nil {
		err = fmt.Errorf("error initing banned torrents: %s", err)
		return
	}

	if cfg.PeerID != "" {
		CopyExact(&cl.peerID, cfg.PeerID)
	} else {
		o := copy(cl.peerID[:], bep20)
		_, err = rand.Read(cl.peerID[o:])
		if err != nil {
			panic("error generating peer id")
		}
	}

	// Returns the laddr string to listen on for the next Listen call.
	listenAddr := func() string {
		if addr := cl.ListenAddr(); addr != nil {
			return addr.String()
		}
		if cfg.ListenAddr == "" {
			return ":50007"
		}
		return cfg.ListenAddr
	}
	if !cl.config.DisableTCP {
		var l net.Listener
		l, err = net.Listen(func() string {
			if cl.config.DisableIPv6 {
				return "tcp4"
			} else {
				return "tcp"
			}
		}(), listenAddr())
		if err != nil {
			return
		}
		cl.listeners = append(cl.listeners, l)
		go cl.acceptConnections(l, false)
	}
	if !cl.config.DisableUTP {
		cl.utpSock, err = utp.NewSocket(func() string {
			if cl.config.DisableIPv6 {
				return "udp4"
			} else {
				return "udp"
			}
		}(), listenAddr())
		if err != nil {
			return
		}
		cl.listeners = append(cl.listeners, cl.utpSock)
		go cl.acceptConnections(cl.utpSock, true)
	}
	if !cfg.NoDHT {
		dhtCfg := cfg.DHTConfig
		if dhtCfg == nil {
			dhtCfg = &dht.ServerConfig{}
		}
		if dhtCfg.IPBlocklist == nil {
			dhtCfg.IPBlocklist = cl.ipBlockList
		}
		if dhtCfg.Addr == "" {
			dhtCfg.Addr = listenAddr()
		}
		if dhtCfg.Conn == nil && cl.utpSock != nil {
			dhtCfg.Conn = cl.utpSock
		}
		cl.dHT, err = dht.NewServer(dhtCfg)
		if err != nil {
			return
		}
	}

	return
}

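// Returns true once the Client has been closed.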
func (cl *Client) stopped() bool {
	select {
	case <-cl.quit:
		return true
	default:
		return false
	}
}

// Stops the client. All connections to peers are closed and all activity will
// come to a halt.
func (me *Client) Close() {
	me.mu.Lock()
	defer me.mu.Unlock()
	select {
	case <-me.quit:
		return
	default:
	}
	close(me.quit)
	if me.dHT != nil {
		me.dHT.Close()
	}
	for _, l := range me.listeners {
		l.Close()
	}
	for _, t := range me.torrents {
		t.close()
	}
	me.event.Broadcast()
}

var ipv6BlockRange = iplist.Range{Description: "non-IPv4 address"}

func (cl *Client) ipBlockRange(ip net.IP) (r iplist.Range, blocked bool) {
	if cl.ipBlockList == nil {
		return
	}
	ip4 := ip.To4()
	// If blocklists are enabled, then block non-IPv4 addresses, because
	// blocklists do not yet support IPv6.
	if ip4 == nil {
		if missinggo.CryHeard() {
			log.Printf("blocking non-IPv4 address: %s", ip)
		}
		r = ipv6BlockRange
		blocked = true
		return
	}
	return cl.ipBlockList.Lookup(ip4)
}

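// Blocks until at least one torrent wants connections, or the Client is
// closed.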
func (cl *Client) waitAccept() {
	cl.mu.Lock()
	defer cl.mu.Unlock()
	for {
		for _, t := range cl.torrents {
			if cl.wantConns(t) {
				return
			}
		}
		select {
		case <-cl.quit:
			return
		default:
		}
		cl.event.Wait()
	}
}

func (cl *Client) acceptConnections(l net.Listener, utp bool) {
	for {
		cl.waitAccept()
		// We accept all connections immediately, because we don't know what
		// torrent they're for.
		conn, err := l.Accept()
		select {
		case <-cl.quit:
			if conn != nil {
				conn.Close()
			}
			return
		default:
		}
		if err != nil {
			log.Print(err)
			return
		}
		if utp {
			acceptUTP.Add(1)
		} else {
			acceptTCP.Add(1)
		}
		cl.mu.RLock()
		doppleganger := cl.dopplegangerAddr(conn.RemoteAddr().String())
		_, blocked := cl.ipBlockRange(AddrIP(conn.RemoteAddr()))
		cl.mu.RUnlock()
		if blocked || doppleganger {
			acceptReject.Add(1)
			// log.Printf("inbound connection from %s blocked by %s", conn.RemoteAddr(), blockRange)
			conn.Close()
			continue
		}
		go cl.incomingConnection(conn, utp)
	}
}

func (cl *Client) incomingConnection(nc net.Conn, utp bool) {
	defer nc.Close()
	if tc, ok := nc.(*net.TCPConn); ok {
		tc.SetLinger(0)
	}
	c := newConnection()
	c.conn = nc
	c.rw = nc
	c.Discovery = peerSourceIncoming
	c.uTP = utp
	err := cl.runReceivedConn(c)
	if err != nil {
		// log.Print(err)
	}
}

// Returns a handle to the given torrent, if it's present in the client.
func (cl *Client) Torrent(ih InfoHash) (T Download, ok bool) {
	cl.mu.Lock()
	defer cl.mu.Unlock()
	t, ok := cl.torrents[ih]
	if !ok {
		return
	}
	T = Torrent{cl, t}
	return
}

func (me *Client) torrent(ih InfoHash) *torrent {
	return me.torrents[ih]
}

type dialResult struct {
	Conn net.Conn
	UTP  bool
}

func doDial(dial func(addr string, t *torrent) (net.Conn, error), ch chan dialResult, utp bool, addr string, t *torrent) {
	conn, err := dial(addr, t)
	if err != nil {
		if conn != nil {
			conn.Close()
		}
		conn = nil // Pedantic
	}
	ch <- dialResult{conn, utp}
	if err == nil {
		successfulDials.Add(1)
		return
	}
	unsuccessfulDials.Add(1)
}

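// Scales the dial timeout down as the number of pending peers grows relative
// to the half-open connection limit, clamped below at minDialTimeout.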
func reducedDialTimeout(max time.Duration, halfOpenLimit int, pendingPeers int) (ret time.Duration) {
	ret = max / time.Duration((pendingPeers+halfOpenLimit)/halfOpenLimit)
	if ret < minDialTimeout {
		ret = minDialTimeout
	}
	return
}

// Returns whether an address is known to connect to a client with our own ID.
func (me *Client) dopplegangerAddr(addr string) bool {
	_, ok := me.dopplegangerAddrs[addr]
	return ok
}

// Start the process of connecting to the given peer for the given torrent if
// appropriate.
func (me *Client) initiateConn(peer Peer, t *torrent) {
	if peer.Id == me.peerID {
		return
	}
	addr := net.JoinHostPort(peer.IP.String(), fmt.Sprintf("%d", peer.Port))
	if me.dopplegangerAddr(addr) || t.addrActive(addr) {
		duplicateConnsAvoided.Add(1)
		return
	}
	if r, ok := me.ipBlockRange(peer.IP); ok {
		log.Printf("outbound connect to %s blocked by IP blocklist rule %s", peer.IP, r)
		return
	}
	t.HalfOpen[addr] = struct{}{}
	go me.outgoingConnection(t, addr, peer.Source)
}

func (me *Client) dialTimeout(t *torrent) time.Duration {
	me.mu.Lock()
	pendingPeers := len(t.Peers)
	me.mu.Unlock()
	return reducedDialTimeout(nominalDialTimeout, me.halfOpenLimit, pendingPeers)
}

func (me *Client) dialTCP(addr string, t *torrent) (c net.Conn, err error) {
	c, err = net.DialTimeout("tcp", addr, me.dialTimeout(t))
	if err == nil {
		c.(*net.TCPConn).SetLinger(0)
	}
	return
}

func (me *Client) dialUTP(addr string, t *torrent) (c net.Conn, err error) {
	return me.utpSock.DialTimeout(addr, me.dialTimeout(t))
}

// Returns a connection over UTP or TCP, whichever is first to connect.
func (me *Client) dialFirst(addr string, t *torrent) (conn net.Conn, utp bool) {
	// Initiate connections via TCP and UTP simultaneously. Use the first one
	// that succeeds.
	left := 0
	if !me.config.DisableUTP {
		left++
	}
	if !me.config.DisableTCP {
		left++
	}
	resCh := make(chan dialResult, left)
	if !me.config.DisableUTP {
		go doDial(me.dialUTP, resCh, true, addr, t)
	}
	if !me.config.DisableTCP {
		go doDial(me.dialTCP, resCh, false, addr, t)
	}
	var res dialResult
	// Wait for a successful connection.
	for ; left > 0 && res.Conn == nil; left-- {
		res = <-resCh
	}
	if left > 0 {
		// There are still incomplete dials.
		go func() {
			for ; left > 0; left-- {
				conn := (<-resCh).Conn
				if conn != nil {
					conn.Close()
				}
			}
		}()
	}
	conn = res.Conn
	utp = res.UTP
	return
}

func (me *Client) noLongerHalfOpen(t *torrent, addr string) {
	if _, ok := t.HalfOpen[addr]; !ok {
		panic("invariant broken")
	}
	delete(t.HalfOpen, addr)
	me.openNewConns(t)
}

// Performs initiator handshakes and returns a connection.
func (me *Client) handshakesConnection(nc net.Conn, t *torrent, encrypted, utp bool) (c *connection, err error) {
	c = newConnection()
	c.conn = nc
	c.rw = nc
	c.encrypted = encrypted
	c.uTP = utp
	err = nc.SetDeadline(time.Now().Add(handshakesTimeout))
	if err != nil {
		return
	}
	ok, err := me.initiateHandshakes(c, t)
	if !ok {
		c = nil
	}
	return
}

// Returns nil connection and nil error if no connection could be established
// for valid reasons.
func (me *Client) establishOutgoingConn(t *torrent, addr string) (c *connection, err error) {
	nc, utp := me.dialFirst(addr, t)
	if nc == nil {
		return
	}
	c, err = me.handshakesConnection(nc, t, !me.config.DisableEncryption, utp)
	if err != nil {
		nc.Close()
		return
	} else if c != nil {
		return
	}
	nc.Close()
	if me.config.DisableEncryption {
		// We already tried without encryption.
		return
	}
	// Try again without encryption, using whichever protocol type worked last
	// time.
	if utp {
		nc, err = me.dialUTP(addr, t)
	} else {
		nc, err = me.dialTCP(addr, t)
	}
	if err != nil {
		err = fmt.Errorf("error dialing for unencrypted connection: %s", err)
		return
	}
	c, err = me.handshakesConnection(nc, t, false, utp)
	if err != nil {
		nc.Close()
	}
	return
}

// Called to dial out and run a connection. The addr we're given is already
// considered half-open.
func (me *Client) outgoingConnection(t *torrent, addr string, ps peerSource) {
	c, err := me.establishOutgoingConn(t, addr)
	me.mu.Lock()
	defer me.mu.Unlock()
	// Don't release lock between here and addConnection, unless it's for
	// failure.
	me.noLongerHalfOpen(t, addr)
	if err != nil {
		return
	}
	if c == nil {
		return
	}
	defer c.Close()
	c.Discovery = ps
	err = me.runInitiatedHandshookConn(c, t)
	if err != nil {
		// log.Print(err)
	}
}

// The port number for incoming peer connections. 0 if the client isn't
// listening.
func (cl *Client) incomingPeerPort() int {
	listenAddr := cl.ListenAddr()
	if listenAddr == nil {
		return 0
	}
	return addrPort(listenAddr)
}

// Convert a net.Addr to its compact IP representation. Either 4 or 16 bytes
// per "yourip" field of http://www.bittorrent.org/beps/bep_0010.html.
func addrCompactIP(addr net.Addr) (string, error) {
	host, _, err := net.SplitHostPort(addr.String())
	if err != nil {
		return "", err
	}
	ip := net.ParseIP(host)
	if v4 := ip.To4(); v4 != nil {
		if len(v4) != 4 {
			panic(v4)
		}
		return string(v4), nil
	}
	return string(ip.To16()), nil
}

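// Writes each buffer received on bb to w until bb is closed or a write fails,
// then sends the first error (or nil) on done.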
func handshakeWriter(w io.Writer, bb <-chan []byte, done chan<- error) {
	var err error
	for b := range bb {
		_, err = w.Write(b)
		if err != nil {
			break
		}
	}
	done <- err
}

type (
	peerExtensionBytes [8]byte
	peerID             [20]byte
)

func (me *peerExtensionBytes) SupportsExtended() bool {
	return me[5]&0x10 != 0
}

func (me *peerExtensionBytes) SupportsDHT() bool {
	return me[7]&0x01 != 0
}

func (me *peerExtensionBytes) SupportsFast() bool {
	return me[7]&0x04 != 0
}

type handshakeResult struct {
	peerExtensionBytes
	peerID
	InfoHash
}

// ih is nil if we expect the peer to declare the InfoHash, such as when the
// peer initiated the connection. Returns ok if the handshake was successful,
// and err if there was an unexpected condition other than the peer simply
// abandoning the handshake.
func handshake(sock io.ReadWriter, ih *InfoHash, peerID [20]byte, extensions peerExtensionBytes) (res handshakeResult, ok bool, err error) {
	// Bytes to be sent to the peer. Should never block the sender.
	postCh := make(chan []byte, 4)
	// A single error value sent when the writer completes.
	writeDone := make(chan error, 1)
	// Performs writes to the socket and ensures posts don't block.
	go handshakeWriter(sock, postCh, writeDone)

	defer func() {
		close(postCh) // Done writing.
		if !ok {
			return
		}
		if err != nil {
			panic(err)
		}
		// Wait until writes complete before returning from handshake.
		err = <-writeDone
		if err != nil {
			err = fmt.Errorf("error writing: %s", err)
		}
	}()

	post := func(bb []byte) {
		select {
		case postCh <- bb:
		default:
			panic("mustn't block while posting")
		}
	}

	post([]byte(pp.Protocol))
	post(extensions[:])
	if ih != nil { // We already know what we want.
		post(ih[:])
		post(peerID[:])
	}
	var b [68]byte
	_, err = io.ReadFull(sock, b[:68])
	if err != nil {
		err = nil
		return
	}
	if string(b[:20]) != pp.Protocol {
		return
	}
	CopyExact(&res.peerExtensionBytes, b[20:28])
	CopyExact(&res.InfoHash, b[28:48])
	CopyExact(&res.peerID, b[48:68])
	peerExtensions.Add(hex.EncodeToString(res.peerExtensionBytes[:]), 1)

	// TODO: Maybe we can just drop peers here if we're not interested. This
	// could prevent them trying to reconnect, falsely believing there was
	// just a problem.
	if ih == nil { // We were waiting for the peer to tell us what they wanted.
		post(res.InfoHash[:])
		post(peerID[:])
	}

	ok = true
	return
}

// Wraps a raw connection and provides the interface we want for using the
// connection in the message loop.
type deadlineReader struct {
	nc net.Conn
	r  io.Reader
}

func (me deadlineReader) Read(b []byte) (n int, err error) {
	// Keep-alives should be received every 2 mins. Give a bit of grace time.
	err = me.nc.SetReadDeadline(time.Now().Add(150 * time.Second))
	if err != nil {
		err = fmt.Errorf("error setting read deadline: %s", err)
	}
	n, err = me.r.Read(b)
	// Convert common errors into io.EOF.
	// if err != nil {
	// 	if opError, ok := err.(*net.OpError); ok && opError.Op == "read" && opError.Err == syscall.ECONNRESET {
	// 		err = io.EOF
	// 	} else if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
	// 		if n != 0 {
	// 			panic(n)
	// 		}
	// 		err = io.EOF
	// 	}
	// }
	return
}

type readWriter struct {
	io.Reader
	io.Writer
}

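// Peeks at the start of the stream: if it doesn't begin with the plaintext
// BitTorrent protocol string, the MSE receive handshake is performed using
// the given secret keys.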
func maybeReceiveEncryptedHandshake(rw io.ReadWriter, skeys [][]byte) (ret io.ReadWriter, encrypted bool, err error) {
	var protocol [len(pp.Protocol)]byte
	_, err = io.ReadFull(rw, protocol[:])
	if err != nil {
		return
	}
	ret = readWriter{
		io.MultiReader(bytes.NewReader(protocol[:]), rw),
		rw,
	}
	if string(protocol[:]) == pp.Protocol {
		return
	}
	encrypted = true
	ret, err = mse.ReceiveHandshake(ret, skeys)
	return
}

func (cl *Client) receiveSkeys() (ret [][]byte) {
	for ih := range cl.torrents {
		ret = append(ret, ih[:])
	}
	return
}

func (me *Client) initiateHandshakes(c *connection, t *torrent) (ok bool, err error) {
	if c.encrypted {
		c.rw, err = mse.InitiateHandshake(c.rw, t.InfoHash[:], nil)
		if err != nil {
			return
		}
	}
	ih, ok, err := me.connBTHandshake(c, &t.InfoHash)
	if ih != t.InfoHash {
		ok = false
	}
	return
}

// Do encryption and bittorrent handshakes as receiver.
func (cl *Client) receiveHandshakes(c *connection) (t *torrent, err error) {
	cl.mu.Lock()
	skeys := cl.receiveSkeys()
	cl.mu.Unlock()
	if !cl.config.DisableEncryption {
		c.rw, c.encrypted, err = maybeReceiveEncryptedHandshake(c.rw, skeys)
		if err != nil {
			if err == mse.ErrNoSecretKeyMatch {
				err = nil
			}
			return
		}
	}
	ih, ok, err := cl.connBTHandshake(c, nil)
	if err != nil {
		err = fmt.Errorf("error during bt handshake: %s", err)
		return
	}
	if !ok {
		return
	}
	cl.mu.Lock()
	t = cl.torrents[ih]
	cl.mu.Unlock()
	return
}

// Returns !ok if handshake failed for valid reasons.
func (cl *Client) connBTHandshake(c *connection, ih *InfoHash) (ret InfoHash, ok bool, err error) {
	res, ok, err := handshake(c.rw, ih, cl.peerID, cl.extensionBytes)
	if err != nil || !ok {
		return
	}
	ret = res.InfoHash
	c.PeerExtensionBytes = res.peerExtensionBytes
	c.PeerID = res.peerID
	c.completedHandshake = time.Now()
	return
}

func (cl *Client) runInitiatedHandshookConn(c *connection, t *torrent) (err error) {
	if c.PeerID == cl.peerID {
		// Only if we initiated the connection is the remote address a
		// listen addr for a doppleganger.
		connsToSelf.Add(1)
		addr := c.conn.RemoteAddr().String()
		cl.dopplegangerAddrs[addr] = struct{}{}
		return
	}
	return cl.runHandshookConn(c, t)
}

func (cl *Client) runReceivedConn(c *connection) (err error) {
	err = c.conn.SetDeadline(time.Now().Add(handshakesTimeout))
	if err != nil {
		return
	}
	t, err := cl.receiveHandshakes(c)
	if err != nil {
		err = fmt.Errorf("error receiving handshakes: %s", err)
		return
	}
	if t == nil {
		return
	}
	cl.mu.Lock()
	defer cl.mu.Unlock()
	if c.PeerID == cl.peerID {
		return
	}
	return cl.runHandshookConn(c, t)
}

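// Wires up a post-handshake connection (read deadlines, stats), registers it
// with the torrent, sends the initial messages, and runs the connection's
// message loop.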
func (cl *Client) runHandshookConn(c *connection, t *torrent) (err error) {
	c.conn.SetWriteDeadline(time.Time{})
	c.rw = readWriter{
		deadlineReader{c.conn, c.rw},
		c.rw,
	}
	completedHandshakeConnectionFlags.Add(c.connectionFlags(), 1)
	if !cl.addConnection(t, c) {
		return
	}
	defer cl.dropConnection(t, c)
	go c.writer()
	go c.writeOptimizer(time.Minute)
	cl.sendInitialMessages(c, t)
	if t.haveInfo() {
		t.initRequestOrdering(c)
	}
	err = cl.connectionLoop(t, c)
	if err != nil {
		err = fmt.Errorf("error during connection loop: %s", err)
	}
	return
}

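// Sends the extended handshake, the initial bitfield (or HaveNone where the
// fast extension is supported), and our DHT port, as supported by both ends.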
func (me *Client) sendInitialMessages(conn *connection, torrent *torrent) {
	if conn.PeerExtensionBytes.SupportsExtended() && me.extensionBytes.SupportsExtended() {
		conn.Post(pp.Message{
			Type:       pp.Extended,
			ExtendedID: pp.HandshakeExtendedID,
			ExtendedPayload: func() []byte {
				d := map[string]interface{}{
					"m": func() (ret map[string]int) {
						ret = make(map[string]int, 2)
						ret["ut_metadata"] = metadataExtendedId
						if !me.config.DisablePEX {
							ret["ut_pex"] = pexExtendedId
						}
						return
					}(),
					"v": extendedHandshakeClientVersion,
					// No upload queue is implemented yet.
					"reqq": 64,
				}
				if !me.config.DisableEncryption {
					d["e"] = 1
				}
				if torrent.metadataSizeKnown() {
					d["metadata_size"] = torrent.metadataSize()
				}
				if p := me.incomingPeerPort(); p != 0 {
					d["p"] = p
				}
				yourip, err := addrCompactIP(conn.remoteAddr())
				if err != nil {
					log.Printf("error calculating yourip field value in extension handshake: %s", err)
				} else {
					d["yourip"] = yourip
				}
				// log.Printf("sending %v", d)
				b, err := bencode.Marshal(d)
				if err != nil {
					panic(err)
				}
				return b
			}(),
		})
	}
	if torrent.haveAnyPieces() {
		conn.Bitfield(torrent.bitfield())
	} else if me.extensionBytes.SupportsFast() && conn.PeerExtensionBytes.SupportsFast() {
		conn.Post(pp.Message{
			Type: pp.HaveNone,
		})
	}
	if conn.PeerExtensionBytes.SupportsDHT() && me.extensionBytes.SupportsDHT() && me.dHT != nil {
		conn.Post(pp.Message{
			Type: pp.Port,
			Port: uint16(AddrPort(me.dHT.Addr())),
		})
	}
}

// Randomizes the piece order for this connection. Every connection will be
// given a different ordering. Having it stored per connection saves having to
// randomize during request filling, and constantly recalculate the ordering
// based on piece priorities.
func (t *torrent) initRequestOrdering(c *connection) {
	if c.pieceRequestOrder != nil || c.piecePriorities != nil {
		panic("double init of request ordering")
	}
	c.pieceRequestOrder = pieceordering.New()
	for i := range iter.N(t.Info.NumPieces()) {
		if !c.PeerHasPiece(i) {
			continue
		}
		if !t.wantPiece(i) {
			continue
		}
		t.connPendPiece(c, i)
	}
}

func (me *Client) peerGotPiece(t *torrent, c *connection, piece int) error {
	if !c.peerHasAll {
		if t.haveInfo() {
			if c.PeerPieces == nil {
				c.PeerPieces = make([]bool, t.numPieces())
			}
		} else {
			for piece >= len(c.PeerPieces) {
				c.PeerPieces = append(c.PeerPieces, false)
			}
		}
		if piece >= len(c.PeerPieces) {
			return errors.New("peer got out of range piece index")
		}
		c.PeerPieces[piece] = true
	}
	if t.wantPiece(piece) {
		t.connPendPiece(c, piece)
		me.replenishConnRequests(t, c)
	}
	return nil
}

func (me *Client) peerUnchoked(torrent *torrent, conn *connection) {
	me.replenishConnRequests(torrent, conn)
}

func (cl *Client) connCancel(t *torrent, cn *connection, r request) (ok bool) {
	ok = cn.Cancel(r)
	if ok {
		postedCancels.Add(1)
	}
	return
}

func (cl *Client) connDeleteRequest(t *torrent, cn *connection, r request) bool {
	if !cn.RequestPending(r) {
		return false
	}
	delete(cn.Requests, r)
	return true
}

func (cl *Client) requestPendingMetadata(t *torrent, c *connection) {
	if t.haveInfo() {
		return
	}
	if c.PeerExtensionIDs["ut_metadata"] == 0 {
		// Peer doesn't support this.
		return
	}
	// Request metadata pieces that we don't have in a random order.
	var pending []int
	for index := 0; index < t.metadataPieceCount(); index++ {
		if !t.haveMetadataPiece(index) && !c.requestedMetadataPiece(index) {
			pending = append(pending, index)
		}
	}
	for _, i := range mathRand.Perm(len(pending)) {
		c.requestMetadataPiece(pending[i])
	}
}

func (cl *Client) completedMetadata(t *torrent) {
|
|
|
|
h := sha1.New()
|
|
|
|
h.Write(t.MetaData)
|
|
|
|
var ih InfoHash
|
2014-08-21 16:12:49 +08:00
|
|
|
CopyExact(&ih, h.Sum(nil))
|
2014-06-28 17:38:31 +08:00
|
|
|
if ih != t.InfoHash {
|
|
|
|
log.Print("bad metadata")
|
2015-02-25 12:42:47 +08:00
|
|
|
t.invalidateMetadata()
|
2014-06-28 17:38:31 +08:00
|
|
|
return
|
|
|
|
}
|
|
|
|
var info metainfo.Info
|
|
|
|
err := bencode.Unmarshal(t.MetaData, &info)
|
|
|
|
if err != nil {
|
|
|
|
log.Printf("error unmarshalling metadata: %s", err)
|
2015-02-25 12:42:47 +08:00
|
|
|
t.invalidateMetadata()
|
2014-06-28 17:38:31 +08:00
|
|
|
return
|
|
|
|
}
|
2014-07-14 21:12:52 +08:00
|
|
|
// TODO(anacrolix): If this fails, I think something harsher should be
|
|
|
|
// done.
|
2015-03-18 15:32:31 +08:00
|
|
|
err = cl.setMetaData(t, &info, t.MetaData)
|
2014-06-29 13:45:21 +08:00
|
|
|
if err != nil {
|
|
|
|
log.Printf("error setting metadata: %s", err)
|
2015-02-25 12:42:47 +08:00
|
|
|
t.invalidateMetadata()
|
2014-06-29 13:45:21 +08:00
|
|
|
return
|
|
|
|
}
|
2015-09-17 10:40:35 +08:00
|
|
|
if cl.config.Debug {
|
|
|
|
log.Printf("%s: got metadata from peers", t)
|
|
|
|
}
|
2014-06-28 17:38:31 +08:00
|
|
|
}
|
|
|
|
|
// Process incoming ut_metadata message.
func (cl *Client) gotMetadataExtensionMsg(payload []byte, t *torrent, c *connection) (err error) {
	var d map[string]int
	err = bencode.Unmarshal(payload, &d)
	if err != nil {
		err = fmt.Errorf("error unmarshalling payload: %s: %q", err, payload)
		return
	}
	msgType, ok := d["msg_type"]
	if !ok {
		err = errors.New("missing msg_type field")
		return
	}
	piece := d["piece"]
	switch msgType {
	case pp.DataMetadataExtensionMsgType:
		if t.haveInfo() {
			break
		}
		begin := len(payload) - metadataPieceSize(d["total_size"], piece)
		if begin < 0 || begin >= len(payload) {
			log.Printf("got bad metadata piece")
			break
		}
		if !c.requestedMetadataPiece(piece) {
			log.Printf("got unexpected metadata piece %d", piece)
			break
		}
		c.metadataRequests[piece] = false
		t.saveMetadataPiece(piece, payload[begin:])
		c.UsefulChunksReceived++
		c.lastUsefulChunkReceived = time.Now()
		if !t.haveAllMetadataPieces() {
			break
		}
		cl.completedMetadata(t)
	case pp.RequestMetadataExtensionMsgType:
		if !t.haveMetadataPiece(piece) {
			c.Post(t.newMetadataExtensionMessage(c, pp.RejectMetadataExtensionMsgType, d["piece"], nil))
			break
		}
		start := (1 << 14) * piece
		c.Post(t.newMetadataExtensionMessage(c, pp.DataMetadataExtensionMsgType, piece, t.MetaData[start:start+t.metadataPieceSize(piece)]))
	case pp.RejectMetadataExtensionMsgType:
	default:
		err = errors.New("unknown msg_type value")
	}
	return
}

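// A worked example of the ut_metadata offsets used above (assuming the usual
// 16 KiB metadata piece size, i.e. 1<<14): for a 45000-byte info dict,
// metadataPieceSize(45000, 2) is 45000-2*16384 = 12232, so the incoming piece
// data is the final 12232 bytes of the payload, and an outgoing request for
// piece 2 reads MetaData[32768 : 32768+12232].
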
// Extracts the port as an integer from an address string.
func addrPort(addr net.Addr) int {
	return AddrPort(addr)
}

func (cl *Client) peerHasAll(t *torrent, cn *connection) {
	cn.peerHasAll = true
	cn.PeerPieces = nil
	if t.haveInfo() {
		for i := 0; i < t.numPieces(); i++ {
			cl.peerGotPiece(t, cn, i)
		}
	}
}

func (me *Client) upload(t *torrent, c *connection) {
	if me.config.NoUpload {
		return
	}
	if !c.PeerInterested {
		return
	}
	seeding := me.seeding(t)
	if !seeding && !t.connHasWantedPieces(c) {
		return
	}
another:
	for seeding || c.chunksSent < c.UsefulChunksReceived+6 {
		c.Unchoke()
		for r := range c.PeerRequests {
			err := me.sendChunk(t, c, r)
			if err != nil {
				log.Printf("error sending chunk %+v to peer: %s", r, err)
			}
			delete(c.PeerRequests, r)
			goto another
		}
		return
	}
	c.Choke()
}

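// The loop above is a simple reciprocation heuristic rather than a full
// tit-for-tat scheme: while not seeding, the connection is unchoked only
// until chunksSent reaches UsefulChunksReceived+6. For example, a peer that
// has supplied 10 useful chunks can be sent up to 16 chunks before it is
// choked again until it reciprocates further.
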
func (me *Client) sendChunk(t *torrent, c *connection, r request) error {
	// Count the chunk as sent, even if it ends up not being sent.
	c.chunksSent++
	b := make([]byte, r.Length)
	tp := &t.Pieces[r.Index]
	tp.pendingWritesMutex.Lock()
	for tp.pendingWrites != 0 {
		tp.noPendingWrites.Wait()
	}
	tp.pendingWritesMutex.Unlock()
	p := t.Info.Piece(int(r.Index))
	n, err := dataReadAt(t.data, b, p.Offset()+int64(r.Begin))
	if err != nil {
		return err
	}
	if n != len(b) {
		log.Fatal(b)
	}
	c.Post(pp.Message{
		Type:  pp.Piece,
		Index: r.Index,
		Begin: r.Begin,
		Piece: b,
	})
	uploadChunksPosted.Add(1)
	c.lastChunkSent = time.Now()
	return nil
}

// Processes incoming bittorrent messages. The client lock is held upon entry
// and exit.
func (me *Client) connectionLoop(t *torrent, c *connection) error {
	decoder := pp.Decoder{
		R:         bufio.NewReader(c.rw),
		MaxLength: 256 * 1024,
	}
	for {
		me.mu.Unlock()
		var msg pp.Message
		err := decoder.Decode(&msg)
		receivedMessageTypes.Add(strconv.FormatInt(int64(msg.Type), 10), 1)
		me.mu.Lock()
		c.lastMessageReceived = time.Now()
		select {
		case <-c.closing:
			return nil
		default:
		}
		if err != nil {
			if me.stopped() || err == io.EOF {
				return nil
			}
			return err
		}
		if msg.Keepalive {
			continue
		}
		switch msg.Type {
		case pp.Choke:
			c.PeerChoked = true
			for r := range c.Requests {
				me.connDeleteRequest(t, c, r)
			}
			// We can then reset our interest.
			me.replenishConnRequests(t, c)
		case pp.Reject:
			me.connDeleteRequest(t, c, newRequest(msg.Index, msg.Begin, msg.Length))
			me.replenishConnRequests(t, c)
		case pp.Unchoke:
			c.PeerChoked = false
			me.peerUnchoked(t, c)
		case pp.Interested:
			c.PeerInterested = true
			me.upload(t, c)
		case pp.NotInterested:
			c.PeerInterested = false
			c.Choke()
		case pp.Have:
			me.peerGotPiece(t, c, int(msg.Index))
		case pp.Request:
			if c.Choked {
				break
			}
			if !c.PeerInterested {
				err = errors.New("peer sent request but isn't interested")
				break
			}
			if c.PeerRequests == nil {
				c.PeerRequests = make(map[request]struct{}, maxRequests)
			}
			c.PeerRequests[newRequest(msg.Index, msg.Begin, msg.Length)] = struct{}{}
			me.upload(t, c)
		case pp.Cancel:
			req := newRequest(msg.Index, msg.Begin, msg.Length)
			if !c.PeerCancel(req) {
				unexpectedCancels.Add(1)
			}
		case pp.Bitfield:
			if c.PeerPieces != nil || c.peerHasAll {
				err = errors.New("received unexpected bitfield")
				break
			}
			if t.haveInfo() {
				if len(msg.Bitfield) < t.numPieces() {
					err = errors.New("received invalid bitfield")
					break
				}
				msg.Bitfield = msg.Bitfield[:t.numPieces()]
			}
			c.PeerPieces = msg.Bitfield
			for index, has := range c.PeerPieces {
				if has {
					me.peerGotPiece(t, c, index)
				}
			}
		case pp.HaveAll:
			if c.PeerPieces != nil || c.peerHasAll {
				err = errors.New("unexpected have-all")
				break
			}
			me.peerHasAll(t, c)
		case pp.HaveNone:
			if c.peerHasAll || c.PeerPieces != nil {
				err = errors.New("unexpected have-none")
				break
			}
			c.PeerPieces = make([]bool, func() int {
				if t.haveInfo() {
					return t.numPieces()
				} else {
					return 0
				}
			}())
		case pp.Piece:
			err = me.downloadedChunk(t, c, &msg)
		case pp.Extended:
			switch msg.ExtendedID {
			case pp.HandshakeExtendedID:
				// TODO: Create a bencode struct for this.
				var d map[string]interface{}
				err = bencode.Unmarshal(msg.ExtendedPayload, &d)
				if err != nil {
					err = fmt.Errorf("error decoding extended message payload: %s", err)
					break
				}
				// log.Printf("got handshake from %q: %#v", c.Socket.RemoteAddr().String(), d)
				if reqq, ok := d["reqq"]; ok {
					if i, ok := reqq.(int64); ok {
						c.PeerMaxRequests = int(i)
					}
				}
				if v, ok := d["v"]; ok {
					c.PeerClientName = v.(string)
				}
				m, ok := d["m"]
				if !ok {
					err = errors.New("handshake missing m item")
					break
				}
				mTyped, ok := m.(map[string]interface{})
				if !ok {
					err = errors.New("handshake m value is not dict")
					break
				}
				if c.PeerExtensionIDs == nil {
					c.PeerExtensionIDs = make(map[string]byte, len(mTyped))
				}
				for name, v := range mTyped {
					id, ok := v.(int64)
					if !ok {
						log.Printf("bad handshake m item extension ID type: %T", v)
						continue
					}
					if id == 0 {
						delete(c.PeerExtensionIDs, name)
					} else {
						if c.PeerExtensionIDs[name] == 0 {
							supportedExtensionMessages.Add(name, 1)
						}
						c.PeerExtensionIDs[name] = byte(id)
					}
				}
				metadata_sizeUntyped, ok := d["metadata_size"]
				if ok {
					metadata_size, ok := metadata_sizeUntyped.(int64)
					if !ok {
						log.Printf("bad metadata_size type: %T", metadata_sizeUntyped)
					} else {
						t.setMetadataSize(metadata_size, me)
					}
				}
				if _, ok := c.PeerExtensionIDs["ut_metadata"]; ok {
					me.requestPendingMetadata(t, c)
				}
			case metadataExtendedId:
				err = me.gotMetadataExtensionMsg(msg.ExtendedPayload, t, c)
				if err != nil {
					err = fmt.Errorf("error handling metadata extension message: %s", err)
				}
			case pexExtendedId:
				if me.config.DisablePEX {
					break
				}
				var pexMsg peerExchangeMessage
				err := bencode.Unmarshal(msg.ExtendedPayload, &pexMsg)
				if err != nil {
					err = fmt.Errorf("error unmarshalling PEX message: %s", err)
					break
				}
				go func() {
					me.mu.Lock()
					me.addPeers(t, func() (ret []Peer) {
						for i, cp := range pexMsg.Added {
							p := Peer{
								IP:     make([]byte, 4),
								Port:   int(cp.Port),
								Source: peerSourcePEX,
							}
							if i < len(pexMsg.AddedFlags) && pexMsg.AddedFlags[i]&0x01 != 0 {
								p.SupportsEncryption = true
							}
							CopyExact(p.IP, cp.IP[:])
							ret = append(ret, p)
						}
						return
					}())
					me.mu.Unlock()
				}()
			default:
				err = fmt.Errorf("unexpected extended message ID: %v", msg.ExtendedID)
			}
			if err != nil {
				// That client uses its own extension IDs for outgoing message
				// types, which is incorrect.
				if bytes.HasPrefix(c.PeerID[:], []byte("-SD0100-")) ||
					strings.HasPrefix(string(c.PeerID[:]), "-XL0012-") {
					return nil
				}
			}
		case pp.Port:
			if me.dHT == nil {
				break
			}
			pingAddr, err := net.ResolveUDPAddr("", c.remoteAddr().String())
			if err != nil {
				panic(err)
			}
			if msg.Port != 0 {
				pingAddr.Port = int(msg.Port)
			}
			_, err = me.dHT.Ping(pingAddr)
		default:
			err = fmt.Errorf("received unknown message type: %#v", msg.Type)
		}
		if err != nil {
			return err
		}
	}
}

// Returns true if connection is removed from torrent.Conns.
func (me *Client) deleteConnection(t *torrent, c *connection) bool {
	for i0, _c := range t.Conns {
		if _c != c {
			continue
		}
		i1 := len(t.Conns) - 1
		if i0 != i1 {
			t.Conns[i0] = t.Conns[i1]
		}
		t.Conns = t.Conns[:i1]
		return true
	}
	return false
}

func (me *Client) dropConnection(t *torrent, c *connection) {
	me.event.Broadcast()
	c.Close()
	if c.piecePriorities != nil {
		t.connPiecePriorites.Put(c.piecePriorities)
		// I wonder if it's safe to set it to nil. Probably not. Since it's
		// only read, it doesn't particularly matter if a closing connection
		// shares the slice with another connection.
	}
	if me.deleteConnection(t, c) {
		me.openNewConns(t)
	}
}

// Returns true if the connection is added.
func (me *Client) addConnection(t *torrent, c *connection) bool {
	if me.stopped() {
		return false
	}
	select {
	case <-t.ceasingNetworking:
		return false
	default:
	}
	if !me.wantConns(t) {
		return false
	}
	for _, c0 := range t.Conns {
		if c.PeerID == c0.PeerID {
			// Already connected to a client with that ID.
			duplicateClientConns.Add(1)
			return false
		}
	}
	if len(t.Conns) >= socketsPerTorrent {
		c := t.worstBadConn(me)
		if c == nil {
			return false
		}
		if me.config.Debug && missinggo.CryHeard() {
			log.Printf("%s: dropping connection to make room for new one:\n %s", t, c)
		}
		c.Close()
		me.deleteConnection(t, c)
	}
	if len(t.Conns) >= socketsPerTorrent {
		panic(len(t.Conns))
	}
	t.Conns = append(t.Conns, c)
	return true
}

func (t *torrent) needData() bool {
	if !t.haveInfo() {
		return true
	}
	if len(t.urgent) != 0 {
		return true
	}
	for i := range t.Pieces {
		p := &t.Pieces[i]
		if p.Priority != PiecePriorityNone {
			return true
		}
	}
	return false
}

func (cl *Client) usefulConn(t *torrent, c *connection) bool {
	select {
	case <-c.closing:
		return false
	default:
	}
	if !t.haveInfo() {
		return c.supportsExtension("ut_metadata")
	}
	if cl.seeding(t) {
		return c.PeerInterested
	}
	return t.connHasWantedPieces(c)
}

func (me *Client) wantConns(t *torrent) bool {
	if !me.seeding(t) && !t.needData() {
		return false
	}
	if len(t.Conns) < socketsPerTorrent {
		return true
	}
	return t.worstBadConn(me) != nil
}

func (me *Client) openNewConns(t *torrent) {
	select {
	case <-t.ceasingNetworking:
		return
	default:
	}
	for len(t.Peers) != 0 {
		if !me.wantConns(t) {
			return
		}
		if len(t.HalfOpen) >= me.halfOpenLimit {
			return
		}
		var (
			k PeersKey
			p Peer
		)
		for k, p = range t.Peers {
			break
		}
		delete(t.Peers, k)
		me.initiateConn(p, t)
	}
	t.wantPeers.Broadcast()
}

func (me *Client) addPeers(t *torrent, peers []Peer) {
	for _, p := range peers {
		if me.dopplegangerAddr(net.JoinHostPort(
			p.IP.String(),
			strconv.FormatInt(int64(p.Port), 10),
		)) {
			continue
		}
		if _, ok := me.ipBlockRange(p.IP); ok {
			continue
		}
		if p.Port == 0 {
			// The spec says to scrub these yourselves. Fine.
			continue
		}
		t.addPeer(p, me)
	}
}

func (cl *Client) cachedMetaInfoFilename(ih InfoHash) string {
	return filepath.Join(cl.configDir(), "torrents", ih.HexString()+".torrent")
}

func (cl *Client) saveTorrentFile(t *torrent) error {
	path := cl.cachedMetaInfoFilename(t.InfoHash)
	os.MkdirAll(filepath.Dir(path), 0777)
	f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
	if err != nil {
		return fmt.Errorf("error opening file: %s", err)
	}
	defer f.Close()
	e := bencode.NewEncoder(f)
	err = e.Encode(t.MetaInfo())
	if err != nil {
		return fmt.Errorf("error marshalling metainfo: %s", err)
	}
	mi, err := cl.torrentCacheMetaInfo(t.InfoHash)
	if err != nil {
		// For example, a script kiddy makes us load too many files, and we're
		// able to save the torrent, but not load it again to check it.
		return nil
	}
	if !bytes.Equal(mi.Info.Hash, t.InfoHash[:]) {
		log.Fatalf("%x != %x", mi.Info.Hash, t.InfoHash[:])
	}
	return nil
}

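// For example, given the path construction above, the metainfo for an
// infohash whose hex form is "deadbeef..." ends up cached at
// <configDir>/torrents/deadbeef....torrent, where configDir comes from the
// Client's configuration.
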
func (cl *Client) startTorrent(t *torrent) {
	if t.Info == nil || t.data == nil {
		panic("nope")
	}
	// If the client intends to upload, it needs to know what state pieces are
	// in.
	if !cl.config.NoUpload {
		// Queue all pieces for hashing. This is done sequentially to avoid
		// spamming goroutines.
		for i := range t.Pieces {
			t.Pieces[i].QueuedForHash = true
		}
		go func() {
			for i := range t.Pieces {
				cl.verifyPiece(t, i)
			}
		}()
	}
}

// Storage cannot be changed once it's set.
func (cl *Client) setStorage(t *torrent, td Data) (err error) {
	err = t.setStorage(td)
	cl.event.Broadcast()
	if err != nil {
		return
	}
	cl.startTorrent(t)
	return
}

type TorrentDataOpener func(*metainfo.Info) Data

func (cl *Client) setMetaData(t *torrent, md *metainfo.Info, bytes []byte) (err error) {
	err = t.setMetadata(md, bytes)
	if err != nil {
		return
	}
	if !cl.config.DisableMetainfoCache {
		if err := cl.saveTorrentFile(t); err != nil {
			log.Printf("error saving torrent file for %s: %s", t, err)
		}
	}
	cl.event.Broadcast()
	close(t.gotMetainfo)
	td := cl.torrentDataOpener(md)
	err = cl.setStorage(t, td)
	return
}

// Prepare a Torrent without any attachment to a Client. That means we can
// initialize all the fields that don't require the Client without locking
// it.
func newTorrent(ih InfoHash) (t *torrent, err error) {
	t = &torrent{
		InfoHash:  ih,
		chunkSize: defaultChunkSize,
		Peers:     make(map[PeersKey]Peer),

		closing:           make(chan struct{}),
		ceasingNetworking: make(chan struct{}),

		gotMetainfo: make(chan struct{}),

		HalfOpen:          make(map[string]struct{}),
		pieceStateChanges: pubsub.NewPubSub(),
	}
	t.wantPeers.L = &t.stateMu
	return
}

func init() {
	// For shuffling the tracker tiers.
	mathRand.Seed(time.Now().Unix())
}

// The trackers within each tier must be shuffled before use.
// http://stackoverflow.com/a/12267471/149482
// http://www.bittorrent.org/beps/bep_0012.html#order-of-processing
func shuffleTier(tier []tracker.Client) {
	for i := range tier {
		j := mathRand.Intn(i + 1)
		tier[i], tier[j] = tier[j], tier[i]
	}
}

func copyTrackers(base [][]tracker.Client) (copy [][]tracker.Client) {
	for _, tier := range base {
		copy = append(copy, append([]tracker.Client{}, tier...))
	}
	return
}

func mergeTier(tier []tracker.Client, newURLs []string) []tracker.Client {
nextURL:
	for _, url := range newURLs {
		for _, tr := range tier {
			if tr.URL() == url {
				continue nextURL
			}
		}
		tr, err := tracker.New(url)
		if err != nil {
			// log.Printf("error creating tracker client for %q: %s", url, err)
			continue
		}
		tier = append(tier, tr)
	}
	return tier
}

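// For example, merging newURLs ["udp://a", "http://b"] into a tier that
// already contains a client for "udp://a" leaves that entry untouched and
// appends a new client for "http://b"; URLs that tracker.New rejects are
// skipped silently.
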
func (t *torrent) addTrackers(announceList [][]string) {
	newTrackers := copyTrackers(t.Trackers)
	for tierIndex, tier := range announceList {
		if tierIndex < len(newTrackers) {
			newTrackers[tierIndex] = mergeTier(newTrackers[tierIndex], tier)
		} else {
			newTrackers = append(newTrackers, mergeTier(nil, tier))
		}
		shuffleTier(newTrackers[tierIndex])
	}
	t.Trackers = newTrackers
}

// Don't call this before the info is available.
func (t *torrent) bytesCompleted() int64 {
	if !t.haveInfo() {
		return 0
	}
	return t.Info.TotalLength() - t.bytesLeft()
}

// A file-like handle to some torrent data resource.
type Handle interface {
	io.Reader
	io.Seeker
	io.Closer
	io.ReaderAt
}

// Returns handles to the files in the torrent. This requires that the
// metainfo is available first.
func (t Torrent) Files() (ret []File) {
	t.cl.mu.Lock()
	info := t.Info()
	t.cl.mu.Unlock()
	if info == nil {
		return
	}
	var offset int64
	for _, fi := range info.UpvertedFiles() {
		ret = append(ret, File{
			t,
			strings.Join(append([]string{info.Name}, fi.Path...), "/"),
			offset,
			fi.Length,
			fi,
		})
		offset += fi.Length
	}
	return
}

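// A minimal usage sketch (assuming the info is already available, and
// assuming the File type exposes Path and Length accessors for the fields
// populated above; those names are an assumption, not confirmed here):
//
//	for _, f := range t.Files() {
//		log.Printf("%s (%d bytes)", f.Path(), f.Length())
//	}
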
// Marks the pieces in the given region for download.
func (t Torrent) SetRegionPriority(off, len int64) {
	t.cl.mu.Lock()
	defer t.cl.mu.Unlock()
	pieceSize := int64(t.usualPieceSize())
	for i := off / pieceSize; i*pieceSize < off+len; i++ {
		t.cl.raisePiecePriority(t.torrent, int(i), PiecePriorityNormal)
	}
}

func (t Torrent) AddPeers(pp []Peer) error {
	cl := t.cl
	cl.mu.Lock()
	defer cl.mu.Unlock()
	cl.addPeers(t.torrent, pp)
	return nil
}

// Marks the entire torrent for download. Requires the info first, see
// GotInfo.
func (t Torrent) DownloadAll() {
	t.cl.mu.Lock()
	defer t.cl.mu.Unlock()
	for i := range iter.N(t.numPieces()) {
		t.cl.raisePiecePriority(t.torrent, i, PiecePriorityNormal)
	}
	// Nice to have the first and last pieces sooner for various interactive
	// purposes.
	t.cl.raisePiecePriority(t.torrent, 0, PiecePriorityReadahead)
	t.cl.raisePiecePriority(t.torrent, t.numPieces()-1, PiecePriorityReadahead)
}

// Returns nil metainfo if it isn't in the cache. Checks that the retrieved
// metainfo has the correct infohash.
func (cl *Client) torrentCacheMetaInfo(ih InfoHash) (mi *metainfo.MetaInfo, err error) {
	if cl.config.DisableMetainfoCache {
		return
	}
	f, err := os.Open(cl.cachedMetaInfoFilename(ih))
	if err != nil {
		if os.IsNotExist(err) {
			err = nil
		}
		return
	}
	defer f.Close()
	dec := bencode.NewDecoder(f)
	err = dec.Decode(&mi)
	if err != nil {
		return
	}
	if !bytes.Equal(mi.Info.Hash, ih[:]) {
		err = fmt.Errorf("cached torrent has wrong infohash: %x != %x", mi.Info.Hash, ih[:])
		return
	}
	return
}

// Specifies a new torrent for adding to a client. There are helpers for
// magnet URIs and torrent metainfo files.
type TorrentSpec struct {
	// The tiered tracker URIs.
	Trackers [][]string
	InfoHash InfoHash
	Info     *metainfo.InfoEx
	// The name to use if the Name field from the Info isn't available.
	DisplayName string
	// The chunk size to use for outbound requests. Defaults to 16KiB if not
	// set.
	ChunkSize int
}

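// A hypothetical spec for an info-less (magnet-style) addition might look
// like this; the tracker URL and infohash are placeholders, not real values:
//
//	spec := &TorrentSpec{
//		Trackers:    [][]string{{"udp://tracker.example.invalid:80/announce"}},
//		InfoHash:    ih, // a previously obtained InfoHash
//		DisplayName: "some name",
//	}
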
func TorrentSpecFromMagnetURI(uri string) (spec *TorrentSpec, err error) {
	m, err := ParseMagnetURI(uri)
	if err != nil {
		return
	}
	spec = &TorrentSpec{
		Trackers:    [][]string{m.Trackers},
		DisplayName: m.DisplayName,
		InfoHash:    m.InfoHash,
	}
	return
}

func TorrentSpecFromMetaInfo(mi *metainfo.MetaInfo) (spec *TorrentSpec) {
	spec = &TorrentSpec{
		Trackers:    mi.AnnounceList,
		Info:        &mi.Info,
		DisplayName: mi.Info.Name,
	}

	if len(spec.Trackers) == 0 {
		spec.Trackers = [][]string{[]string{mi.Announce}}
	} else {
		spec.Trackers[0] = append(spec.Trackers[0], mi.Announce)
	}

	CopyExact(&spec.InfoHash, &mi.Info.Hash)
	return
}

// Add or merge a torrent spec. If the torrent is already present, the
// trackers will be merged with the existing ones. If the Info isn't yet
// known, it will be set. The display name is replaced if the new spec
// provides one. Returns new if the torrent wasn't already in the client.
func (cl *Client) AddTorrentSpec(spec *TorrentSpec) (D Download, new bool, err error) {
	T := Torrent{}
	T.cl = cl
	D = &T
	cl.mu.Lock()
	defer cl.mu.Unlock()

	t, ok := cl.torrents[spec.InfoHash]
	if !ok {
		new = true

		if _, ok := cl.bannedTorrents[spec.InfoHash]; ok {
			err = errors.New("banned torrent")
			return
		}

		t, err = newTorrent(spec.InfoHash)
		if err != nil {
			return
		}
		if spec.ChunkSize != 0 {
			t.chunkSize = pp.Integer(spec.ChunkSize)
		}
	}
	if spec.DisplayName != "" {
		t.setDisplayName(spec.DisplayName)
	}
	// Try to merge in info we have on the torrent. Any err left will
	// terminate the function.
	if t.Info == nil {
		if spec.Info != nil {
			err = cl.setMetaData(t, &spec.Info.Info, spec.Info.Bytes)
		} else {
			var mi *metainfo.MetaInfo
			mi, err = cl.torrentCacheMetaInfo(spec.InfoHash)
			if err != nil {
				log.Printf("error getting cached metainfo: %s", err)
				err = nil
			} else if mi != nil {
				t.addTrackers(mi.AnnounceList)
				err = cl.setMetaData(t, &mi.Info.Info, mi.Info.Bytes)
			}
		}
	}
	if err != nil {
		return
	}
	t.addTrackers(spec.Trackers)

	cl.torrents[spec.InfoHash] = t
	T.torrent = t

	// From this point onwards, we can consider the torrent a part of the
	// client.
	if new {
		if !cl.config.DisableTrackers {
			go cl.announceTorrentTrackers(T.torrent)
		}
		if cl.dHT != nil {
			go cl.announceTorrentDHT(T.torrent, true)
		}
	}
	return
}

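// A minimal usage sketch (assuming a configured *Client named cl and a spec
// built with one of the helpers above):
//
//	T, isNew, err := cl.AddTorrentSpec(spec)
//	if err != nil {
//		log.Fatal(err)
//	}
//	_ = isNew // true if the torrent wasn't already present
//	_ = T     // the Download handle for further control
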
func (me *Client) dropTorrent(infoHash InfoHash) (err error) {
	t, ok := me.torrents[infoHash]
	if !ok {
		err = fmt.Errorf("no such torrent")
		return
	}
	err = t.close()
	if err != nil {
		panic(err)
	}
	delete(me.torrents, infoHash)
	return
}

// Returns true when peers are required, or false if the torrent is closing.
func (cl *Client) waitWantPeers(t *torrent) bool {
	cl.mu.Lock()
	defer cl.mu.Unlock()
	t.stateMu.Lock()
	defer t.stateMu.Unlock()
	for {
		select {
		case <-t.ceasingNetworking:
			return false
		default:
		}
		if len(t.Peers) > torrentPeersLowWater {
			goto wait
		}
		if t.needData() || cl.seeding(t) {
			return true
		}
	wait:
		cl.mu.Unlock()
		t.wantPeers.Wait()
		t.stateMu.Unlock()
		cl.mu.Lock()
		t.stateMu.Lock()
	}
}

// Returns whether the client should make effort to seed the torrent.
func (cl *Client) seeding(t *torrent) bool {
	if cl.config.NoUpload {
		return false
	}
	if !cl.config.Seed {
		return false
	}
	if t.needData() {
		return false
	}
	return true
}

func (cl *Client) announceTorrentDHT(t *torrent, impliedPort bool) {
	for cl.waitWantPeers(t) {
		// log.Printf("getting peers for %q from DHT", t)
		ps, err := cl.dHT.Announce(string(t.InfoHash[:]), cl.incomingPeerPort(), impliedPort)
		if err != nil {
			log.Printf("error getting peers from dht: %s", err)
			return
		}
		// Count all the unique addresses we got during this announce.
		allAddrs := make(map[string]struct{})
	getPeers:
		for {
			select {
			case v, ok := <-ps.Peers:
				if !ok {
					break getPeers
				}
				addPeers := make([]Peer, 0, len(v.Peers))
				for _, cp := range v.Peers {
					if cp.Port == 0 {
						// Can't do anything with this.
						continue
					}
					addPeers = append(addPeers, Peer{
						IP:     cp.IP[:],
						Port:   int(cp.Port),
						Source: peerSourceDHT,
					})
					key := (&net.UDPAddr{
						IP:   cp.IP[:],
						Port: int(cp.Port),
					}).String()
					allAddrs[key] = struct{}{}
				}
				cl.mu.Lock()
				cl.addPeers(t, addPeers)
				numPeers := len(t.Peers)
				cl.mu.Unlock()
				if numPeers >= torrentPeersHighWater {
					break getPeers
				}
			case <-t.ceasingNetworking:
				ps.Close()
				return
			}
		}
		ps.Close()
		// log.Printf("finished DHT peer scrape for %s: %d peers", t, len(allAddrs))
	}
}

func (cl *Client) trackerBlockedUnlocked(tr tracker.Client) (blocked bool, err error) {
	url_, err := url.Parse(tr.URL())
	if err != nil {
		return
	}
	host, _, err := net.SplitHostPort(url_.Host)
	if err != nil {
		host = url_.Host
	}
	addr, err := net.ResolveIPAddr("ip", host)
	if err != nil {
		return
	}
	cl.mu.RLock()
	_, blocked = cl.ipBlockRange(addr.IP)
	cl.mu.RUnlock()
	return
}

func (cl *Client) announceTorrentSingleTracker(tr tracker.Client, req *tracker.AnnounceRequest, t *torrent) error {
	blocked, err := cl.trackerBlockedUnlocked(tr)
	if err != nil {
		return fmt.Errorf("error determining if tracker blocked: %s", err)
	}
	if blocked {
		return fmt.Errorf("tracker blocked: %s", tr)
	}
	if err := tr.Connect(); err != nil {
		return fmt.Errorf("error connecting: %s", err)
	}
	resp, err := tr.Announce(req)
	if err != nil {
		return fmt.Errorf("error announcing: %s", err)
	}
	var peers []Peer
	for _, peer := range resp.Peers {
		peers = append(peers, Peer{
			IP:   peer.IP,
			Port: peer.Port,
		})
	}
	cl.mu.Lock()
	cl.addPeers(t, peers)
	cl.mu.Unlock()

	// log.Printf("%s: %d new peers from %s", t, len(peers), tr)

	time.Sleep(time.Second * time.Duration(resp.Interval))
	return nil
}

func (cl *Client) announceTorrentTrackersFastStart(req *tracker.AnnounceRequest, trackers [][]tracker.Client, t *torrent) (atLeastOne bool) {
	oks := make(chan bool)
	outstanding := 0
	for _, tier := range trackers {
		for _, tr := range tier {
			outstanding++
			go func(tr tracker.Client) {
				err := cl.announceTorrentSingleTracker(tr, req, t)
				oks <- err == nil
			}(tr)
		}
	}
	for outstanding > 0 {
		ok := <-oks
		outstanding--
		if ok {
			atLeastOne = true
		}
	}
	return
}

// Announce torrent to its trackers.
func (cl *Client) announceTorrentTrackers(t *torrent) {
	req := tracker.AnnounceRequest{
		Event:    tracker.Started,
		NumWant:  -1,
		Port:     uint16(cl.incomingPeerPort()),
		PeerId:   cl.peerID,
		InfoHash: t.InfoHash,
	}
	if !cl.waitWantPeers(t) {
		return
	}
	cl.mu.RLock()
	req.Left = uint64(t.bytesLeft())
	trackers := t.Trackers
	cl.mu.RUnlock()
	if cl.announceTorrentTrackersFastStart(&req, trackers, t) {
		req.Event = tracker.None
	}
newAnnounce:
	for cl.waitWantPeers(t) {
		cl.mu.RLock()
		req.Left = uint64(t.bytesLeft())
		trackers = t.Trackers
		cl.mu.RUnlock()
		numTrackersTried := 0
		for _, tier := range trackers {
			for trIndex, tr := range tier {
				numTrackersTried++
				err := cl.announceTorrentSingleTracker(tr, &req, t)
				if err != nil {
					continue
				}
				// Float the successful announce to the top of the tier. If
				// the trackers list has been changed, we'll be modifying an
				// old copy so it won't matter.
				cl.mu.Lock()
				tier[0], tier[trIndex] = tier[trIndex], tier[0]
				cl.mu.Unlock()

				req.Event = tracker.None
				continue newAnnounce
			}
		}
		if numTrackersTried != 0 {
			log.Printf("%s: all trackers failed", t)
		}
		// TODO: Wait until trackers are added if there are none.
		time.Sleep(10 * time.Second)
	}
}

func (cl *Client) allTorrentsCompleted() bool {
	for _, t := range cl.torrents {
		if !t.haveInfo() {
			return false
		}
		if t.numPiecesCompleted() != t.numPieces() {
			return false
		}
	}
	return true
}

// Returns true when all torrents are completely downloaded and false if the
// client is stopped before that.
func (me *Client) WaitAll() bool {
	me.mu.Lock()
	defer me.mu.Unlock()
	for !me.allTorrentsCompleted() {
		if me.stopped() {
			return false
		}
		me.event.Wait()
	}
	return true
}

func (me *Client) fillRequests(t *torrent, c *connection) {
	if c.Interested {
		if c.PeerChoked {
			return
		}
		if len(c.Requests) > c.requestsLowWater {
			return
		}
	}
	addRequest := func(req request) (again bool) {
		// TODO: Couldn't this check also be done *after* the request?
		if len(c.Requests) >= 64 {
			return false
		}
		return c.Request(req)
	}
	for req := range t.urgent {
		if !addRequest(req) {
			return
		}
	}
	for e := c.pieceRequestOrder.First(); e != nil; e = e.Next() {
		pieceIndex := e.Piece()
		if !c.PeerHasPiece(pieceIndex) {
			panic("piece in request order but peer doesn't have it")
		}
		if !t.wantPiece(pieceIndex) {
			log.Printf("unwanted piece %d in connection request order\n%s", pieceIndex, c)
			c.pieceRequestOrder.DeletePiece(pieceIndex)
			continue
		}
		piece := &t.Pieces[pieceIndex]
		for _, cs := range piece.shuffledPendingChunkSpecs(t.pieceLength(pieceIndex), pp.Integer(t.chunkSize)) {
			r := request{pp.Integer(pieceIndex), cs}
			if !addRequest(r) {
				return
			}
		}
	}
	return
}

func (me *Client) replenishConnRequests(t *torrent, c *connection) {
	if !t.haveInfo() {
		return
	}
	me.fillRequests(t, c)
	if len(c.Requests) == 0 && !c.PeerChoked {
		// So we're not choked, but we don't want anything right now. We may
		// have completed readahead, and the readahead window has not rolled
		// over to the next piece. Better to stay interested in case we're
		// going to want data in the near future.
		c.SetInterested(!t.haveAllPieces())
	}
}

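// Putting the two functions above together: a connection keeps at most 64
// outstanding requests, urgent (reader-priority) requests are filled first,
// and on a connection that is already interested, replenishment is skipped
// while more than requestsLowWater requests remain pending, so new requests
// go out in bursts rather than one per received chunk.
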
// Handle a received chunk from a peer.
func (me *Client) downloadedChunk(t *torrent, c *connection, msg *pp.Message) error {
	chunksReceived.Add(1)

	req := newRequest(msg.Index, msg.Begin, pp.Integer(len(msg.Piece)))

	// Request has been satisfied.
	if me.connDeleteRequest(t, c, req) {
		defer me.replenishConnRequests(t, c)
	} else {
		unexpectedChunksReceived.Add(1)
	}

	piece := &t.Pieces[req.Index]

	// Do we actually want this chunk?
	if !t.wantChunk(req) {
		unwantedChunksReceived.Add(1)
		c.UnwantedChunksReceived++
		return nil
	}

	c.UsefulChunksReceived++
	c.lastUsefulChunkReceived = time.Now()

	me.upload(t, c)

	piece.pendingWritesMutex.Lock()
	piece.pendingWrites++
	piece.pendingWritesMutex.Unlock()
	go func() {
		defer func() {
			piece.pendingWritesMutex.Lock()
			piece.pendingWrites--
			if piece.pendingWrites == 0 {
				piece.noPendingWrites.Broadcast()
			}
			piece.pendingWritesMutex.Unlock()
		}()
		// Write the chunk out.
		tr := perf.NewTimer()
		err := t.writeChunk(int(msg.Index), int64(msg.Begin), msg.Piece)
		if err != nil {
			log.Printf("error writing chunk: %s", err)
			return
		}
		tr.Stop("write chunk")
		me.mu.Lock()
		if c.peerTouchedPieces == nil {
			c.peerTouchedPieces = make(map[int]struct{})
		}
		c.peerTouchedPieces[int(req.Index)] = struct{}{}
		me.mu.Unlock()
	}()

	// log.Println("got chunk", req)
	me.event.Broadcast()
	defer t.publishPieceChange(int(req.Index))
	// Record that we have the chunk.
	piece.unpendChunkIndex(chunkIndex(req.chunkSpec, t.chunkSize))
	delete(t.urgent, req)
	// It's important that the piece is potentially queued before we check if
	// the piece is still wanted, because if it is queued, it won't be wanted.
	if piece.numPendingChunks() == 0 {
		me.queuePieceCheck(t, int(req.Index))
	}
	if !t.wantPiece(int(req.Index)) {
		for _, c := range t.Conns {
			c.pieceRequestOrder.DeletePiece(int(req.Index))
		}
	}

	// Cancel pending requests for this chunk.
	for _, c := range t.Conns {
		if me.connCancel(t, c, req) {
			me.replenishConnRequests(t, c)
		}
	}

	return nil
}

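// A worked example of the bookkeeping above (assuming the default 16 KiB
// chunk size, and assuming chunkIndex divides the request's Begin offset by
// the chunk size): a chunk with Begin 49152 in piece 7 maps to chunk index
// 49152/16384 = 3, so unpendChunkIndex(3) marks it complete, and once every
// chunk of piece 7 is unpended the piece is queued for hash checking.
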
// Return the connections that touched a piece, and clear the entry while
// doing it.
func (me *Client) reapPieceTouches(t *torrent, piece int) (ret []*connection) {
	for _, c := range t.Conns {
		if _, ok := c.peerTouchedPieces[piece]; ok {
			ret = append(ret, c)
			delete(c.peerTouchedPieces, piece)
		}
	}
	return
}

func (me *Client) pieceHashed(t *torrent, piece int, correct bool) {
	p := &t.Pieces[piece]
	if p.EverHashed {
		// Don't score the first time a piece is hashed, it could be an
		// initial check.
		if correct {
			pieceHashedCorrect.Add(1)
		} else {
			log.Printf("%s: piece %d failed hash", t, piece)
			pieceHashedNotCorrect.Add(1)
		}
	}
	p.EverHashed = true
	touchers := me.reapPieceTouches(t, int(piece))
	if correct {
		err := t.data.PieceCompleted(int(piece))
		if err != nil {
			log.Printf("error completing piece: %s", err)
			correct = false
		}
	} else if len(touchers) != 0 {
		log.Printf("dropping %d conns that touched piece", len(touchers))
		for _, c := range touchers {
			me.dropConnection(t, c)
		}
	}
	me.pieceChanged(t, int(piece))
}

func (me *Client) pieceChanged(t *torrent, piece int) {
	correct := t.pieceComplete(piece)
	p := &t.Pieces[piece]
	defer t.publishPieceChange(piece)
	defer me.event.Broadcast()
	if correct {
		p.Priority = PiecePriorityNone
		p.PendingChunkSpecs = nil
		for req := range t.urgent {
			if int(req.Index) == piece {
				delete(t.urgent, req)
			}
		}
	} else {
		if p.numPendingChunks() == 0 {
			t.pendAllChunkSpecs(int(piece))
		}
		if t.wantPiece(piece) {
			me.openNewConns(t)
		}
	}
	for _, conn := range t.Conns {
		if correct {
			conn.Have(piece)
			for r := range conn.Requests {
				if int(r.Index) == piece {
					conn.Cancel(r)
				}
			}
			conn.pieceRequestOrder.DeletePiece(int(piece))
			me.upload(t, conn)
		} else if t.wantPiece(piece) && conn.PeerHasPiece(piece) {
			t.connPendPiece(conn, int(piece))
			me.replenishConnRequests(t, conn)
		}
	}
	me.event.Broadcast()
}

func (cl *Client) verifyPiece(t *torrent, piece int) {
	cl.mu.Lock()
	defer cl.mu.Unlock()
	p := &t.Pieces[piece]
	for p.Hashing || t.data == nil {
		cl.event.Wait()
	}
	p.QueuedForHash = false
	if t.isClosed() || t.pieceComplete(piece) {
		return
	}
	p.Hashing = true
	cl.mu.Unlock()
	sum := t.hashPiece(piece)
	cl.mu.Lock()
	select {
	case <-t.closing:
		return
	default:
	}
	p.Hashing = false
	cl.pieceHashed(t, piece, sum == p.Hash)
}

// Returns handles to all the torrents loaded in the Client.
func (me *Client) Torrents() (ret []Download) {
	me.mu.Lock()
	for _, t := range me.torrents {
		ret = append(ret, Torrent{me, t})
	}
	me.mu.Unlock()
	return
}

func (me *Client) AddMagnet(uri string) (T Download, err error) {
	spec, err := TorrentSpecFromMagnetURI(uri)
	if err != nil {
		return
	}
	T, _, err = me.AddTorrentSpec(spec)
	return
}

func (me *Client) AddTorrent(mi *metainfo.MetaInfo) (T Download, err error) {
	T, _, err = me.AddTorrentSpec(TorrentSpecFromMetaInfo(mi))
	return
}

func (me *Client) AddTorrentFromFile(filename string) (T Download, err error) {
	mi, err := metainfo.LoadFromFile(filename)
	if err != nil {
		return
	}
	T, _, err = me.AddTorrentSpec(TorrentSpecFromMetaInfo(mi))
	return
}

func (me *Client) DHT() *dht.Server {
	return me.dHT
}
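
// A minimal end-to-end sketch of the helpers above (assuming a *Client named
// cl built elsewhere in this package, and assuming the returned Download
// handle exposes the DownloadAll method defined on Torrent above; error
// handling elided):
//
//	T, _ := cl.AddTorrentFromFile("example.torrent")
//	T.DownloadAll()
//	if cl.WaitAll() {
//		log.Print("all torrents downloaded")
//	}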