/*
Package torrent implements a torrent client.

Simple example:

	c := &Client{}
	c.Start()
	defer c.Stop()
	if err := c.AddTorrent(externalMetaInfoPackageSux); err != nil {
		return fmt.Errorf("error adding torrent: %s", err)
	}
	c.WaitAll()
	log.Print("erhmahgerd, torrent downloaded")

*/
package torrent

import (
	"bufio"
	"bytes"
	"container/heap"
	"crypto/rand"
	"crypto/sha1"
	"errors"
	"expvar"
	"fmt"
	"io"
	"log"
	mathRand "math/rand"
	"net"
	"os"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/h2so5/utp"

	"github.com/anacrolix/libtorgo/bencode"
	"github.com/anacrolix/libtorgo/metainfo"

	"bitbucket.org/anacrolix/go.torrent/dht"
	pp "bitbucket.org/anacrolix/go.torrent/peer_protocol"
	"bitbucket.org/anacrolix/go.torrent/tracker"
	_ "bitbucket.org/anacrolix/go.torrent/tracker/udp"
	. "bitbucket.org/anacrolix/go.torrent/util"
	"bitbucket.org/anacrolix/go.torrent/util/levelmu"
)

var (
	unusedDownloadedChunksCount = expvar.NewInt("unusedDownloadedChunksCount")
	chunksDownloadedCount       = expvar.NewInt("chunksDownloadedCount")
	peersFoundByDHT             = expvar.NewInt("peersFoundByDHT")
	peersFoundByPEX             = expvar.NewInt("peersFoundByPEX")
	uploadChunksPosted          = expvar.NewInt("uploadChunksPosted")
	unexpectedCancels           = expvar.NewInt("unexpectedCancels")
	postedCancels               = expvar.NewInt("postedCancels")
	duplicateConnsAvoided       = expvar.NewInt("duplicateConnsAvoided")
	failedPieceHashes           = expvar.NewInt("failedPieceHashes")
	unsuccessfulDials           = expvar.NewInt("unsuccessfulDials")
	successfulDials             = expvar.NewInt("successfulDials")
)

const (
	// Justification for set bits follows.
	//
	// Extension protocol: http://www.bittorrent.org/beps/bep_0010.html
	// DHT: http://www.bittorrent.org/beps/bep_0005.html
	extensionBytes = "\x00\x00\x00\x00\x00\x10\x00\x01"

	socketsPerTorrent = 40
)
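
// A hedged reading of the reserved bytes above (an editorial note, not from
// the original source): byte 5 has 0x10 set, which advertises the extension
// protocol (BEP 10), and byte 7 has 0x01 set, which advertises DHT support
// (BEP 5). runConnection checks the same bits on the peer's side, e.g.:
//
//	conn.PeerExtensionBytes[5]&0x10 != 0 // peer supports BEP 10 extended messages
//	conn.PeerExtensionBytes[7]&0x01 != 0 // peer supports DHT port messages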

// Currently doesn't really queue, but should in the future.
func (cl *Client) queuePieceCheck(t *torrent, pieceIndex pp.Integer) {
	piece := t.Pieces[pieceIndex]
	if piece.QueuedForHash {
		return
	}
	piece.QueuedForHash = true
	go cl.verifyPiece(t, pieceIndex)
}

func (cl *Client) queueFirstHash(t *torrent, piece int) {
	p := t.Pieces[piece]
	if p.EverHashed || p.Hashing || p.QueuedForHash {
		return
	}
	cl.queuePieceCheck(t, pp.Integer(piece))
}

// Queues the torrent data for the given region for download. The beginning of
// the region is given highest priority to allow a subsequent read at the same
// offset to return data ASAP.
func (me *Client) PrioritizeDataRegion(ih InfoHash, off, len_ int64) error {
	me.mu.Lock()
	defer me.mu.Unlock()
	t := me.torrent(ih)
	if t == nil {
		return errors.New("no such active torrent")
	}
	if !t.haveInfo() {
		return errors.New("missing metadata")
	}
	me.downloadStrategy.TorrentPrioritize(t, off, len_)
	for _, cn := range t.Conns {
		me.replenishConnRequests(t, cn)
	}
	return nil
}
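
// A hedged usage sketch (not from the original source): callers that stream
// data typically prioritize the region they are about to read, then retry
// TorrentReadAt until the data stops being pending. The infohash, offset and
// buffer here are placeholders.
//
//	cl.PrioritizeDataRegion(ih, off, int64(len(buf)))
//	for {
//		n, err := cl.TorrentReadAt(ih, off, buf)
//		if err != ErrDataNotReady {
//			return n, err
//		}
//		time.Sleep(100 * time.Millisecond) // or wait on a readiness signal
//	}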

type dataWait struct {
	offset int64
	ready  chan struct{}
}

type Client struct {
	noUpload         bool
	dataDir          string
	halfOpenLimit    int
	peerID           [20]byte
	listeners        []net.Listener
	disableTrackers  bool
	downloadStrategy DownloadStrategy
	dHT              *dht.Server

	mu    levelmu.LevelMutex
	event sync.Cond
	quit  chan struct{}

	handshaking int

	torrents map[InfoHash]*torrent

	dataWaits map[*torrent][]dataWait
}

func (me *Client) PeerID() string {
	return string(me.peerID[:])
}

func (me *Client) ListenAddr() (addr net.Addr) {
	for _, l := range me.listeners {
		if addr != nil && l.Addr().String() != addr.String() {
			panic("listeners exist on different addresses")
		}
		addr = l.Addr()
	}
	return
}

func (cl *Client) WriteStatus(w io.Writer) {
	cl.mu.LevelLock(1)
	defer cl.mu.Unlock()
	fmt.Fprintf(w, "Listening on %s\n", cl.ListenAddr())
	fmt.Fprintf(w, "Peer ID: %q\n", cl.peerID)
	fmt.Fprintf(w, "Handshaking: %d\n", cl.handshaking)
	if cl.dHT != nil {
		fmt.Fprintf(w, "DHT nodes: %d\n", cl.dHT.NumNodes())
		fmt.Fprintf(w, "DHT Server ID: %x\n", cl.dHT.IDString())
		fmt.Fprintf(w, "DHT port: %d\n", addrPort(cl.dHT.LocalAddr()))
		fmt.Fprintf(w, "DHT announces: %d\n", cl.dHT.NumConfirmedAnnounces)
	}
	cl.downloadStrategy.WriteStatus(w)
	fmt.Fprintln(w)
	for _, t := range cl.torrents {
		fmt.Fprintf(w, "%s: %f%%\n", t.Name(), func() float32 {
			if !t.haveInfo() {
				return 0
			} else {
				return 100 * (1 - float32(t.BytesLeft())/float32(t.Length()))
			}
		}())
		fmt.Fprint(w, "Blocked reads:")
		for _, dw := range cl.dataWaits[t] {
			fmt.Fprintf(w, " %d", dw.offset)
		}
		fmt.Fprintln(w)
		t.WriteStatus(w)
		fmt.Fprintln(w)
	}
}

// Read torrent data at the given offset. Returns ErrDataNotReady if the data
// isn't available.
func (cl *Client) TorrentReadAt(ih InfoHash, off int64, p []byte) (n int, err error) {
	cl.mu.LevelLock(1)
	defer cl.mu.Unlock()
	t := cl.torrent(ih)
	if t == nil {
		err = errors.New("unknown torrent")
		return
	}
	index := pp.Integer(off / int64(t.UsualPieceSize()))
	// Reading outside the bounds of a file is an error.
	if index < 0 {
		err = os.ErrInvalid
		return
	}
	if int(index) >= len(t.Pieces) {
		err = io.EOF
		return
	}
	piece := t.Pieces[index]
	pieceOff := pp.Integer(off % int64(t.UsualPieceSize()))
	pieceLeft := int(t.PieceLength(index) - pieceOff)
	if pieceLeft <= 0 {
		err = io.EOF
		return
	}
	if len(p) > pieceLeft {
		p = p[:pieceLeft]
	}
	for cs := range piece.PendingChunkSpecs {
		chunkOff := int64(pieceOff) - int64(cs.Begin)
		if chunkOff >= int64(t.PieceLength(index)) {
			panic(chunkOff)
		}
		if 0 <= chunkOff && chunkOff < int64(cs.Length) {
			// read begins in a pending chunk
			err = ErrDataNotReady
			return
		}
		// pending chunk caps available data
		if chunkOff < 0 && int64(len(p)) > -chunkOff {
			p = p[:-chunkOff]
		}
	}
	if len(p) == 0 {
		panic(len(p))
	}
	return t.Data.ReadAt(p, off)
}

func NewClient(cfg *Config) (cl *Client, err error) {
	if cfg == nil {
		cfg = &Config{}
	}

	cl = &Client{
		noUpload:         cfg.NoUpload,
		disableTrackers:  cfg.DisableTrackers,
		downloadStrategy: cfg.DownloadStrategy,
		halfOpenLimit:    100,
		dataDir:          cfg.DataDir,

		quit:     make(chan struct{}),
		torrents: make(map[InfoHash]*torrent),

		dataWaits: make(map[*torrent][]dataWait),
	}
	cl.event.L = &cl.mu
	cl.mu.Init(2)

	if cfg.PeerID != "" {
		CopyExact(&cl.peerID, cfg.PeerID)
	} else {
		o := copy(cl.peerID[:], BEP20)
		_, err = rand.Read(cl.peerID[o:])
		if err != nil {
			panic("error generating peer id")
		}
	}

	if cl.downloadStrategy == nil {
		cl.downloadStrategy = &DefaultDownloadStrategy{}
	}

	// Returns the laddr string to listen on for the next Listen call.
	listenAddr := func() string {
		if addr := cl.ListenAddr(); addr != nil {
			return addr.String()
		}
		return cfg.ListenAddr
	}
	if !cfg.DisableTCP {
		var l net.Listener
		l, err = net.Listen("tcp", listenAddr())
		if err != nil {
			return
		}
		cl.listeners = append(cl.listeners, l)
		go cl.acceptConnections(l, false)
	}
	var utpL *utp.UTPListener
	if !cfg.DisableUTP {
		utpL, err = utp.Listen("utp", listenAddr())
		if err != nil {
			return
		}
		cl.listeners = append(cl.listeners, utpL)
		go cl.acceptConnections(utpL, true)
	}
	if !cfg.NoDHT {
		cl.dHT, err = dht.NewServer(&dht.ServerConfig{
			Addr: listenAddr(),
			Conn: utpL.RawConn,
		})
		if err != nil {
			return
		}
	}

	return
}
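
// A hedged usage sketch (assumed, not from the original source): construct a
// client with NewClient, add a torrent from a .torrent file, and block until
// everything completes. The Config fields set here and the file path are
// placeholders.
//
//	cl, err := NewClient(&Config{DataDir: "/tmp/downloads"})
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer cl.Stop()
//	if err := cl.AddTorrentFromFile("example.torrent"); err != nil {
//		log.Fatal(err)
//	}
//	cl.WaitAll()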

func (cl *Client) stopped() bool {
	select {
	case <-cl.quit:
		return true
	default:
		return false
	}
}

// Stops the client. All connections to peers are closed and all activity will
// come to a halt.
func (me *Client) Stop() {
	me.mu.Lock()
	close(me.quit)
	me.event.Broadcast()
	for _, t := range me.torrents {
		t.Close()
	}
	me.mu.Unlock()
}

func (cl *Client) acceptConnections(l net.Listener, utp bool) {
	for {
		// We accept all connections immediately, because we don't know what
		// torrent they're for.
		conn, err := l.Accept()
		select {
		case <-cl.quit:
			if conn != nil {
				conn.Close()
			}
			return
		default:
		}
		if err != nil {
			log.Print(err)
			return
		}
		go func() {
			if err := cl.runConnection(conn, nil, peerSourceIncoming, utp); err != nil {
				log.Print(err)
			}
		}()
	}
}

func (me *Client) torrent(ih InfoHash) *torrent {
	for _, t := range me.torrents {
		if t.InfoHash == ih {
			return t
		}
	}
	return nil
}

type dialResult struct {
	net.Conn
	UTP bool
}

func doDial(dial func() (net.Conn, error), ch chan dialResult, utp bool) {
	conn, err := dial()
	if err != nil {
		conn = nil // Pedantic
	}
	ch <- dialResult{conn, utp}
	if err == nil {
		successfulDials.Add(1)
		return
	}
	unsuccessfulDials.Add(1)
	if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
		return
	}
	if netOpErr, ok := err.(*net.OpError); ok {
		switch netOpErr.Err {
		case syscall.ECONNREFUSED, syscall.EHOSTUNREACH:
			return
		}
	}
	if err != nil {
		log.Printf("error connecting to peer: %s %#v", err, err)
		return
	}
}

func reducedDialTimeout(max time.Duration, halfOpenLimit int, pendingPeers int) time.Duration {
	return max / time.Duration((pendingPeers+halfOpenLimit)/halfOpenLimit)
}
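
// A worked example of the reduction above (a hedged editorial note, not from
// the original source): the divisor is the integer value of
// (pendingPeers+halfOpenLimit)/halfOpenLimit, so with an assumed max of 30s
// and halfOpenLimit of 100, 0-99 pending peers leaves the timeout at 30s,
// 100-199 halves it to 15s, 200-299 gives 10s, and so on.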

// Start the process of connecting to the given peer for the given torrent if
// appropriate.
func (me *Client) initiateConn(peer Peer, t *torrent) {
	if peer.Id == me.peerID {
		return
	}
	addr := net.JoinHostPort(peer.IP.String(), fmt.Sprintf("%d", peer.Port))
	if t.addrActive(addr) {
		duplicateConnsAvoided.Add(1)
		return
	}
	t.HalfOpen[addr] = struct{}{}
	go func() {
		// Binding to the listen address and dialing via net.Dialer gives
		// "address in use" error. It seems it's not possible to dial out from
		// this address so that peers associate our local address with our
		// listen address.

		dialTimeout := reducedDialTimeout(dialTimeout, me.halfOpenLimit, len(t.Peers))
		// Initiate connections via TCP and UTP simultaneously. Use the first
		// one that succeeds.
		left := 2
		resCh := make(chan dialResult, left)
		go doDial(func() (net.Conn, error) {
			time.Sleep(time.Second) // Give uTP a bit of a head start.
			return net.DialTimeout("tcp", addr, dialTimeout)
		}, resCh, false)
		go doDial(func() (net.Conn, error) {
			return (&utp.Dialer{Timeout: dialTimeout}).Dial("utp", addr)
		}, resCh, true)

		var res dialResult
		for ; left > 0 && res.Conn == nil; left-- {
			res = <-resCh
		}
		// Whether or not the connection attempt succeeds, the half open
		// counter should be decremented, and new connection attempts made.
		go func() {
			me.mu.Lock()
			defer me.mu.Unlock()
			if _, ok := t.HalfOpen[addr]; !ok {
				panic("invariant broken")
			}
			delete(t.HalfOpen, addr)
			me.openNewConns()
		}()
		if res.Conn == nil {
			return
		}
		if left > 0 {
			go func() {
				for ; left > 0; left-- {
					conn := (<-resCh).Conn
					if conn != nil {
						conn.Close()
					}
				}
			}()
		}

		// log.Printf("connected to %s", conn.RemoteAddr())
		err := me.runConnection(res.Conn, t, peer.Source, res.UTP)
		if err != nil {
			log.Print(err)
		}
	}()
}

// The port number for incoming peer connections. 0 if the client isn't
// listening.
func (cl *Client) incomingPeerPort() int {
	listenAddr := cl.ListenAddr()
	if listenAddr == nil {
		return 0
	}
	return addrPort(listenAddr)
}

// Convert a net.Addr to its compact IP representation. Either 4 or 16 bytes
// per "yourip" field of http://www.bittorrent.org/beps/bep_0010.html.
func addrCompactIP(addr net.Addr) (string, error) {
	host, _, err := net.SplitHostPort(addr.String())
	if err != nil {
		return "", err
	}
	ip := net.ParseIP(host)
	if v4 := ip.To4(); v4 != nil {
		if len(v4) != 4 {
			panic(v4)
		}
		return string(v4), nil
	}
	return string(ip.To16()), nil
}
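
// A hedged illustrative example (not from the original source): for the
// address "1.2.3.4:6881" the compact representation is the 4 raw bytes
// 0x01 0x02 0x03 0x04, while an IPv6 address yields its 16 raw bytes instead.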

func handshakeWriter(w io.WriteCloser, bb <-chan []byte, done chan<- error) {
	var err error
	for b := range bb {
		_, err = w.Write(b)
		if err != nil {
			w.Close()
			break
		}
	}
	done <- err
}

type peerExtensionBytes [8]byte

type peerID [20]byte

type handshakeResult struct {
	peerExtensionBytes
	peerID
	InfoHash
}

func handshake(sock io.ReadWriteCloser, ih *InfoHash, peerID [20]byte) (res handshakeResult, ok bool, err error) {
	// Bytes to be sent to the peer. Should never block the sender.
	postCh := make(chan []byte, 4)
	// A single error value sent when the writer completes.
	writeDone := make(chan error, 1)
	// Performs writes to the socket and ensures posts don't block.
	go handshakeWriter(sock, postCh, writeDone)

	defer func() {
		close(postCh) // Done writing.
		if !ok {
			return
		}
		if err != nil {
			panic(err)
		}
		// Wait until writes complete before returning from handshake.
		err = <-writeDone
		if err != nil {
			err = fmt.Errorf("error writing during handshake: %s", err)
		}
	}()

	post := func(bb []byte) {
		select {
		case postCh <- bb:
		default:
			panic("mustn't block while posting")
		}
	}

	post([]byte(pp.Protocol))
	post([]byte(extensionBytes))
	if ih != nil { // We already know what we want.
		post(ih[:])
		post(peerID[:])
	}
	var b [68]byte
	_, err = io.ReadFull(sock, b[:68])
	if err != nil {
		err = nil
		return
	}
	if string(b[:20]) != pp.Protocol {
		return
	}
	CopyExact(&res.peerExtensionBytes, b[20:28])
	CopyExact(&res.InfoHash, b[28:48])
	CopyExact(&res.peerID, b[48:68])

	if ih == nil { // We were waiting for the peer to tell us what they wanted.
		post(res.InfoHash[:])
		post(peerID[:])
	}

	ok = true
	return
}
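
// The 68 bytes read above decompose as follows (a hedged summary of the
// slicing in handshake, not an additional protocol guarantee): bytes [0:20)
// are the protocol string (pp.Protocol), [20:28) the reserved extension bits,
// [28:48) the infohash, and [48:68) the remote peer ID.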

type peerConn struct {
	net.Conn
}

func (pc peerConn) Read(b []byte) (n int, err error) {
	err = pc.Conn.SetReadDeadline(time.Now().Add(150 * time.Second))
	if err != nil {
		return
	}
	n, err = pc.Conn.Read(b)
	if err != nil {
		if opError, ok := err.(*net.OpError); ok && opError.Op == "read" && opError.Err == syscall.ECONNRESET {
			err = io.EOF
		} else if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
			if n != 0 {
				panic(n)
			}
			err = io.EOF
		}
	}
	return
}

func (me *Client) runConnection(sock net.Conn, torrent *torrent, discovery peerSource, uTP bool) (err error) {
	if tcpConn, ok := sock.(*net.TCPConn); ok {
		tcpConn.SetLinger(0)
	}
	defer sock.Close()
	me.mu.Lock()
	me.handshaking++
	me.mu.Unlock()
	// One minute to complete handshake.
	sock.SetDeadline(time.Now().Add(time.Minute))
	hsRes, ok, err := handshake(sock, func() *InfoHash {
		if torrent == nil {
			return nil
		} else {
			return &torrent.InfoHash
		}
	}(), me.peerID)
	me.mu.Lock()
	defer me.mu.Unlock()
	if me.handshaking == 0 {
		panic("handshake count invariant is broken")
	}
	me.handshaking--
	if err != nil {
		err = fmt.Errorf("error during handshake: %s", err)
		return
	}
	if !ok {
		return
	}
	if hsRes.peerID == me.peerID {
		return
	}
	torrent = me.torrent(hsRes.InfoHash)
	if torrent == nil {
		return
	}
	sock.SetWriteDeadline(time.Time{})
	sock = peerConn{sock}
	conn := newConnection(sock, hsRes.peerExtensionBytes, hsRes.peerID, uTP)
	defer conn.Close()
	conn.Discovery = discovery
	if !me.addConnection(torrent, conn) {
		return
	}
	if conn.PeerExtensionBytes[5]&0x10 != 0 {
		conn.Post(pp.Message{
			Type:       pp.Extended,
			ExtendedID: pp.HandshakeExtendedID,
			ExtendedPayload: func() []byte {
				d := map[string]interface{}{
					"m": map[string]int{
						"ut_metadata": 1,
						"ut_pex":      2,
					},
					"v": "go.torrent dev 20140825", // Just the date
					// No upload queue is implemented yet.
					"reqq": func() int {
						if me.noUpload {
							// No need to look strange if it costs us nothing.
							return 250
						} else {
							return 1
						}
					}(),
				}
				if torrent.metadataSizeKnown() {
					d["metadata_size"] = torrent.metadataSize()
				}
				if p := me.incomingPeerPort(); p != 0 {
					d["p"] = p
				}
				yourip, err := addrCompactIP(conn.Socket.RemoteAddr())
				if err != nil {
					log.Printf("error calculating yourip field value in extension handshake: %s", err)
				} else {
					d["yourip"] = yourip
				}
				// log.Printf("sending %v", d)
				b, err := bencode.Marshal(d)
				if err != nil {
					panic(err)
				}
				return b
			}(),
		})
	}
	if torrent.haveAnyPieces() {
		conn.Post(pp.Message{
			Type:     pp.Bitfield,
			Bitfield: torrent.bitfield(),
		})
	}
	if conn.PeerExtensionBytes[7]&0x01 != 0 && me.dHT != nil {
		addr, _ := me.dHT.LocalAddr().(*net.UDPAddr)
		conn.Post(pp.Message{
			Type: pp.Port,
			Port: uint16(addr.Port),
		})
	}
	err = me.connectionLoop(torrent, conn)
	if err != nil {
		err = fmt.Errorf("during Connection loop with peer %q: %s", conn.PeerID, err)
	}
	me.dropConnection(torrent, conn)
	return
}
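
// A hedged note on the extended handshake above (BEP 10): the "m" dictionary
// maps extension names to the message IDs this client will accept for them,
// so metadata replies arrive as extended messages with ID 1 ("ut_metadata")
// and PEX with ID 2 ("ut_pex"); connectionLoop below dispatches on exactly
// those IDs.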

func (me *Client) peerGotPiece(t *torrent, c *connection, piece int) {
	for piece >= len(c.PeerPieces) {
		c.PeerPieces = append(c.PeerPieces, false)
	}
	c.PeerPieces[piece] = true
	if !t.havePiece(piece) {
		me.replenishConnRequests(t, c)
	}
}

func (me *Client) peerUnchoked(torrent *torrent, conn *connection) {
	me.replenishConnRequests(torrent, conn)
}

func (cl *Client) connCancel(t *torrent, cn *connection, r request) (ok bool) {
	ok = cn.Cancel(r)
	if ok {
		postedCancels.Add(1)
		cl.downloadStrategy.DeleteRequest(t, r)
	}
	return
}

func (cl *Client) connDeleteRequest(t *torrent, cn *connection, r request) {
	if !cn.RequestPending(r) {
		return
	}
	cl.downloadStrategy.DeleteRequest(t, r)
	delete(cn.Requests, r)
}

func (cl *Client) requestPendingMetadata(t *torrent, c *connection) {
	if t.haveInfo() {
		return
	}
	var pending []int
	for index := 0; index < t.MetadataPieceCount(); index++ {
		if !t.HaveMetadataPiece(index) {
			pending = append(pending, index)
		}
	}
	for _, i := range mathRand.Perm(len(pending)) {
		c.Post(pp.Message{
			Type:       pp.Extended,
			ExtendedID: byte(c.PeerExtensionIDs["ut_metadata"]),
			ExtendedPayload: func() []byte {
				b, err := bencode.Marshal(map[string]int{
					"msg_type": 0,
					"piece":    pending[i],
				})
				if err != nil {
					panic(err)
				}
				return b
			}(),
		})
	}
}

func (cl *Client) completedMetadata(t *torrent) {
	h := sha1.New()
	h.Write(t.MetaData)
	var ih InfoHash
	CopyExact(&ih, h.Sum(nil))
	if ih != t.InfoHash {
		log.Print("bad metadata")
		t.InvalidateMetadata()
		return
	}
	var info metainfo.Info
	err := bencode.Unmarshal(t.MetaData, &info)
	if err != nil {
		log.Printf("error unmarshalling metadata: %s", err)
		t.InvalidateMetadata()
		return
	}
	// TODO(anacrolix): If this fails, I think something harsher should be
	// done.
	err = cl.setMetaData(t, info, t.MetaData)
	if err != nil {
		log.Printf("error setting metadata: %s", err)
		t.InvalidateMetadata()
		return
	}
	log.Printf("%s: got metadata from peers", t)
}

// Process incoming ut_metadata message.
func (cl *Client) gotMetadataExtensionMsg(payload []byte, t *torrent, c *connection) (err error) {
	var d map[string]int
	err = bencode.Unmarshal(payload, &d)
	if err != nil {
		err = fmt.Errorf("error unmarshalling payload: %s: %q", err, payload)
		return
	}
	msgType, ok := d["msg_type"]
	if !ok {
		err = errors.New("missing msg_type field")
		return
	}
	piece := d["piece"]
	switch msgType {
	case pp.DataMetadataExtensionMsgType:
		if t.haveInfo() {
			break
		}
		t.SaveMetadataPiece(piece, payload[len(payload)-metadataPieceSize(d["total_size"], piece):])
		if !t.HaveAllMetadataPieces() {
			break
		}
		cl.completedMetadata(t)
	case pp.RequestMetadataExtensionMsgType:
		if !t.HaveMetadataPiece(piece) {
			c.Post(t.NewMetadataExtensionMessage(c, pp.RejectMetadataExtensionMsgType, d["piece"], nil))
			break
		}
		start := (1 << 14) * piece
		c.Post(t.NewMetadataExtensionMessage(c, pp.DataMetadataExtensionMsgType, piece, t.MetaData[start:start+t.metadataPieceSize(piece)]))
	case pp.RejectMetadataExtensionMsgType:
	default:
		err = errors.New("unknown msg_type value")
	}
	return
}

type peerExchangeMessage struct {
	Added      CompactPeers   `bencode:"added"`
	AddedFlags []byte         `bencode:"added.f"`
	Dropped    []tracker.Peer `bencode:"dropped"`
}

// Extracts the port as an integer from a net.Addr.
func addrPort(addr net.Addr) int {
	return AddrPort(addr)
}

// Processes incoming bittorrent messages. The client lock is held upon entry
// and exit.
func (me *Client) connectionLoop(t *torrent, c *connection) error {
	decoder := pp.Decoder{
		R:         bufio.NewReaderSize(c.Socket, 20*1024),
		MaxLength: 256 * 1024,
	}
	for {
		me.mu.Unlock()
		var msg pp.Message
		err := decoder.Decode(&msg)
		me.mu.Lock()
		c.lastMessageReceived = time.Now()
		select {
		case <-c.closing:
			return nil
		default:
		}
		if err != nil {
			if me.stopped() || err == io.EOF {
				return nil
			}
			return err
		}
		if msg.Keepalive {
			continue
		}
		switch msg.Type {
		case pp.Choke:
			c.PeerChoked = true
			for r := range c.Requests {
				me.connDeleteRequest(t, c, r)
			}
		case pp.Unchoke:
			c.PeerChoked = false
			me.peerUnchoked(t, c)
		case pp.Interested:
			c.PeerInterested = true
			// TODO: This should be done from a dedicated unchoking routine.
			if me.noUpload {
				break
			}
			c.Unchoke()
		case pp.NotInterested:
			c.PeerInterested = false
			c.Choke()
		case pp.Have:
			me.peerGotPiece(t, c, int(msg.Index))
		case pp.Request:
			if me.noUpload {
				break
			}
			if c.PeerRequests == nil {
				c.PeerRequests = make(map[request]struct{}, maxRequests)
			}
			request := newRequest(msg.Index, msg.Begin, msg.Length)
			// TODO: Requests should be satisfied from a dedicated upload routine.
			// c.PeerRequests[request] = struct{}{}
			p := make([]byte, msg.Length)
			n, err := t.Data.ReadAt(p, int64(t.PieceLength(0))*int64(msg.Index)+int64(msg.Begin))
			if err != nil {
				return fmt.Errorf("reading t data to serve request %q: %s", request, err)
			}
			if n != int(msg.Length) {
				return fmt.Errorf("bad request: %v", msg)
			}
			c.Post(pp.Message{
				Type:  pp.Piece,
				Index: msg.Index,
				Begin: msg.Begin,
				Piece: p,
			})
			uploadChunksPosted.Add(1)
		case pp.Cancel:
			req := newRequest(msg.Index, msg.Begin, msg.Length)
			if !c.PeerCancel(req) {
				unexpectedCancels.Add(1)
			}
		case pp.Bitfield:
			if c.PeerPieces != nil {
				err = errors.New("received unexpected bitfield")
				break
			}
			if t.haveInfo() {
				if len(msg.Bitfield) < t.NumPieces() {
					err = errors.New("received invalid bitfield")
					break
				}
				msg.Bitfield = msg.Bitfield[:t.NumPieces()]
			}
			c.PeerPieces = msg.Bitfield
			for index, has := range c.PeerPieces {
				if has {
					me.peerGotPiece(t, c, index)
				}
			}
		case pp.Piece:
			err = me.downloadedChunk(t, c, &msg)
		case pp.Extended:
			switch msg.ExtendedID {
			case pp.HandshakeExtendedID:
				// TODO: Create a bencode struct for this.
				var d map[string]interface{}
				err = bencode.Unmarshal(msg.ExtendedPayload, &d)
				if err != nil {
					err = fmt.Errorf("error decoding extended message payload: %s", err)
					break
				}
				// log.Printf("got handshake from %q: %#v", c.Socket.RemoteAddr().String(), d)
				if reqq, ok := d["reqq"]; ok {
					if i, ok := reqq.(int64); ok {
						c.PeerMaxRequests = int(i)
					}
				}
				if v, ok := d["v"]; ok {
					c.PeerClientName = v.(string)
				}
				m, ok := d["m"]
				if !ok {
					err = errors.New("handshake missing m item")
					break
				}
				mTyped, ok := m.(map[string]interface{})
				if !ok {
					err = errors.New("handshake m value is not dict")
					break
				}
				if c.PeerExtensionIDs == nil {
					c.PeerExtensionIDs = make(map[string]int64, len(mTyped))
				}
				for name, v := range mTyped {
					id, ok := v.(int64)
					if !ok {
						log.Printf("bad handshake m item extension ID type: %T", v)
						continue
					}
					if id == 0 {
						delete(c.PeerExtensionIDs, name)
					} else {
						c.PeerExtensionIDs[name] = id
					}
				}
				metadata_sizeUntyped, ok := d["metadata_size"]
				if ok {
					metadata_size, ok := metadata_sizeUntyped.(int64)
					if !ok {
						log.Printf("bad metadata_size type: %T", metadata_sizeUntyped)
					} else {
						t.SetMetadataSize(metadata_size)
					}
				}
				if _, ok := c.PeerExtensionIDs["ut_metadata"]; ok {
					me.requestPendingMetadata(t, c)
				}
			case 1:
				err = me.gotMetadataExtensionMsg(msg.ExtendedPayload, t, c)
				if err != nil {
					err = fmt.Errorf("error handling metadata extension message: %s", err)
				}
			case 2:
				var pexMsg peerExchangeMessage
				err := bencode.Unmarshal(msg.ExtendedPayload, &pexMsg)
				if err != nil {
					err = fmt.Errorf("error unmarshalling PEX message: %s", err)
					break
				}
				go func() {
					err := me.AddPeers(t.InfoHash, func() (ret []Peer) {
						for _, cp := range pexMsg.Added {
							p := Peer{
								IP:     make([]byte, 4),
								Port:   int(cp.Port),
								Source: peerSourcePEX,
							}
							if n := copy(p.IP, cp.IP[:]); n != 4 {
								panic(n)
							}
							ret = append(ret, p)
						}
						return
					}())
					if err != nil {
						log.Printf("error adding PEX peers: %s", err)
						return
					}
					peersFoundByPEX.Add(int64(len(pexMsg.Added)))
				}()
			default:
				err = fmt.Errorf("unexpected extended message ID: %v", msg.ExtendedID)
			}
			if err != nil {
				// That client uses its own extension IDs for outgoing message
				// types, which is incorrect.
				if bytes.HasPrefix(c.PeerID[:], []byte("-SD0100-")) ||
					strings.HasPrefix(string(c.PeerID[:]), "-XL0012-") {
					return nil
				}
				// log.Printf("peer extension map: %#v", c.PeerExtensionIDs)
			}
		case pp.Port:
			if me.dHT == nil {
				break
			}
			pingAddr, err := net.ResolveUDPAddr("", c.Socket.RemoteAddr().String())
			if err != nil {
				panic(err)
			}
			if msg.Port != 0 {
				pingAddr.Port = int(msg.Port)
			}
			_, err = me.dHT.Ping(pingAddr)
		default:
			err = fmt.Errorf("received unknown message type: %#v", msg.Type)
		}
		if err != nil {
			return err
		}
	}
}

func (me *Client) dropConnection(torrent *torrent, conn *connection) {
	for r := range conn.Requests {
		me.connDeleteRequest(torrent, conn, r)
	}
	conn.Close()
	for i0, c := range torrent.Conns {
		if c != conn {
			continue
		}
		i1 := len(torrent.Conns) - 1
		if i0 != i1 {
			torrent.Conns[i0] = torrent.Conns[i1]
		}
		torrent.Conns = torrent.Conns[:i1]
		me.openNewConns()
		return
	}
	panic("connection not found")
}

func (me *Client) addConnection(t *torrent, c *connection) bool {
	if me.stopped() {
		return false
	}
	select {
	case <-t.ceasingNetworking:
		return false
	default:
	}
	for _, c0 := range t.Conns {
		if c.PeerID == c0.PeerID {
			// Already connected to a client with that ID.
			return false
		}
	}
	t.Conns = append(t.Conns, c)
	if len(t.Conns) > socketsPerTorrent {
		wcs := t.worstConnsHeap()
		heap.Pop(wcs).(*connection).Close()
	}
	return true
}

func (me *Client) openNewConns() {
	for _, t := range me.torrents {
		select {
		case <-t.ceasingNetworking:
			continue
		default:
		}
		for len(t.Peers) != 0 {
			if len(t.HalfOpen) >= me.halfOpenLimit {
				return
			}
			if len(t.HalfOpen)+me.handshaking+len(t.Conns) >= socketsPerTorrent {
				break
			}
			var (
				k peersKey
				p Peer
			)
			for k, p = range t.Peers {
				break
			}
			delete(t.Peers, k)
			me.initiateConn(p, t)
		}
	}
}

// Adds peers to the swarm for the torrent corresponding to infoHash.
func (me *Client) AddPeers(infoHash InfoHash, peers []Peer) error {
	me.mu.Lock()
	defer me.mu.Unlock()
	t := me.torrent(infoHash)
	if t == nil {
		return errors.New("no such torrent")
	}
	// for _, p := range peers {
	// 	log.Printf("adding peer for %q: %s", infoHash, p)
	// }
	t.AddPeers(peers)
	me.openNewConns()
	return nil
}

func (cl *Client) setMetaData(t *torrent, md metainfo.Info, bytes []byte) (err error) {
	err = t.setMetadata(md, cl.dataDir, bytes)
	if err != nil {
		return
	}
	// If the client intends to upload, it needs to know what state pieces are
	// in.
	if !cl.noUpload {
		// Queue all pieces for hashing. This is done sequentially to avoid
		// spamming goroutines.
		for _, p := range t.Pieces {
			p.QueuedForHash = true
		}
		go func() {
			for i := range t.Pieces {
				cl.verifyPiece(t, pp.Integer(i))
			}
		}()
	}

	cl.downloadStrategy.TorrentStarted(t)
	select {
	case t.gotMetainfo <- &metainfo.MetaInfo{
		Info: metainfo.InfoEx{
			Info: md,
		},
		CreationDate: time.Now().Unix(),
		Comment:      "metadata set in client",
		CreatedBy:    "go.torrent",
		// TODO(anacrolix): Expose trackers given when torrent added.
	}:
	default:
		panic("shouldn't block")
	}
	close(t.gotMetainfo)
	t.gotMetainfo = nil
	return
}

// Prepare a Torrent without any attachment to a Client. That means we can
// initialize all fields that don't require the Client without locking it.
func newTorrent(ih InfoHash, announceList [][]string, halfOpenLimit int) (t *torrent, err error) {
	t = &torrent{
		InfoHash: ih,
		Peers:    make(map[peersKey]Peer, 2000),

		closing:           make(chan struct{}),
		ceasingNetworking: make(chan struct{}),

		gotMetainfo: make(chan *metainfo.MetaInfo, 1),

		HalfOpen: make(map[string]struct{}, halfOpenLimit),
	}
	t.GotMetainfo = t.gotMetainfo
	t.Trackers = make([][]tracker.Client, len(announceList))
	for tierIndex := range announceList {
		tier := t.Trackers[tierIndex]
		for _, url := range announceList[tierIndex] {
			tr, err := tracker.New(url)
			if err != nil {
				log.Print(err)
				continue
			}
			tier = append(tier, tr)
		}
		// The trackers within each tier must be shuffled before use.
		// http://stackoverflow.com/a/12267471/149482
		// http://www.bittorrent.org/beps/bep_0012.html#order-of-processing
		for i := range tier {
			j := mathRand.Intn(i + 1)
			tier[i], tier[j] = tier[j], tier[i]
		}
		t.Trackers[tierIndex] = tier
	}
	return
}

func (cl *Client) AddMagnet(uri string) (t *torrent, err error) {
	m, err := ParseMagnetURI(uri)
	if err != nil {
		return
	}
	t, err = newTorrent(m.InfoHash, [][]string{m.Trackers}, cl.halfOpenLimit)
	if err != nil {
		return
	}
	t.DisplayName = m.DisplayName
	cl.mu.Lock()
	defer cl.mu.Unlock()
	err = cl.addTorrent(t)
	if err != nil {
		t.Close()
	}
	return
}
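
// A hedged usage sketch (not from the original source; the URI is a
// placeholder): a magnet link carries only the infohash and optional
// trackers, so a torrent added this way starts without metadata and fills it
// in over the ut_metadata extension.
//
//	t, err := cl.AddMagnet("magnet:?xt=urn:btih:<40-hex-infohash>&tr=<tracker>")
//	if err != nil {
//		log.Fatal(err)
//	}
//	<-t.GotMetainfo // wait until peers supply the info dictionary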

func (me *Client) DropTorrent(infoHash InfoHash) (err error) {
	me.mu.Lock()
	defer me.mu.Unlock()
	t, ok := me.torrents[infoHash]
	if !ok {
		err = fmt.Errorf("no such torrent")
		return
	}
	err = t.Close()
	if err != nil {
		panic(err)
	}
	delete(me.torrents, infoHash)
	me.downloadStrategy.TorrentStopped(t)
	for _, dw := range me.dataWaits[t] {
		close(dw.ready)
	}
	delete(me.dataWaits, t)
	return
}

func (me *Client) addTorrent(t *torrent) (err error) {
	if _, ok := me.torrents[t.InfoHash]; ok {
		err = fmt.Errorf("torrent infohash collision")
		return
	}
	me.torrents[t.InfoHash] = t
	if !me.disableTrackers {
		go me.announceTorrent(t)
	}
	if me.dHT != nil {
		go me.announceTorrentDHT(t, true)
	}
	return
}

// Adds the torrent to the client.
func (me *Client) AddTorrent(metaInfo *metainfo.MetaInfo) (err error) {
	var ih InfoHash
	CopyExact(&ih, metaInfo.Info.Hash)
	t, err := newTorrent(ih, metaInfo.AnnounceList, me.halfOpenLimit)
	if err != nil {
		return
	}
	me.mu.Lock()
	defer me.mu.Unlock()
	err = me.addTorrent(t)
	if err != nil {
		return
	}
	err = me.setMetaData(t, metaInfo.Info.Info, metaInfo.Info.Bytes)
	if err != nil {
		return
	}
	return
}

func (me *Client) AddTorrentFromFile(name string) (err error) {
	mi, err := metainfo.LoadFromFile(name)
	if err != nil {
		err = fmt.Errorf("error loading metainfo from file: %s", err)
		return
	}
	return me.AddTorrent(mi)
}

func (cl *Client) announceTorrentDHT(t *torrent, impliedPort bool) {
	for {
		ps, err := cl.dHT.GetPeers(string(t.InfoHash[:]))
		if err != nil {
			log.Printf("error getting peers from dht: %s", err)
			return
		}
		nextScrape := time.After(1 * time.Minute)
	getPeers:
		for {
			select {
			case <-nextScrape:
				break getPeers
			case v, ok := <-ps.Values:
				if !ok {
					break getPeers
				}
				peersFoundByDHT.Add(int64(len(v.Peers)))
				err = cl.AddPeers(t.InfoHash, func() (ret []Peer) {
					for _, cp := range v.Peers {
						ret = append(ret, Peer{
							IP:     cp.IP[:],
							Port:   int(cp.Port),
							Source: peerSourceDHT,
						})
					}
					return
				}())
				if err != nil {
					log.Printf("error adding peers from dht for torrent %q: %s", t, err)
					break getPeers
				}
			case <-t.ceasingNetworking:
				ps.Close()
				return
			}
		}
		ps.Close()

		// After a GetPeers, we can announce on the best nodes that gave us an
		// announce token.

		port := cl.incomingPeerPort()
		// If port is zero, then we're not listening, and there's nothing to
		// announce.
		if port != 0 {
			// We can't allow the port to be implied as long as the UTP and
			// DHT ports are different.
			cl.dHT.AnnouncePeer(port, impliedPort, t.InfoHash.AsString())
		}
	}
}

func (cl *Client) announceTorrent(t *torrent) {
	req := tracker.AnnounceRequest{
		Event:    tracker.Started,
		NumWant:  -1,
		Port:     int16(cl.incomingPeerPort()),
		PeerId:   cl.peerID,
		InfoHash: t.InfoHash,
	}
newAnnounce:
	for {
		select {
		case <-t.ceasingNetworking:
			return
		default:
		}
		cl.mu.Lock()
		req.Left = t.BytesLeft()
		cl.mu.Unlock()
		for _, tier := range t.Trackers {
			for trIndex, tr := range tier {
				if err := tr.Connect(); err != nil {
					log.Print(err)
					continue
				}
				resp, err := tr.Announce(&req)
				if err != nil {
					log.Print(err)
					continue
				}
				var peers []Peer
				for _, peer := range resp.Peers {
					peers = append(peers, Peer{
						IP:   peer.IP,
						Port: peer.Port,
					})
				}
				err = cl.AddPeers(t.InfoHash, peers)
				if err != nil {
					log.Printf("error adding peers to torrent %s: %s", t, err)
				} else {
					log.Printf("%s: %d new peers from %s", t, len(peers), tr)
				}
				tier[0], tier[trIndex] = tier[trIndex], tier[0]
				time.Sleep(time.Second * time.Duration(resp.Interval))
				req.Event = tracker.None
				continue newAnnounce
			}
		}
		time.Sleep(5 * time.Second)
	}
}

func (cl *Client) allTorrentsCompleted() bool {
	for _, t := range cl.torrents {
		if !t.haveInfo() {
			return false
		}
		for e := t.IncompletePiecesByBytesLeft.Front(); e != nil; e = e.Next() {
			i := e.Value.(int)
			if t.Pieces[i].Complete() {
				continue
			}
			// If the piece isn't complete, make sure it's not because it's
			// never been hashed.
			cl.queueFirstHash(t, i)
			return false
		}
	}
	return true
}

// Returns true when all torrents are completely downloaded and false if the
// client is stopped before that.
func (me *Client) WaitAll() bool {
	me.mu.Lock()
	defer me.mu.Unlock()
	for !me.allTorrentsCompleted() {
		if me.stopped() {
			return false
		}
		me.event.Wait()
	}
	return true
}
|
|
|
|
|
2014-05-23 19:01:05 +08:00
|
|
|
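// assertRequestHeat cross-checks the DefaultDownloadStrategy's heat counts
// against the requests actually outstanding on each connection, panicking on
// any mismatch. It's a debugging invariant check and does nothing for other
// download strategies.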
func (cl *Client) assertRequestHeat() {
	dds, ok := cl.downloadStrategy.(*DefaultDownloadStrategy)
	if !ok {
		return
	}
	for _, t := range cl.torrents {
		m := make(map[request]int, 3000)
		for _, cn := range t.Conns {
			for r := range cn.Requests {
				m[r]++
			}
		}
		for r, h := range dds.heat[t] {
			if m[r] != h {
				panic(fmt.Sprintln(m[r], h))
			}
		}
	}
}

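// replenishConnRequests asks the download strategy to top up the outstanding
// requests on a connection. If nothing ends up requested and the peer isn't
// choking us, interest is withdrawn.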
func (me *Client) replenishConnRequests(t *torrent, c *connection) {
	if !t.haveInfo() {
		return
	}
	for _, p := range me.downloadStrategy.FillRequests(t, c) {
		// Make sure the state of pieces that would have been requested is
		// known.
		me.queueFirstHash(t, p)
	}
	//me.assertRequestHeat()
	if len(c.Requests) == 0 && !c.PeerChoked {
		c.SetInterested(false)
	}
}

// Handle a received chunk from a peer.
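// A chunk is keyed by (piece index, begin offset, length). Anything not
// currently pending for its piece is counted and discarded; otherwise it is
// written out, and the piece is queued for hashing once its last pending
// chunk arrives.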
func (me *Client) downloadedChunk(t *torrent, c *connection, msg *pp.Message) error {
	chunksDownloadedCount.Add(1)

	req := newRequest(msg.Index, msg.Begin, pp.Integer(len(msg.Piece)))

	// Request has been satisfied.
	me.connDeleteRequest(t, c, req)

	defer me.replenishConnRequests(t, c)

	// Do we actually want this chunk?
	if _, ok := t.Pieces[req.Index].PendingChunkSpecs[req.chunkSpec]; !ok {
		unusedDownloadedChunksCount.Add(1)
		c.UnwantedChunksReceived++
		return nil
	}

	c.UsefulChunksReceived++
	c.lastUsefulChunkReceived = time.Now()

	// Write the chunk out.
	err := t.WriteChunk(int(msg.Index), int64(msg.Begin), msg.Piece)
	if err != nil {
		return fmt.Errorf("error writing chunk: %s", err)
	}

	// Record that we have the chunk.
	delete(t.Pieces[req.Index].PendingChunkSpecs, req.chunkSpec)
	me.dataReady(t, req)
	if len(t.Pieces[req.Index].PendingChunkSpecs) == 0 {
		me.queuePieceCheck(t, req.Index)
	}
	t.PieceBytesLeftChanged(int(req.Index))

	// Unprioritize the chunk.
	me.downloadStrategy.TorrentGotChunk(t, req)

	// Cancel pending requests for this chunk.
	for _, c := range t.Conns {
		if me.connCancel(t, c, req) {
			me.replenishConnRequests(t, c)
		}
	}

	me.downloadStrategy.AssertNotRequested(t, req)

	return nil
}

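// dataReady wakes every DataWaiter whose offset falls inside the chunk
// described by r. Satisfied waiters are removed with a swap-delete, so the
// slice is pruned in place without preserving order.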
func (cl *Client) dataReady(t *torrent, r request) {
	dws := cl.dataWaits[t]
	begin := t.requestOffset(r)
	end := begin + int64(r.Length)
	for i := 0; i < len(dws); {
		dw := dws[i]
		if begin <= dw.offset && dw.offset < end {
			close(dw.ready)
			dws[i] = dws[len(dws)-1]
			dws = dws[:len(dws)-1]
		} else {
			i++
		}
	}
	cl.dataWaits[t] = dws
}

// Returns a channel that is closed when new data has become available in the
// client.
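// The channel is closed immediately if the torrent isn't known, the offset
// doesn't map to a chunk, or that chunk is already present. A minimal
// consumer sketch, assuming a *Client value c, an info hash ih, and an
// absolute byte offset off held by the caller:
//
//	<-c.DataWaiter(ih, off)
//	// Data covering off has arrived (or was already there); re-check and read.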
func (me *Client) DataWaiter(ih InfoHash, off int64) (ret <-chan struct{}) {
	me.mu.Lock()
	defer me.mu.Unlock()
	ch := make(chan struct{})
	ret = ch
	t := me.torrents[ih]
	if t == nil {
		close(ch)
		return
	}
	if r, ok := t.offsetRequest(off); !ok || t.haveChunk(r) {
		close(ch)
		return
	}
	me.dataWaits[t] = append(me.dataWaits[t], dataWait{
		offset: off,
		ready:  ch,
	})
	return
}

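// pieceHashed records the outcome of hashing a piece. On success the piece's
// pending chunks are cleared, data waiters are woken, and Have is posted to
// every connection; on failure its chunks are re-pended (if none were still
// pending) so the piece can be fetched again.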
func (me *Client) pieceHashed(t *torrent, piece pp.Integer, correct bool) {
	p := t.Pieces[piece]
	if p.EverHashed && !correct {
		log.Printf("%s: piece %d failed hash", t, piece)
		failedPieceHashes.Add(1)
	}
	p.EverHashed = true
	if correct {
		p.PendingChunkSpecs = nil
		me.downloadStrategy.TorrentGotPiece(t, int(piece))
		me.dataReady(t, request{
			pp.Integer(piece),
			chunkSpec{0, pp.Integer(t.PieceLength(piece))},
		})
	} else {
		if len(p.PendingChunkSpecs) == 0 {
			t.pendAllChunkSpecs(piece)
		}
	}
	t.PieceBytesLeftChanged(int(piece))
	for _, conn := range t.Conns {
		if correct {
			conn.Post(pp.Message{
				Type:  pp.Have,
				Index: pp.Integer(piece),
			})
			// TODO: Cancel requests for this piece.
			for r := range conn.Requests {
				if r.Index == piece {
					panic("wat")
				}
			}
		}
		// Do this even if the piece is correct because new first-hashings may
		// need to be scheduled.
		if conn.PeerHasPiece(piece) {
			me.replenishConnRequests(t, conn)
		}
	}
	if t.haveAllPieces() && me.noUpload {
		t.CeaseNetworking()
	}
	me.event.Broadcast()
}

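// verifyPiece hashes a piece without holding the client lock for the slow
// part: it waits for any in-flight hash of the same piece, marks the piece as
// hashing, drops cl.mu around HashPiece, then reacquires it to publish the
// result (unless the torrent was closed in the meantime).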
func (cl *Client) verifyPiece(t *torrent, index pp.Integer) {
	cl.mu.Lock()
	defer cl.mu.Unlock()
	p := t.Pieces[index]
	for p.Hashing {
		cl.event.Wait()
	}
	if t.isClosed() {
		return
	}
	p.Hashing = true
	p.QueuedForHash = false
	cl.mu.Unlock()
	sum := t.HashPiece(index)
	cl.mu.Lock()
	select {
	case <-t.closing:
		return
	default:
	}
	p.Hashing = false
	cl.pieceHashed(t, index, sum == p.Hash)
}

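// Torrents returns the torrents currently held by the client, in no
// particular order.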
func (me *Client) Torrents() (ret []*torrent) {
	me.mu.Lock()
	for _, t := range me.torrents {
		ret = append(ret, t)
	}
	me.mu.Unlock()
	return
}