package torrent

import (
	"bufio"
	"container/list"
	"crypto"
	"crypto/rand"
	"encoding"
	"errors"
	"fmt"
	"io"
	"log"
	mathRand "math/rand"
	"net"
	"os"
	"path/filepath"
	"sort"
	"sync"
	"time"

	metainfo "github.com/nsf/libtorgo/torrent"

	"bitbucket.org/anacrolix/go.torrent/peer_protocol"
	"bitbucket.org/anacrolix/go.torrent/tracker"
	_ "bitbucket.org/anacrolix/go.torrent/tracker/udp"
	"launchpad.net/gommap"
)

const (
	PieceHash   = crypto.SHA1
	maxRequests = 250
	chunkSize   = 0x4000 // 16KiB
	BEP20       = "-GT0000-"
)

type InfoHash [20]byte

type pieceSum [20]byte

func copyHashSum(dst, src []byte) {
	if len(dst) != len(src) || copy(dst, src) != len(dst) {
		panic("hash sum sizes differ")
	}
}

func BytesInfoHash(b []byte) (ih InfoHash) {
	if len(b) != len(ih) || copy(ih[:], b) != len(ih) {
		panic("bad infohash bytes")
	}
	return
}

type piece struct {
	Hash              pieceSum
	PendingChunkSpecs map[ChunkSpec]struct{}
	Hashing           bool
	QueuedForHash     bool
	EverHashed        bool
}

func (p *piece) Complete() bool {
	return len(p.PendingChunkSpecs) == 0 && p.EverHashed
}

func lastChunkSpec(pieceLength peer_protocol.Integer) (cs ChunkSpec) {
	cs.Begin = (pieceLength - 1) / chunkSize * chunkSize
	cs.Length = pieceLength - cs.Begin
	return
}
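
// PieceNumPendingBytes returns how many bytes of the piece at index still need
// to be downloaded. Pieces are transferred in chunkSize (16 KiB) chunks, and
// only the final chunk of a piece may be shorter, so the count is the number of
// pending chunks times chunkSize, corrected if the short tail chunk is among
// them. For example, a hypothetical piece of 100000 bytes splits into six full
// 16384-byte chunks plus one tail chunk:
//
//	lastChunkSpec(100000) == ChunkSpec{Begin: 98304, Length: 1696}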
func (t *Torrent) PieceNumPendingBytes(index peer_protocol.Integer) (count peer_protocol.Integer) {
	pendingChunks := t.Pieces[index].PendingChunkSpecs
	count = peer_protocol.Integer(len(pendingChunks)) * chunkSize
	_lastChunkSpec := lastChunkSpec(t.PieceLength(index))
	if _lastChunkSpec.Length != chunkSize {
		if _, ok := pendingChunks[_lastChunkSpec]; ok {
			count += _lastChunkSpec.Length - chunkSize
		}
	}
	return
}

type ChunkSpec struct {
	Begin, Length peer_protocol.Integer
}

type Request struct {
	Index peer_protocol.Integer
	ChunkSpec
}
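
// Connection is the state for a single peer wire connection. Outgoing
// messages are posted to the post channel, marshalled by writeOptimizer, and
// written to the socket by writer.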
type Connection struct {
	Socket net.Conn
	Closed bool
	post   chan encoding.BinaryMarshaler
	write  chan []byte

	// Stuff controlled by the local peer.
	Interested bool
	Choked     bool
	Requests   map[Request]struct{}

	// Stuff controlled by the remote peer.
	PeerId         [20]byte
	PeerInterested bool
	PeerChoked     bool
	PeerRequests   map[Request]struct{}
	PeerExtensions [8]byte
	PeerPieces     []bool
}

func (c *Connection) Close() {
	if c.Closed {
		return
	}
	c.Socket.Close()
	close(c.post)
	c.Closed = true
}

func (c *Connection) PeerHasPiece(index peer_protocol.Integer) bool {
	if c.PeerPieces == nil {
		return false
	}
	return c.PeerPieces[index]
}

func (c *Connection) Post(msg encoding.BinaryMarshaler) {
	c.post <- msg
}

// Returns true if more requests can be sent.
func (c *Connection) Request(chunk Request) bool {
	if !c.PeerPieces[chunk.Index] {
		panic("peer doesn't have that piece!")
	}
	if len(c.Requests) >= maxRequests {
		return false
	}
	c.SetInterested(true)
	if c.PeerChoked {
		return false
	}
	if _, ok := c.Requests[chunk]; !ok {
		c.Post(peer_protocol.Message{
			Type:   peer_protocol.Request,
			Index:  chunk.Index,
			Begin:  chunk.Begin,
			Length: chunk.Length,
		})
	}
	if c.Requests == nil {
		c.Requests = make(map[Request]struct{}, maxRequests)
	}
	c.Requests[chunk] = struct{}{}
	return true
}

func (c *Connection) Unchoke() {
	if !c.Choked {
		return
	}
	c.Post(peer_protocol.Message{
		Type: peer_protocol.Unchoke,
	})
	c.Choked = false
}

func (c *Connection) SetInterested(interested bool) {
	if c.Interested == interested {
		return
	}
	c.Post(peer_protocol.Message{
		Type: func() peer_protocol.MessageType {
			if interested {
				return peer_protocol.Interested
			} else {
				return peer_protocol.NotInterested
			}
		}(),
	})
	c.Interested = interested
}

var (
	// A BitTorrent keep-alive is a message with a zero length prefix: four
	// zero bytes and no message ID.
	keepAliveBytes [4]byte
)

func (conn *Connection) writer() {
	timer := time.NewTimer(0)
	defer timer.Stop()
	for {
		if !timer.Reset(time.Minute) {
			<-timer.C
		}
		var b []byte
		select {
		case <-timer.C:
			b = keepAliveBytes[:]
		case b = <-conn.write:
			if b == nil {
				return
			}
		}
		n, err := conn.Socket.Write(b)
		if err != nil {
			log.Print(err)
			break
		}
		if n != len(b) {
			panic("didn't write all bytes")
		}
		log.Printf("wrote %#v", string(b))
	}
}
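
// writeOptimizer buffers posted messages and feeds them, one marshalled
// message at a time, to the writer goroutine. It uses the nil-channel select
// idiom: when nothing is pending, the write channel is set to nil so its send
// case can never fire and the select blocks only on new posts. A minimal
// sketch of the idiom (in, realOut and queue are illustrative names, not part
// of this package):
//
//	out := realOut
//	var next []byte
//	if len(queue) == 0 {
//		out = nil // a nil channel blocks forever, disabling the send case
//	} else {
//		next = queue[0]
//	}
//	select {
//	case msg := <-in:
//		queue = append(queue, msg)
//	case out <- next:
//		queue = queue[1:]
//	}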
func (conn *Connection) writeOptimizer() {
	pending := list.New()
	var nextWrite []byte
	defer close(conn.write)
	for {
		write := conn.write
		if pending.Len() == 0 {
			write = nil
		} else {
			var err error
			nextWrite, err = pending.Front().Value.(encoding.BinaryMarshaler).MarshalBinary()
			if err != nil {
				panic(err)
			}
		}
		select {
		case msg, ok := <-conn.post:
			if !ok {
				return
			}
			pending.PushBack(msg)
		case write <- nextWrite:
			pending.Remove(pending.Front())
		}
	}
}
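
// Torrent is the state for a single torrent managed by a Client: its piece
// bookkeeping, the memory-mapped data, the metainfo, and the current peers and
// connections.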
type Torrent struct {
	InfoHash   InfoHash
	Pieces     []*piece
	Data       MMapSpan
	MetaInfo   *metainfo.MetaInfo
	Conns      []*Connection
	Peers      []Peer
	Priorities *list.List
	// BEP 12 Multitracker Metadata Extension. The tracker.Client instances
	// mirror their respective URLs from the announce-list key.
	Trackers [][]tracker.Client
}

func (t *Torrent) NumPieces() int {
	return len(t.MetaInfo.Pieces) / PieceHash.Size()
}

func (t *Torrent) Length() int64 {
	return int64(t.PieceLength(peer_protocol.Integer(len(t.Pieces)-1))) + int64(len(t.Pieces)-1)*int64(t.PieceLength(0))
}

func (t *Torrent) Close() (err error) {
	t.Data.Close()
	for _, conn := range t.Conns {
		conn.Close()
	}
	return
}

type pieceByBytesPendingSlice struct {
	Pending, Indices []peer_protocol.Integer
}

func (pcs pieceByBytesPendingSlice) Len() int {
	return len(pcs.Indices)
}

func (me pieceByBytesPendingSlice) Less(i, j int) bool {
	return me.Pending[me.Indices[i]] < me.Pending[me.Indices[j]]
}

func (me pieceByBytesPendingSlice) Swap(i, j int) {
	me.Indices[i], me.Indices[j] = me.Indices[j], me.Indices[i]
}

func (t *Torrent) piecesByPendingBytesDesc() (indices []peer_protocol.Integer) {
	slice := pieceByBytesPendingSlice{
		Pending: make([]peer_protocol.Integer, 0, len(t.Pieces)),
		Indices: make([]peer_protocol.Integer, 0, len(t.Pieces)),
	}
	for i := range t.Pieces {
		slice.Pending = append(slice.Pending, t.PieceNumPendingBytes(peer_protocol.Integer(i)))
		slice.Indices = append(slice.Indices, peer_protocol.Integer(i))
	}
	sort.Sort(sort.Reverse(slice))
	return slice.Indices
}

// Currently doesn't really queue, but should in the future.
func (cl *Client) queuePieceCheck(t *Torrent, pieceIndex peer_protocol.Integer) {
	piece := t.Pieces[pieceIndex]
	if piece.QueuedForHash {
		return
	}
	piece.QueuedForHash = true
	go cl.verifyPiece(t, pieceIndex)
}

func (t *Torrent) offsetRequest(off int64) (req Request, ok bool) {
	req.Index = peer_protocol.Integer(off / t.MetaInfo.PieceLength)
	if req.Index < 0 || int(req.Index) >= len(t.Pieces) {
		return
	}
	off %= t.MetaInfo.PieceLength
	pieceLeft := t.PieceLength(req.Index) - peer_protocol.Integer(off)
	if pieceLeft <= 0 {
		return
	}
	req.Begin = chunkSize * (peer_protocol.Integer(off) / chunkSize)
	req.Length = chunkSize
	if req.Length > pieceLeft {
		req.Length = pieceLeft
	}
	ok = true
	return
}
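
// PrioritizeDataRegion marks the still-pending chunks overlapping the byte
// range [off, off+len_) of the torrent identified by ih as high priority, so
// that replenishConnRequests asks peers for them before anything else. An
// illustrative caller (readOff and buf are hypothetical) would prioritize the
// region it is about to read:
//
//	cl.PrioritizeDataRegion(ih, readOff, int64(len(buf)))
//	n, err := cl.TorrentReadAt(ih, readOff, buf)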
func (cl *Client) PrioritizeDataRegion(ih InfoHash, off, len_ int64) {
	cl.mu.Lock()
	defer cl.mu.Unlock()
	t := cl.torrent(ih)
	newPriorities := make([]Request, 0, (len_+2*(chunkSize-1))/chunkSize)
	for len_ > 0 {
		req, ok := t.offsetRequest(off)
		if !ok {
			break
		}
		off += int64(req.Length)
		len_ -= int64(req.Length)
		if _, ok = t.Pieces[req.Index].PendingChunkSpecs[req.ChunkSpec]; !ok {
			continue
		}
		newPriorities = append(newPriorities, req)
	}
	if len(newPriorities) == 0 {
		return
	}
	log.Print(newPriorities)
	if t.Priorities == nil {
		t.Priorities = list.New()
	}
	t.Priorities.PushFront(newPriorities[0])
	for _, req := range newPriorities[1:] {
		t.Priorities.PushBack(req)
	}
	for _, cn := range t.Conns {
		cl.replenishConnRequests(t, cn)
	}
}

func (t *Torrent) WriteChunk(piece int, begin int64, data []byte) (err error) {
	_, err = t.Data.WriteAt(data, int64(piece)*t.MetaInfo.PieceLength+begin)
	return
}

func (t *Torrent) bitfield() (bf []bool) {
	for _, p := range t.Pieces {
		bf = append(bf, p.EverHashed && len(p.PendingChunkSpecs) == 0)
	}
	return
}

func (t *Torrent) pendAllChunkSpecs(index peer_protocol.Integer) {
	piece := t.Pieces[index]
	if piece.PendingChunkSpecs == nil {
		piece.PendingChunkSpecs = make(
			map[ChunkSpec]struct{},
			(t.MetaInfo.PieceLength+chunkSize-1)/chunkSize)
	}
	c := ChunkSpec{
		Begin: 0,
	}
	cs := piece.PendingChunkSpecs
	log.Print(index, t.PieceLength(index))
	for left := peer_protocol.Integer(t.PieceLength(index)); left != 0; left -= c.Length {
		c.Length = left
		if c.Length > chunkSize {
			c.Length = chunkSize
		}
		cs[c] = struct{}{}
		c.Begin += c.Length
	}
	return
}

func (t *Torrent) requestHeat() (ret map[Request]int) {
	ret = make(map[Request]int)
	for _, conn := range t.Conns {
		for req := range conn.Requests {
			ret[req]++
		}
	}
	return
}

type Peer struct {
	Id   [20]byte
	IP   net.IP
	Port int
}

func (t *Torrent) PieceLength(piece peer_protocol.Integer) (len_ peer_protocol.Integer) {
	if int(piece) == t.NumPieces()-1 {
		len_ = peer_protocol.Integer(t.Data.Size() % t.MetaInfo.PieceLength)
	}
	if len_ == 0 {
		len_ = peer_protocol.Integer(t.MetaInfo.PieceLength)
	}
	return
}

func (t *Torrent) HashPiece(piece peer_protocol.Integer) (ps pieceSum) {
	hash := PieceHash.New()
	n, err := t.Data.WriteSectionTo(hash, int64(piece)*t.MetaInfo.PieceLength, t.MetaInfo.PieceLength)
	if err != nil {
		panic(err)
	}
	if peer_protocol.Integer(n) != t.PieceLength(piece) {
		panic(fmt.Sprintf("hashed wrong number of bytes: expected %d; did %d; piece %d", t.PieceLength(piece), n, piece))
	}
	copyHashSum(ps[:], hash.Sum(nil))
	return
}

type DataSpec struct {
	InfoHash
	Request
}
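
// Client manages a set of torrents and their peer connections. The zero value
// is not ready for use: configure the exported fields, then call Start. A
// rough usage sketch from an importing package, assuming metaInfo was loaded
// elsewhere with the libtorgo torrent package:
//
//	cl := &torrent.Client{DataDir: "/tmp/torrent-data"}
//	cl.Start()
//	defer cl.Stop()
//	if err := cl.AddTorrent(metaInfo); err != nil {
//		log.Fatal(err)
//	}
//	cl.WaitAll()
//
// Unless DisableTrackers is set, peers are discovered by announcing to the
// torrent's trackers; AddPeers can also be used to add peers directly.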
type Client struct {
	DataDir         string
	HalfOpenLimit   int
	PeerId          [20]byte
	Listener        net.Listener
	DisableTrackers bool

	sync.Mutex
	mu    *sync.Mutex
	event sync.Cond
	quit  chan struct{}

	halfOpen   int
	torrents   map[InfoHash]*Torrent
	dataWaiter chan struct{}
}

var (
	ErrDataNotReady = errors.New("data not ready")
)
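
// TorrentReadAt reads into p from the given offset of the torrent's data. It
// refuses to return bytes from chunks that are still pending or pieces that
// are still being hashed, returning ErrDataNotReady instead (and queueing the
// piece for verification if it has never been hashed). A minimal retry sketch
// from an importing package (ih, off and buf assumed in scope):
//
//	var n int
//	var err error
//	for {
//		w := cl.DataWaiter()
//		n, err = cl.TorrentReadAt(ih, off, buf)
//		if err != torrent.ErrDataNotReady {
//			break
//		}
//		<-w
//	}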
func (cl *Client) TorrentReadAt(ih InfoHash, off int64, p []byte) (n int, err error) {
	cl.mu.Lock()
	defer cl.mu.Unlock()
	t := cl.torrent(ih)
	if t == nil {
		err = errors.New("unknown torrent")
		return
	}
	index := peer_protocol.Integer(off / t.MetaInfo.PieceLength)
	// Reading outside the bounds of a file is an error.
	if index < 0 {
		err = os.ErrInvalid
		return
	}
	if int(index) >= len(t.Pieces) {
		err = io.EOF
		return
	}
	piece := t.Pieces[index]
	if !piece.EverHashed {
		cl.queuePieceCheck(t, index)
	}
	if piece.Hashing {
		err = ErrDataNotReady
		return
	}
	pieceOff := peer_protocol.Integer(off % int64(t.PieceLength(0)))
	high := int(t.PieceLength(index) - pieceOff)
	if high < len(p) {
		p = p[:high]
	}
	for cs := range piece.PendingChunkSpecs {
		chunkOff := int64(pieceOff) - int64(cs.Begin)
		if chunkOff >= int64(t.PieceLength(index)) {
			panic(chunkOff)
		}
		if 0 <= chunkOff && chunkOff < int64(cs.Length) {
			// read begins in a pending chunk
			err = ErrDataNotReady
			return
		}
		// pending chunk caps available data
		if chunkOff < 0 && int64(len(p)) > -chunkOff {
			p = p[:-chunkOff]
		}
	}
	return t.Data.ReadAt(p, off)
}

func (c *Client) Start() {
	c.mu = &c.Mutex
	c.event.L = c.mu
	c.torrents = make(map[InfoHash]*Torrent)
	if c.HalfOpenLimit == 0 {
		c.HalfOpenLimit = 10
	}
	o := copy(c.PeerId[:], BEP20)
	_, err := rand.Read(c.PeerId[o:])
	if err != nil {
		panic("error generating peer id")
	}
	c.quit = make(chan struct{})
	if c.Listener != nil {
		go c.acceptConnections()
	}
}

func (cl *Client) stopped() bool {
	select {
	case <-cl.quit:
		return true
	default:
		return false
	}
}

func (me *Client) Stop() {
	close(me.quit)
	me.event.Broadcast()
	for _, t := range me.torrents {
		for _, c := range t.Conns {
			c.Close()
		}
	}
}

func (cl *Client) acceptConnections() {
	for {
		conn, err := cl.Listener.Accept()
		select {
		case <-cl.quit:
			return
		default:
		}
		if err != nil {
			log.Print(err)
			return
		}
		go func() {
			if err := cl.runConnection(conn, nil); err != nil {
				log.Print(err)
			}
		}()
	}
}

func mmapTorrentData(metaInfo *metainfo.MetaInfo, location string) (mms MMapSpan, err error) {
	defer func() {
		if err != nil {
			mms.Close()
			mms = nil
		}
	}()
	for _, miFile := range metaInfo.Files {
		fileName := filepath.Join(append([]string{location, metaInfo.Name}, miFile.Path...)...)
		err = os.MkdirAll(filepath.Dir(fileName), 0777)
		if err != nil {
			return
		}
		var file *os.File
		file, err = os.OpenFile(fileName, os.O_CREATE|os.O_RDWR, 0666)
		if err != nil {
			return
		}
		func() {
			defer file.Close()
			var fi os.FileInfo
			fi, err = file.Stat()
			if err != nil {
				return
			}
			if fi.Size() < miFile.Length {
				err = file.Truncate(miFile.Length)
				if err != nil {
					return
				}
			}
			var mMap gommap.MMap
			mMap, err = gommap.MapRegion(file.Fd(), 0, miFile.Length, gommap.PROT_READ|gommap.PROT_WRITE, gommap.MAP_SHARED)
			if err != nil {
				return
			}
			if int64(len(mMap)) != miFile.Length {
				panic("mmap has wrong length")
			}
			mms = append(mms, MMap{mMap})
		}()
		if err != nil {
			return
		}
	}
	return
}

func (me *Client) torrent(ih InfoHash) *Torrent {
	for _, t := range me.torrents {
		if t.InfoHash == ih {
			return t
		}
	}
	return nil
}

func (me *Client) initiateConn(peer Peer, torrent *Torrent) {
	if peer.Id == me.PeerId {
		return
	}
	me.halfOpen++
	go func() {
		conn, err := net.DialTCP("tcp", nil, &net.TCPAddr{
			IP:   peer.IP,
			Port: peer.Port,
		})

		me.mu.Lock()
		me.halfOpen--
		me.openNewConns()
		me.mu.Unlock()

		if err != nil {
			log.Printf("error connecting to peer: %s", err)
			return
		}
		log.Printf("connected to %s", conn.RemoteAddr())
		err = me.runConnection(conn, torrent)
		if err != nil {
			log.Print(err)
		}
	}()
}

func (t *Torrent) haveAllPieces() bool {
	for _, piece := range t.Pieces {
		if !piece.Complete() {
			return false
		}
	}
	return true
}

func (me *Torrent) haveAnyPieces() bool {
	for _, piece := range me.Pieces {
		if piece.Complete() {
			return true
		}
	}
	return false
}
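
// runConnection performs the BitTorrent handshake on sock and then services
// the connection. The handshake sent to the peer is, in order: the 20-byte
// protocol string, 8 reserved (extension) bytes, the 20-byte info hash and the
// 20-byte peer ID. The peer's first 28 bytes (protocol string plus extension
// bytes) are read back before its info hash and peer ID. When torrent is nil
// the connection is inbound, and the torrent is looked up from the info hash
// the peer sends.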
func (me *Client) runConnection(sock net.Conn, torrent *Torrent) (err error) {
	conn := &Connection{
		Socket:     sock,
		Choked:     true,
		PeerChoked: true,
		write:      make(chan []byte),
		post:       make(chan encoding.BinaryMarshaler),
	}
	defer conn.Close()
	go conn.writer()
	go conn.writeOptimizer()
	conn.post <- peer_protocol.Bytes(peer_protocol.Protocol)
	conn.post <- peer_protocol.Bytes("\x00\x00\x00\x00\x00\x00\x00\x00")
	if torrent != nil {
		conn.post <- peer_protocol.Bytes(torrent.InfoHash[:])
		conn.post <- peer_protocol.Bytes(me.PeerId[:])
	}
	var b [28]byte
	_, err = io.ReadFull(conn.Socket, b[:])
	if err != nil {
		err = fmt.Errorf("when reading protocol and extensions: %s", err)
		return
	}
	if string(b[:20]) != peer_protocol.Protocol {
		err = fmt.Errorf("wrong protocol: %#v", string(b[:20]))
		return
	}
	if 8 != copy(conn.PeerExtensions[:], b[20:]) {
		panic("wtf")
	}
	log.Printf("peer extensions: %#v", string(conn.PeerExtensions[:]))
	var infoHash [20]byte
	_, err = io.ReadFull(conn.Socket, infoHash[:])
	if err != nil {
		return fmt.Errorf("reading peer info hash: %s", err)
	}
	_, err = io.ReadFull(conn.Socket, conn.PeerId[:])
	if err != nil {
		return fmt.Errorf("reading peer id: %s", err)
	}
	if torrent == nil {
		torrent = me.torrent(infoHash)
		if torrent == nil {
			return
		}
		conn.post <- peer_protocol.Bytes(torrent.InfoHash[:])
		conn.post <- peer_protocol.Bytes(me.PeerId[:])
	}
	me.mu.Lock()
	defer me.mu.Unlock()
	if !me.addConnection(torrent, conn) {
		return
	}
	if torrent.haveAnyPieces() {
		conn.Post(peer_protocol.Message{
			Type:     peer_protocol.Bitfield,
			Bitfield: torrent.bitfield(),
		})
	}
	err = me.connectionLoop(torrent, conn)
	if err != nil {
		err = fmt.Errorf("during Connection loop: %s", err)
	}
	me.dropConnection(torrent, conn)
	return
}

func (me *Client) peerGotPiece(torrent *Torrent, conn *Connection, piece int) {
	if conn.PeerPieces == nil {
		conn.PeerPieces = make([]bool, len(torrent.Pieces))
	}
	conn.PeerPieces[piece] = true
	if torrent.wantPiece(piece) {
		me.replenishConnRequests(torrent, conn)
	}
}

func (t *Torrent) wantPiece(index int) bool {
	p := t.Pieces[index]
	return p.EverHashed && len(p.PendingChunkSpecs) != 0
}

func (me *Client) peerUnchoked(torrent *Torrent, conn *Connection) {
	me.replenishConnRequests(torrent, conn)
}

func (me *Client) connectionLoop(torrent *Torrent, conn *Connection) error {
	decoder := peer_protocol.Decoder{
		R:         bufio.NewReader(conn.Socket),
		MaxLength: 256 * 1024,
	}
	for {
		me.mu.Unlock()
		// TODO: Can this be allocated on the stack?
		msg := new(peer_protocol.Message)
		err := decoder.Decode(msg)
		me.mu.Lock()
		if err != nil {
			if me.stopped() {
				return nil
			}
			return err
		}
		if msg.Keepalive {
			continue
		}
		switch msg.Type {
		case peer_protocol.Choke:
			conn.PeerChoked = true
			conn.Requests = nil
		case peer_protocol.Unchoke:
			conn.PeerChoked = false
			me.peerUnchoked(torrent, conn)
		case peer_protocol.Interested:
			conn.PeerInterested = true
			// TODO: This should be done from a dedicated unchoking routine.
			conn.Unchoke()
		case peer_protocol.NotInterested:
			conn.PeerInterested = false
		case peer_protocol.Have:
			me.peerGotPiece(torrent, conn, int(msg.Index))
		case peer_protocol.Request:
			if conn.PeerRequests == nil {
				conn.PeerRequests = make(map[Request]struct{}, maxRequests)
			}
			request := Request{
				Index:     msg.Index,
				ChunkSpec: ChunkSpec{msg.Begin, msg.Length},
			}
			conn.PeerRequests[request] = struct{}{}
			// TODO: Requests should be satisfied from a dedicated upload routine.
			p := make([]byte, msg.Length)
			n, err := torrent.Data.ReadAt(p, int64(torrent.PieceLength(0))*int64(msg.Index)+int64(msg.Begin))
			if err != nil {
				return fmt.Errorf("reading torrent data to serve request %s: %s", request, err)
			}
			if n != int(msg.Length) {
				return fmt.Errorf("bad request: %s", msg)
			}
			conn.Post(peer_protocol.Message{
				Type:  peer_protocol.Piece,
				Index: msg.Index,
				Begin: msg.Begin,
				Piece: p,
			})
		case peer_protocol.Bitfield:
			if len(msg.Bitfield) < len(torrent.Pieces) {
				err = errors.New("received invalid bitfield")
				break
			}
			if conn.PeerPieces != nil {
				err = errors.New("received unexpected bitfield")
				break
			}
			conn.PeerPieces = msg.Bitfield[:len(torrent.Pieces)]
			for index, has := range conn.PeerPieces {
				if has {
					me.peerGotPiece(torrent, conn, index)
				}
			}
		case peer_protocol.Piece:
			request_ := Request{msg.Index, ChunkSpec{msg.Begin, peer_protocol.Integer(len(msg.Piece))}}
			if _, ok := conn.Requests[request_]; !ok {
				err = errors.New("unexpected piece")
				break
			}
			delete(conn.Requests, request_)
			err = me.downloadedChunk(torrent, msg)
		default:
			log.Printf("received unknown message type: %#v", msg.Type)
		}
		if err != nil {
			return err
		}
		log.Print("replenishing from loop")
		me.replenishConnRequests(torrent, conn)
	}
}

func (me *Client) dropConnection(torrent *Torrent, conn *Connection) {
	conn.Socket.Close()
	for i0, c := range torrent.Conns {
		if c != conn {
			continue
		}
		i1 := len(torrent.Conns) - 1
		if i0 != i1 {
			torrent.Conns[i0] = torrent.Conns[i1]
		}
		torrent.Conns = torrent.Conns[:i1]
		return
	}
	panic("no such Connection")
}

func (me *Client) addConnection(t *Torrent, c *Connection) bool {
	for _, c0 := range t.Conns {
		if c.PeerId == c0.PeerId {
			log.Printf("%s and %s have the same ID: %s", c.Socket.RemoteAddr(), c0.Socket.RemoteAddr(), c.PeerId)
			return false
		}
	}
	t.Conns = append(t.Conns, c)
	return true
}

func (me *Client) openNewConns() {
	for _, t := range me.torrents {
		for len(t.Peers) != 0 {
			if me.halfOpen >= me.HalfOpenLimit {
				return
			}
			p := t.Peers[0]
			t.Peers = t.Peers[1:]
			me.initiateConn(p, t)
		}
	}
}

func (me *Client) AddPeers(infoHash InfoHash, peers []Peer) error {
	me.mu.Lock()
	defer me.mu.Unlock()
	t := me.torrent(infoHash)
	if t == nil {
		return errors.New("no such torrent")
	}
	t.Peers = append(t.Peers, peers...)
	me.openNewConns()
	return nil
}

// Prepare a Torrent without any attachment to a Client. That means we can
// initialize all fields that don't require the Client without locking it.
func newTorrent(metaInfo *metainfo.MetaInfo, dataDir string) (torrent *Torrent, err error) {
	torrent = &Torrent{
		InfoHash: BytesInfoHash(metaInfo.InfoHash),
		MetaInfo: metaInfo,
	}
	torrent.Data, err = mmapTorrentData(metaInfo, dataDir)
	if err != nil {
		return
	}
	for offset := 0; offset < len(metaInfo.Pieces); offset += PieceHash.Size() {
		hash := metaInfo.Pieces[offset : offset+PieceHash.Size()]
		if len(hash) != PieceHash.Size() {
			err = errors.New("bad piece hash in metainfo")
			return
		}
		piece := &piece{}
		copyHashSum(piece.Hash[:], hash)
		torrent.Pieces = append(torrent.Pieces, piece)
		torrent.pendAllChunkSpecs(peer_protocol.Integer(len(torrent.Pieces) - 1))
	}
	torrent.Trackers = make([][]tracker.Client, len(metaInfo.AnnounceList))
	for tierIndex := range metaInfo.AnnounceList {
		tier := torrent.Trackers[tierIndex]
		for _, url := range metaInfo.AnnounceList[tierIndex] {
			tr, err := tracker.New(url)
			if err != nil {
				log.Print(err)
				continue
			}
			tier = append(tier, tr)
		}
		// The trackers within each tier must be shuffled before use.
		// http://stackoverflow.com/a/12267471/149482
		// http://www.bittorrent.org/beps/bep_0012.html#order-of-processing
		for i := range tier {
			j := mathRand.Intn(i + 1)
			tier[i], tier[j] = tier[j], tier[i]
		}
		torrent.Trackers[tierIndex] = tier
	}
	return
}

func (me *Client) AddTorrent(metaInfo *metainfo.MetaInfo) error {
	torrent, err := newTorrent(metaInfo, me.DataDir)
	if err != nil {
		return err
	}
	me.mu.Lock()
	defer me.mu.Unlock()
	if _, ok := me.torrents[torrent.InfoHash]; ok {
		return torrent.Close()
	}
	me.torrents[torrent.InfoHash] = torrent
	if !me.DisableTrackers {
		go me.announceTorrent(torrent)
	}
	for i := range torrent.Pieces {
		me.queuePieceCheck(torrent, peer_protocol.Integer(i))
	}
	return nil
}

func (cl *Client) announceTorrent(t *Torrent) {
	req := tracker.AnnounceRequest{
		Event:   tracker.Started,
		NumWant: -1,
	}
	req.PeerId = cl.PeerId
	req.InfoHash = t.InfoHash
newAnnounce:
	for {
		for _, tier := range t.Trackers {
			for trIndex, tr := range tier {
				if err := tr.Connect(); err != nil {
					log.Print(err)
					continue
				}
				resp, err := tr.Announce(&req)
				if err != nil {
					log.Print(err)
					continue
				}
				var peers []Peer
				for _, peer := range resp.Peers {
					peers = append(peers, Peer{
						IP:   peer.IP,
						Port: peer.Port,
					})
				}
				if err := cl.AddPeers(t.InfoHash, peers); err != nil {
					log.Print(err)
					return
				}
				log.Printf("%d new peers from %s", len(peers), "TODO")
				tier[0], tier[trIndex] = tier[trIndex], tier[0]
				time.Sleep(time.Second * time.Duration(resp.Interval))
				continue newAnnounce
			}
		}
		time.Sleep(time.Second)
	}
}

func (cl *Client) allTorrentsCompleted() bool {
	for _, t := range cl.torrents {
		if !t.haveAllPieces() {
			return false
		}
	}
	return true
}

func (me *Client) WaitAll() {
	me.mu.Lock()
	for !me.allTorrentsCompleted() {
		me.event.Wait()
	}
	me.mu.Unlock()
}
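
// replenishConnRequests tops up the outstanding requests on conn, up to
// maxRequests. Prioritized chunks (see PrioritizeDataRegion) are requested
// first, then the remaining chunks of partially downloaded pieces, ordered by
// how many bytes each piece still needs. Chunks already requested on another
// connection, and pieces that are complete or currently hashing, are skipped.
// If nothing ends up requested, the connection is marked not interested.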
func (me *Client) replenishConnRequests(torrent *Torrent, conn *Connection) {
	requestHeatMap := torrent.requestHeat()
	addRequest := func(req Request) (again bool) {
		piece := torrent.Pieces[req.Index]
		if piece.Hashing {
			// We can't be sure we want this.
			log.Print("piece is hashing")
			return true
		}
		if piece.Complete() {
			log.Print("piece is complete")
			// We already have this.
			return true
		}
		if requestHeatMap[req] > 0 {
			log.Print("piece is hot")
			// We've already requested this.
			return true
		}
		return conn.Request(req)
	}
	// First request prioritized chunks.
	if torrent.Priorities != nil {
		for e := torrent.Priorities.Front(); e != nil; e = e.Next() {
			log.Print(e.Value.(Request))
			if !addRequest(e.Value.(Request)) {
				return
			}
		}
	}
	// Then finish off incomplete pieces in order of bytes remaining.
	for _, index := range torrent.piecesByPendingBytesDesc() {
		if torrent.PieceNumPendingBytes(index) == torrent.PieceLength(index) {
			continue
		}
		for chunkSpec := range torrent.Pieces[index].PendingChunkSpecs {
			if !addRequest(Request{index, chunkSpec}) {
				return
			}
		}
	}
	if len(conn.Requests) == 0 {
		conn.SetInterested(false)
	}
}

func (me *Client) downloadedChunk(torrent *Torrent, msg *peer_protocol.Message) (err error) {
	request := Request{msg.Index, ChunkSpec{msg.Begin, peer_protocol.Integer(len(msg.Piece))}}
	if _, ok := torrent.Pieces[request.Index].PendingChunkSpecs[request.ChunkSpec]; !ok {
		log.Printf("got unnecessary chunk: %s", request)
		return
	}
	log.Printf("got chunk %s", request)
	err = torrent.WriteChunk(int(msg.Index), int64(msg.Begin), msg.Piece)
	if err != nil {
		return
	}
	delete(torrent.Pieces[request.Index].PendingChunkSpecs, request.ChunkSpec)
	if len(torrent.Pieces[request.Index].PendingChunkSpecs) == 0 {
		me.queuePieceCheck(torrent, request.Index)
	}
	if torrent.Priorities != nil {
		var next *list.Element
		for e := torrent.Priorities.Front(); e != nil; e = next {
			next = e.Next()
			if e.Value.(Request) == request {
				torrent.Priorities.Remove(e)
			}
		}
	}
	me.dataReady(DataSpec{torrent.InfoHash, request})
	return
}

func (cl *Client) dataReady(ds DataSpec) {
	if cl.dataWaiter != nil {
		close(cl.dataWaiter)
	}
	cl.dataWaiter = nil
}
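
// DataWaiter returns a channel that is closed the next time any torrent data
// becomes available: a chunk is written or a piece passes its hash check. A
// fresh channel must be obtained for each wait.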
func (cl *Client) DataWaiter() <-chan struct{} {
	cl.Lock()
	defer cl.Unlock()
	if cl.dataWaiter == nil {
		cl.dataWaiter = make(chan struct{})
	}
	return cl.dataWaiter
}

func (me *Client) pieceHashed(t *Torrent, piece peer_protocol.Integer, correct bool) {
	p := t.Pieces[piece]
	p.EverHashed = true
	if correct {
		log.Print("piece passed hash")
		p.PendingChunkSpecs = nil
		var next *list.Element
		if t.Priorities != nil {
			for e := t.Priorities.Front(); e != nil; e = next {
				next = e.Next()
				if e.Value.(Request).Index == piece {
					t.Priorities.Remove(e)
				}
			}
		}
		me.dataReady(DataSpec{
			t.InfoHash,
			Request{
				peer_protocol.Integer(piece),
				ChunkSpec{0, peer_protocol.Integer(t.PieceLength(piece))},
			},
		})
	} else {
		log.Print("piece failed hash")
		if len(p.PendingChunkSpecs) == 0 {
			t.pendAllChunkSpecs(piece)
		}
	}
	for _, conn := range t.Conns {
		if correct {
			conn.Post(peer_protocol.Message{
				Type:  peer_protocol.Have,
				Index: peer_protocol.Integer(piece),
			})
		} else {
			if conn.PeerHasPiece(piece) {
				me.replenishConnRequests(t, conn)
			}
		}
	}
	me.event.Broadcast()
}
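
// verifyPiece hashes the piece outside the client lock and records the result
// via pieceHashed. If another goroutine is already hashing the same piece, it
// waits for that to finish first.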
func (cl *Client) verifyPiece(t *Torrent, index peer_protocol.Integer) {
	cl.mu.Lock()
	p := t.Pieces[index]
	for p.Hashing {
		cl.event.Wait()
	}
	p.Hashing = true
	p.QueuedForHash = false
	cl.mu.Unlock()
	sum := t.HashPiece(index)
	cl.mu.Lock()
	p.Hashing = false
	cl.pieceHashed(t, index, sum == p.Hash)
	cl.mu.Unlock()
}

func (me *Client) Torrents() (ret []*Torrent) {
	me.mu.Lock()
	for _, t := range me.torrents {
		ret = append(ret, t)
	}
	me.mu.Unlock()
	return
}