package torrent

import (
	"container/heap"
	"context"
	"encoding/gob"
	"math/rand"
	"reflect"
	"runtime/pprof"
	"time"
	"unsafe"

	"github.com/anacrolix/log"
	"github.com/anacrolix/multiless"

	request_strategy "github.com/anacrolix/torrent/request-strategy"
)

// Returns what is necessary to run request_strategy.GetRequestablePieces for primaryTorrent.
func (cl *Client) getRequestStrategyInput(primaryTorrent *Torrent) (input request_strategy.Input) {
	input.MaxUnverifiedBytes = cl.config.MaxUnverifiedBytes
	if !primaryTorrent.haveInfo() {
		return
	}
	if capFunc := primaryTorrent.storage.Capacity; capFunc != nil {
		if cap, ok := (*capFunc)(); ok {
			input.Capacity = &cap
		}
	}
	if input.Capacity == nil {
		input.Torrents = []request_strategy.Torrent{primaryTorrent.requestStrategyTorrentInput()}
		return
	}
	input.Torrents = make([]request_strategy.Torrent, 0, len(cl.torrents))
	for _, t := range cl.torrents {
		if !t.haveInfo() {
			// This would be removed if metadata is handled here. Determining chunks per piece
			// requires the info. If we have no info, we have no pieces either, so the end result
			// is the same.
			continue
		}
		if t.storage.Capacity != primaryTorrent.storage.Capacity {
			continue
		}
		input.Torrents = append(input.Torrents, t.requestStrategyTorrentInput())
	}
	return
}
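
// getRequestStrategyInput returns the client-wide request strategy input, with this torrent as the
// primary torrent.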
func (t *Torrent) getRequestStrategyInput() request_strategy.Input {
	return t.cl.getRequestStrategyInput(t)
}
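
// requestStrategyTorrentInput assembles the request_strategy.Torrent view of this torrent,
// including the per-piece request state.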
func (t *Torrent) requestStrategyTorrentInput() request_strategy.Torrent {
	rst := request_strategy.Torrent{
		InfoHash:       t.infoHash,
		ChunksPerPiece: t.chunksPerRegularPiece(),
	}
	rst.Pieces = make([]request_strategy.Piece, 0, len(t.pieces))
	for i := range t.pieces {
		p := &t.pieces[i]
		rst.Pieces = append(rst.Pieces, request_strategy.Piece{
			Request:           !t.ignorePieceForRequests(i),
			Priority:          p.purePriority(),
			Partial:           t.piecePartiallyDownloaded(i),
			Availability:      p.availability,
			Length:            int64(p.length()),
			NumPendingChunks:  int(t.pieceNumPendingChunks(i)),
			IterPendingChunks: &p.undirtiedChunksIter,
		})
	}
	return rst
}
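
// peerId is registered so that values containing it can round-trip through encoding/gob.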
func init() {
	gob.Register(peerId{})
}
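
// peerId pairs a Peer with its pointer value, so the peer can be identified by a stable integer
// (see Uintptr).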
type peerId struct {
	*Peer
	ptr uintptr
}

func (p peerId) Uintptr() uintptr {
	return p.ptr
}
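
// GobEncode returns the raw bytes of the pointer value, aliasing p.ptr's memory through a
// manually constructed slice header.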
func (p peerId) GobEncode() (b []byte, _ error) {
	*(*reflect.SliceHeader)(unsafe.Pointer(&b)) = reflect.SliceHeader{
		Data: uintptr(unsafe.Pointer(&p.ptr)),
		Len:  int(unsafe.Sizeof(p.ptr)),
		Cap:  int(unsafe.Sizeof(p.ptr)),
	}
	return
}
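
// GobDecode writes the encoded pointer bytes back over both the embedded *Peer and the ptr field.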
func (p *peerId) GobDecode(b []byte) error {
	if uintptr(len(b)) != unsafe.Sizeof(p.ptr) {
		panic(len(b))
	}
	ptr := unsafe.Pointer(&b[0])
	p.ptr = *(*uintptr)(ptr)
	log.Printf("%p", ptr)
	dst := reflect.SliceHeader{
		Data: uintptr(unsafe.Pointer(&p.Peer)),
		Len:  int(unsafe.Sizeof(p.Peer)),
		Cap:  int(unsafe.Sizeof(p.Peer)),
	}
	copy(*(*[]byte)(unsafe.Pointer(&dst)), b)
	return nil
}
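
// Aliases for the request strategy's index types.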
type (
	RequestIndex   = request_strategy.RequestIndex
	chunkIndexType = request_strategy.ChunkIndex
)
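
// peerRequests is a heap of candidate request indexes for a single peer, ordered by how desirable
// each request is (see Less). It implements heap.Interface.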
type peerRequests struct {
	requestIndexes       []RequestIndex
	peer                 *Peer
	torrentStrategyInput *request_strategy.Torrent
}

func (p *peerRequests) Len() int {
	return len(p.requestIndexes)
}
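
// Less orders candidate requests: while choked, requests in pieces not allowed-fast sort last;
// then by how many other peers already have the request pending, whether we currently hold the
// request, piece priority, piece availability, and finally piece and request index for
// determinism.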
func (p *peerRequests) Less(i, j int) bool {
	leftRequest := p.requestIndexes[i]
	rightRequest := p.requestIndexes[j]
	t := p.peer.t
	leftPieceIndex := leftRequest / p.torrentStrategyInput.ChunksPerPiece
	rightPieceIndex := rightRequest / p.torrentStrategyInput.ChunksPerPiece
	leftCurrent := p.peer.actualRequestState.Requests.Contains(leftRequest)
	rightCurrent := p.peer.actualRequestState.Requests.Contains(rightRequest)
	pending := func(index RequestIndex, current bool) int {
		ret := t.pendingRequests.Get(index)
		if current {
			ret--
		}
		// See https://github.com/anacrolix/torrent/issues/679 for possible issues. This should be
		// resolved.
		if ret < 0 {
			panic(ret)
		}
		return ret
	}
	ml := multiless.New()
	// Push requests that can't be served right now to the end. But we don't throw them away unless
	// there's a better alternative. This is for when we're using the fast extension and get choked
	// but our requests could still be good when we get unchoked.
	if p.peer.peerChoking {
		ml = ml.Bool(
			!p.peer.peerAllowedFast.Contains(leftPieceIndex),
			!p.peer.peerAllowedFast.Contains(rightPieceIndex),
		)
	}
	ml = ml.Int(
		pending(leftRequest, leftCurrent),
		pending(rightRequest, rightCurrent))
	ml = ml.Bool(!leftCurrent, !rightCurrent)
	ml = ml.Int(
		-int(p.torrentStrategyInput.Pieces[leftPieceIndex].Priority),
		-int(p.torrentStrategyInput.Pieces[rightPieceIndex].Priority),
	)
	ml = ml.Int(
		int(p.torrentStrategyInput.Pieces[leftPieceIndex].Availability),
		int(p.torrentStrategyInput.Pieces[rightPieceIndex].Availability))
	ml = ml.Uint32(leftPieceIndex, rightPieceIndex)
	ml = ml.Uint32(leftRequest, rightRequest)
	return ml.MustLess()
}

func (p *peerRequests) Swap(i, j int) {
	p.requestIndexes[i], p.requestIndexes[j] = p.requestIndexes[j], p.requestIndexes[i]
}

func (p *peerRequests) Push(x interface{}) {
	p.requestIndexes = append(p.requestIndexes, x.(RequestIndex))
}

func (p *peerRequests) Pop() interface{} {
	last := len(p.requestIndexes) - 1
	x := p.requestIndexes[last]
	p.requestIndexes = p.requestIndexes[:last]
	return x
}
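
// desiredRequestState is the set of requests we would like to have outstanding with a peer, and
// whether we should be interested in them.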
type desiredRequestState struct {
	Requests   []RequestIndex
	Interested bool
}
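
// getDesiredRequestState computes the requests we would ideally have outstanding with this peer:
// it collects the requestable pieces from the request strategy, heaps the pending chunk requests
// the peer can serve, and takes the best of them up to the peer's request limit.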
func (p *Peer) getDesiredRequestState() (desired desiredRequestState) {
	input := p.t.getRequestStrategyInput()
	requestHeap := peerRequests{
		peer: p,
	}
	for i := range input.Torrents {
		t := &input.Torrents[i]
		if t.InfoHash == p.t.infoHash {
			requestHeap.torrentStrategyInput = t
			break
		}
	}
	request_strategy.GetRequestablePieces(
		input,
		func(t *request_strategy.Torrent, rsp *request_strategy.Piece, pieceIndex int) {
			if t.InfoHash != p.t.infoHash {
				return
			}
			if !p.peerHasPiece(pieceIndex) {
				return
			}
			allowedFast := p.peerAllowedFast.ContainsInt(pieceIndex)
			rsp.IterPendingChunks.Iter(func(ci request_strategy.ChunkIndex) {
				r := p.t.pieceRequestIndexOffset(pieceIndex) + ci
				// if p.t.pendingRequests.Get(r) != 0 && !p.actualRequestState.Requests.Contains(r) {
				// 	return
				// }
				if !allowedFast {
					// We must signal interest to request this.
					desired.Interested = true
					// We can make a request here, or sustain an existing one, if we're not choked,
					// or if we made the request previously (presumably while unchoked) and haven't
					// had the peer respond yet (the request was retained because we are using the
					// fast extension).
					if p.peerChoking && !p.actualRequestState.Requests.Contains(r) {
						// We can't request this right now.
						return
					}
				}
				requestHeap.requestIndexes = append(requestHeap.requestIndexes, r)
			})
		},
	)
	p.t.assertPendingRequests()
	heap.Init(&requestHeap)
	for requestHeap.Len() != 0 && len(desired.Requests) < p.nominalMaxRequests() {
		requestIndex := heap.Pop(&requestHeap).(RequestIndex)
		desired.Requests = append(desired.Requests, requestIndex)
	}
	return
}
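
// maybeUpdateActualRequestState applies a pending request update, if one has been flagged, under a
// pprof label that records the reason for the update.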
func (p *Peer) maybeUpdateActualRequestState() bool {
	if p.needRequestUpdate == "" {
		return true
	}
	var more bool
	pprof.Do(
		context.Background(),
		pprof.Labels("update request", p.needRequestUpdate),
		func(_ context.Context) {
			next := p.getDesiredRequestState()
			more = p.applyRequestState(next)
		},
	)
	return more
}

// Transmit/apply the desired request state to the peer.
func (p *Peer) applyRequestState(next desiredRequestState) bool {
	current := &p.actualRequestState
	if !p.setInterested(next.Interested) {
		return false
	}
	more := true
	cancel := current.Requests.Clone()
	for _, ri := range next.Requests {
		cancel.Remove(ri)
	}
	cancel.Iterate(func(req uint32) bool {
		more = p.cancel(req)
		return more
	})
	if !more {
		return false
	}
	shuffled := false
	lastPending := 0
	for i := 0; i < len(next.Requests); i++ {
		req := next.Requests[i]
		if p.cancelledRequests.Contains(req) {
			// Waiting for a reject or piece message, which will suitably trigger us to update our
			// requests, so we can skip this one with no additional consideration.
			continue
		}
		// The cardinality of our desired requests shouldn't exceed the max requests since it's used
		// in the calculation of the requests. However, if we cancelled requests and they haven't
		// been rejected or serviced yet with the fast extension enabled, we can end up with extra
		// outstanding requests. We could subtract the number of outstanding cancels from the next
		// request cardinality, but peers might not like that.
		if maxRequests(current.Requests.GetCardinality()) >= p.nominalMaxRequests() {
			// log.Printf("not assigning all requests [desired=%v, cancelled=%v, current=%v, max=%v]",
			// 	next.Requests.GetCardinality(),
			// 	p.cancelledRequests.GetCardinality(),
			// 	current.Requests.GetCardinality(),
			// 	p.nominalMaxRequests(),
			// )
			break
		}
		otherPending := p.t.pendingRequests.Get(next.Requests[0])
		if p.actualRequestState.Requests.Contains(next.Requests[0]) {
			otherPending--
		}
		if otherPending < lastPending {
			// Pending should only rise. It's supposed to be the strongest ordering criterion. If it
			// doesn't, our shuffling condition could be wrong.
			panic(lastPending)
		}
		// If the request has already been requested by another peer, shuffle this and the rest of
		// the requests (since, according to the increasing condition, the rest of the indices
		// already have an outstanding request with another peer).
		if !shuffled && otherPending > 0 {
			shuffleReqs := next.Requests[i:]
			rand.Shuffle(len(shuffleReqs), func(i, j int) {
				shuffleReqs[i], shuffleReqs[j] = shuffleReqs[j], shuffleReqs[i]
			})
			// log.Printf("shuffled reqs [%v:%v]", i, len(next.Requests))
			shuffled = true
			// Repeat this index.
			i--
			continue
		}

		more = p.mustRequest(req)
		if !more {
			break
		}
	}
	p.updateRequestsTimer.Stop()
	if more {
		p.needRequestUpdate = ""
		if !current.Requests.IsEmpty() {
			p.updateRequestsTimer.Reset(3 * time.Second)
		}
	}
	return more
}