package torrent

import (
	"context"
	"encoding/gob"
	"fmt"
	"reflect"
	"runtime/pprof"
	"time"
	"unsafe"

	"github.com/anacrolix/log"
	"github.com/anacrolix/multiless"
	"github.com/anacrolix/torrent/typed-roaring"
	"github.com/lispad/go-generics-tools/binheap"

	"github.com/anacrolix/torrent/request-strategy"
)
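
// requestStrategyPieceOrderState returns the piece state (priority, partial, availability) that
// the request-strategy piece request order tracks for piece i.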
func (t *Torrent) requestStrategyPieceOrderState(i int) request_strategy.PieceRequestOrderState {
	return request_strategy.PieceRequestOrderState{
		Priority:     t.piece(i).purePriority(),
		Partial:      t.piecePartiallyDownloaded(i),
		Availability: t.piece(i).availability(),
	}
}

func init() {
	gob.Register(peerId{})
}
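
// peerId identifies a Peer by the integer value of its pointer, and lets that identity round-trip
// through gob encoding (see GobEncode/GobDecode below).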
type peerId struct {
	*Peer
	ptr uintptr
}

func (p peerId) Uintptr() uintptr {
	return p.ptr
}
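
// GobEncode returns the raw bytes of p.ptr as the encoded form, aliasing the field's memory rather
// than copying it.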
func (p peerId) GobEncode() (b []byte, _ error) {
	*(*reflect.SliceHeader)(unsafe.Pointer(&b)) = reflect.SliceHeader{
		Data: uintptr(unsafe.Pointer(&p.ptr)),
		Len:  int(unsafe.Sizeof(p.ptr)),
		Cap:  int(unsafe.Sizeof(p.ptr)),
	}
	return
}
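
// GobDecode restores the encoded pointer value into both the ptr field and the embedded *Peer.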
func (p *peerId) GobDecode(b []byte) error {
	if uintptr(len(b)) != unsafe.Sizeof(p.ptr) {
		panic(len(b))
	}
	ptr := unsafe.Pointer(&b[0])
	p.ptr = *(*uintptr)(ptr)
	log.Printf("%p", ptr)
	dst := reflect.SliceHeader{
		Data: uintptr(unsafe.Pointer(&p.Peer)),
		Len:  int(unsafe.Sizeof(p.Peer)),
		Cap:  int(unsafe.Sizeof(p.Peer)),
	}
	copy(*(*[]byte)(unsafe.Pointer(&dst)), b)
	return nil
}
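
// Local aliases for the request-strategy package's index types.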
type (
	RequestIndex   = request_strategy.RequestIndex
	chunkIndexType = request_strategy.ChunkIndex
)
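
// desiredPeerRequests holds the requests we would like to make to a single peer, ordered by
// lessByValue. Its methods satisfy heap.Interface.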
type desiredPeerRequests struct {
	requestIndexes []RequestIndex
	peer           *Peer
	pieceStates    []request_strategy.PieceRequestOrderState
}

func (p *desiredPeerRequests) Len() int {
	return len(p.requestIndexes)
}

func (p *desiredPeerRequests) Less(i, j int) bool {
	return p.lessByValue(p.requestIndexes[i], p.requestIndexes[j])
}
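
// lessByValue reports whether leftRequest is more desirable than rightRequest for this peer. In
// order it considers: whether a request could be served while we're choked (allowed fast), piece
// priority, whether the chunk is already requested from this peer or still unrequested, the load
// and request recency of the peer currently holding the request, piece availability, and finally
// piece position (for readahead) or the piece request order.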
func (p *desiredPeerRequests) lessByValue(leftRequest, rightRequest RequestIndex) bool {
	t := p.peer.t
	leftPieceIndex := t.pieceIndexOfRequestIndex(leftRequest)
	rightPieceIndex := t.pieceIndexOfRequestIndex(rightRequest)
	ml := multiless.New()
	// Push requests that can't be served right now to the end. But we don't throw them away unless
	// there's a better alternative. This is for when we're using the fast extension and get choked
	// but our requests could still be good when we get unchoked.
	if p.peer.peerChoking {
		ml = ml.Bool(
			!p.peer.peerAllowedFast.Contains(leftPieceIndex),
			!p.peer.peerAllowedFast.Contains(rightPieceIndex),
		)
	}
	leftPiece := &p.pieceStates[leftPieceIndex]
	rightPiece := &p.pieceStates[rightPieceIndex]
	// Putting this first means we can steal requests from lesser-performing peers for our first few
	// new requests.
	priority := func() piecePriority {
		// Technically we would be happy with the cached priority here, except we don't actually
		// cache it anymore, and Torrent.piecePriority just does another lookup of *Piece to resolve
		// the priority through Piece.purePriority, which is probably slower.
		leftPriority := leftPiece.Priority
		rightPriority := rightPiece.Priority
		ml = ml.Int(
			-int(leftPriority),
			-int(rightPriority),
		)
		if !ml.Ok() {
			if leftPriority != rightPriority {
				panic("expected equal")
			}
		}
		return leftPriority
	}()
	if ml.Ok() {
		return ml.MustLess()
	}
	leftRequestState := t.requestState[leftRequest]
	rightRequestState := t.requestState[rightRequest]
	leftPeer := leftRequestState.peer
	rightPeer := rightRequestState.peer
	// Prefer chunks already requested from this peer.
	ml = ml.Bool(rightPeer == p.peer, leftPeer == p.peer)
	// Prefer unrequested chunks.
	ml = ml.Bool(rightPeer == nil, leftPeer == nil)
	if ml.Ok() {
		return ml.MustLess()
	}
	if leftPeer != nil {
		// The right peer should also be set, or we'd have resolved the computation by now.
		ml = ml.Uint64(
			rightPeer.requestState.Requests.GetCardinality(),
			leftPeer.requestState.Requests.GetCardinality(),
		)
		// Could either of the lastRequested be Zero? That's what checking an existing peer is for.
		leftLast := leftRequestState.when
		rightLast := rightRequestState.when
		if leftLast.IsZero() || rightLast.IsZero() {
			panic("expected non-zero last requested times")
		}
		// We want the most-recently requested on the left. Clients like Transmission serve requests
		// in received order, so the most recently-requested is the one that has the longest until
		// it will be served and therefore is the best candidate to cancel.
		ml = ml.CmpInt64(rightLast.Sub(leftLast).Nanoseconds())
	}
	ml = ml.Int(
		leftPiece.Availability,
		rightPiece.Availability)
	if priority == PiecePriorityReadahead {
		// TODO: For readahead in particular, it would be even better to consider distance from the
		// reader position so that reads earlier in a torrent don't starve reads later in the
		// torrent. This would probably require reconsideration of how readahead priority works.
		ml = ml.Int(leftPieceIndex, rightPieceIndex)
	} else {
		ml = ml.Int(t.pieceRequestOrder[leftPieceIndex], t.pieceRequestOrder[rightPieceIndex])
	}
	return ml.Less()
}

func (p *desiredPeerRequests) Swap(i, j int) {
	p.requestIndexes[i], p.requestIndexes[j] = p.requestIndexes[j], p.requestIndexes[i]
}

func (p *desiredPeerRequests) Push(x interface{}) {
	p.requestIndexes = append(p.requestIndexes, x.(RequestIndex))
}

func (p *desiredPeerRequests) Pop() interface{} {
	last := len(p.requestIndexes) - 1
	x := p.requestIndexes[last]
	p.requestIndexes = p.requestIndexes[:last]
	return x
}
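
// desiredRequestState is the set of requests we would like to have outstanding with a peer, and
// whether we should be interested in the peer.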
type desiredRequestState struct {
	Requests   desiredPeerRequests
	Interested bool
}
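
// getDesiredRequestState computes the requests we would ideally have outstanding with this peer,
// and whether we should be interested. It only builds the desired state; applyRequestState acts on
// it.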
func (p *Peer) getDesiredRequestState() (desired desiredRequestState) {
	t := p.t
	if !t.haveInfo() {
		return
	}
	if t.closed.IsSet() {
		return
	}
	input := t.getRequestStrategyInput()
	requestHeap := desiredPeerRequests{
		peer:           p,
		pieceStates:    t.requestPieceStates,
		requestIndexes: t.requestIndexes,
	}
	// Caller-provided allocation for roaring bitmap iteration.
	var it typedRoaring.Iterator[RequestIndex]
	request_strategy.GetRequestablePieces(
		input,
		t.getPieceRequestOrder(),
		func(ih InfoHash, pieceIndex int, pieceExtra request_strategy.PieceRequestOrderState) {
			if ih != t.infoHash {
				return
			}
			if !p.peerHasPiece(pieceIndex) {
				return
			}
			requestHeap.pieceStates[pieceIndex] = pieceExtra
			allowedFast := p.peerAllowedFast.Contains(pieceIndex)
			t.iterUndirtiedRequestIndexesInPiece(&it, pieceIndex, func(r request_strategy.RequestIndex) {
				if !allowedFast {
					// We must signal interest to request this. TODO: We could set interested if the
					// peer's pieces (minus the allowed fast set) overlap with our missing pieces if
					// there are any readers, or any pending pieces.
					desired.Interested = true
					// We can make or will allow sustaining a request here if we're not choked, or
					// have made the request previously (presumably while unchoked), and haven't had
					// the peer respond yet (and the request was retained because we are using the
					// fast extension).
					if p.peerChoking && !p.requestState.Requests.Contains(r) {
						// We can't request this right now.
						return
					}
				}
				if p.requestState.Cancelled.Contains(r) {
					// Can't re-request while awaiting acknowledgement.
					return
				}
				requestHeap.requestIndexes = append(requestHeap.requestIndexes, r)
			})
		},
	)
	t.assertPendingRequests()
	desired.Requests = requestHeap
	return
}
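
// maybeUpdateActualRequestState applies the desired request state when an update has been flagged
// via needRequestUpdate, labelling the work with the update reason for pprof.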
func (p *Peer) maybeUpdateActualRequestState() {
	if p.closed.IsSet() {
		return
	}
	if p.needRequestUpdate == "" {
		return
	}
	if p.needRequestUpdate == peerUpdateRequestsTimerReason {
		since := time.Since(p.lastRequestUpdate)
		if since < updateRequestsTimerDuration {
			panic(since)
		}
	}
	pprof.Do(
		context.Background(),
		pprof.Labels("update request", p.needRequestUpdate),
		func(_ context.Context) {
			next := p.getDesiredRequestState()
			p.applyRequestState(next)
			p.t.requestIndexes = next.Requests.requestIndexes[:0]
		},
	)
}

// Transmit/action the request state to the peer.
func (p *Peer) applyRequestState(next desiredRequestState) {
	current := &p.requestState
	if !p.setInterested(next.Interested) {
		panic("insufficient write buffer")
	}
	more := true
	requestHeap := binheap.FromSlice(next.Requests.requestIndexes, next.Requests.lessByValue)
	t := p.t
	originalRequestCount := current.Requests.GetCardinality()
	// We're either here on a timer, or because we ran out of requests. Both are valid reasons to
	// alter peakRequests.
	if originalRequestCount != 0 && p.needRequestUpdate != peerUpdateRequestsTimerReason {
		panic(fmt.Sprintf(
			"expected zero existing requests (%v) for update reason %q",
			originalRequestCount, p.needRequestUpdate))
	}
	for requestHeap.Len() != 0 && maxRequests(current.Requests.GetCardinality()+current.Cancelled.GetCardinality()) < p.nominalMaxRequests() {
		req := requestHeap.Pop()
		existing := t.requestingPeer(req)
		if existing != nil && existing != p {
			// Don't steal from the poor.
			diff := int64(current.Requests.GetCardinality()) + 1 - (int64(existing.uncancelledRequests()) - 1)
			// Steal a request that leaves us with one more request than the existing peer
			// connection if the stealer more recently received a chunk.
			if diff > 1 || (diff == 1 && p.lastUsefulChunkReceived.Before(existing.lastUsefulChunkReceived)) {
				continue
			}
			t.cancelRequest(req)
		}
		more = p.mustRequest(req)
		if !more {
			break
		}
	}
	if !more {
		// This might fail if we incorrectly determine that we can fit up to the maximum allowed
		// requests into the available write buffer space. We don't want that to happen because it
		// makes our peak requests dependent on how much was already in the buffer.
		panic(fmt.Sprintf(
			"couldn't apply entire request state [newRequests=%v]",
			current.Requests.GetCardinality()-originalRequestCount))
	}
	newPeakRequests := maxRequests(current.Requests.GetCardinality() - originalRequestCount)
	// log.Printf(
	//	"requests %v->%v (peak %v->%v) reason %q (peer %v)",
	//	originalRequestCount, current.Requests.GetCardinality(), p.peakRequests, newPeakRequests, p.needRequestUpdate, p)
	p.peakRequests = newPeakRequests
	p.needRequestUpdate = ""
	p.lastRequestUpdate = time.Now()
	if enableUpdateRequestsTimer {
		p.updateRequestsTimer.Reset(updateRequestsTimerDuration)
	}
}

// This could be set to 10s to match the unchoke/request update interval recommended by some
// specifications. I've set it shorter to trigger it more often for testing for now.
const (
	updateRequestsTimerDuration = 3 * time.Second
	enableUpdateRequestsTimer   = false
)