package torrent

import (
	"context"
	"errors"
	"fmt"
	"math/rand"
	"sync"
	"time"

	"github.com/RoaringBitmap/roaring"
	"github.com/anacrolix/log"

	"github.com/anacrolix/torrent/metainfo"
	pp "github.com/anacrolix/torrent/peer_protocol"
	"github.com/anacrolix/torrent/webseed"
)
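
// webseedPeer adapts webseed.Client, which fetches data over HTTP, to the peerImpl interface so
// a web seed can take part in the same request scheduling as regular peer connections.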
type webseedPeer struct {
	// First field for stats alignment.
	peer           Peer
	client         webseed.Client
	activeRequests map[Request]webseed.Request
	requesterCond  sync.Cond
	// Number of requester routines.
	maxRequests int
}

var _ peerImpl = (*webseedPeer)(nil)

func (me *webseedPeer) connStatusString() string {
	return me.client.Url
}

func (ws *webseedPeer) String() string {
	return fmt.Sprintf("webseed peer for %q", ws.client.Url)
}
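
// onGotInfo hands the parsed info to the webseed client and bumps availability for every piece
// the web seed reports having.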
func (ws *webseedPeer) onGotInfo(info *metainfo.Info) {
	ws.client.SetInfo(info)
	// There should probably be a callback in Client instead, so it can remove pieces at its whim
	// too.
	ws.client.Pieces.Iterate(func(x uint32) bool {
		ws.peer.t.incPieceAvailability(pieceIndex(x))
		return true
	})
}

func (ws *webseedPeer) writeInterested(interested bool) bool {
	return true
}
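
// _cancel cancels the outstanding webseed request for r, if there is one. It reports whether a
// requester is still running and will deliver a result for the request.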
func (ws *webseedPeer) _cancel(r RequestIndex) bool {
	if active, ok := ws.activeRequests[ws.peer.t.requestIndexToRequest(r)]; ok {
		active.Cancel()
		// The requester is running and will handle the result.
		return true
	}
	// There should be no requester handling this, so no further events will occur.
	return false
}
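
// intoSpec converts a peer-protocol Request into the torrent-wide byte range (offset, length)
// that the webseed client fetches.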
func (ws *webseedPeer) intoSpec(r Request) webseed.RequestSpec {
	return webseed.RequestSpec{ws.peer.t.requestOffset(r), int64(r.Length)}
}
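
// _request only wakes a requester goroutine; the pending request itself is picked up from the
// peer's request state.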
func (ws *webseedPeer) _request(r Request) bool {
	ws.requesterCond.Signal()
	return true
}
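
// doRequest performs the webseed request for r synchronously: it registers the request in
// activeRequests, releases the requester lock while waiting for the result to be handled, and
// removes the entry when done.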
func (ws *webseedPeer) doRequest(r Request) error {
	webseedRequest := ws.client.NewRequest(ws.intoSpec(r))
	ws.activeRequests[r] = webseedRequest
	err := func() error {
		ws.requesterCond.L.Unlock()
		defer ws.requesterCond.L.Lock()
		return ws.requestResultHandler(r, webseedRequest)
	}()
	delete(ws.activeRequests, r)
	return err
}
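
// requester is one of maxRequests long-running routines. On each pass it performs the first
// requested chunk that has no active webseed request, then rescans from the start; it waits on
// requesterCond when there is nothing to do, and exits once the peer is closed.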
func (ws *webseedPeer) requester(i int) {
	ws.requesterCond.L.Lock()
	defer ws.requesterCond.L.Unlock()
start:
	for !ws.peer.closed.IsSet() {
		restart := false
		ws.peer.requestState.Requests.Iterate(func(x uint32) bool {
			r := ws.peer.t.requestIndexToRequest(x)
			if _, ok := ws.activeRequests[r]; ok {
				return true
			}
			err := ws.doRequest(r)
			ws.requesterCond.L.Unlock()
			if err != nil && !errors.Is(err, context.Canceled) {
				log.Printf("requester %v: error doing webseed request %v: %v", i, r, err)
			}
			restart = true
			if errors.Is(err, webseed.ErrTooFast) {
				time.Sleep(time.Duration(rand.Int63n(int64(10 * time.Second))))
			}
			ws.requesterCond.L.Lock()
			return false
		})
		if restart {
			goto start
		}
		ws.requesterCond.Wait()
	}
}

func (ws *webseedPeer) connectionFlags() string {
	return "WS"
}

// Maybe this should drop all existing connections, or something like that.
func (ws *webseedPeer) drop() {}

func (cn *webseedPeer) ban() {
	cn.peer.close()
}
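
// handleUpdateRequests defers the request-state update to a fresh goroutine that takes the
// client lock itself.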
func (ws *webseedPeer) handleUpdateRequests() {
	// Because this is synchronous, webseed peers seem to get first dibs on newly prioritized
	// pieces.
	go func() {
		ws.peer.t.cl.lock()
		defer ws.peer.t.cl.unlock()
		ws.peer.maybeUpdateActualRequestState()
	}()
}
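
// onClose cancels any outstanding requests, nudges peers that may want the freed-up request
// slots, and wakes the requester goroutines so they can observe the closed peer and exit.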
func (ws *webseedPeer) onClose() {
	ws.peer.logger.Levelf(log.Debug, "closing")
	// Just deleting them means we would have to manually cancel active requests.
	ws.peer.cancelAllRequests()
	ws.peer.t.iterPeers(func(p *Peer) {
		if p.isLowOnRequests() {
			p.updateRequests("webseedPeer.onClose")
		}
	})
	ws.requesterCond.Broadcast()
}
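
// requestResultHandler consumes the single result of a webseed request, accounts for the bytes
// read, and either delivers the data via receiveChunk or treats the failure as a remote
// rejection of the request.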
func (ws *webseedPeer) requestResultHandler(r Request, webseedRequest webseed.Request) error {
	result := <-webseedRequest.Result
	close(webseedRequest.Result) // one-shot
	// We do this here rather than inside receiveChunk, since we want to count errors too. I'm not
	// sure if we can divine which errors indicate cancellation on our end without hitting the
	// network though.
	if len(result.Bytes) != 0 || result.Err == nil {
		// Increment ChunksRead and friends
		ws.peer.doChunkReadStats(int64(len(result.Bytes)))
	}
	ws.peer.readBytes(int64(len(result.Bytes)))
	ws.peer.t.cl.lock()
	defer ws.peer.t.cl.unlock()
	if ws.peer.t.closed.IsSet() {
		return nil
	}
	err := result.Err
	if err != nil {
		switch {
		case errors.Is(err, context.Canceled):
		case errors.Is(err, webseed.ErrTooFast):
		case ws.peer.closed.IsSet():
		default:
			ws.peer.logger.Printf("Request %v rejected: %v", r, result.Err)
			// // Here lies my attempt to extract something concrete from Go's error system. RIP.
			// cfg := spew.NewDefaultConfig()
			// cfg.DisableMethods = true
			// cfg.Dump(result.Err)
			log.Printf("closing %v", ws)
			ws.peer.close()
		}
		if !ws.peer.remoteRejectedRequest(ws.peer.t.requestIndexFromRequest(r)) {
			panic("invalid reject")
		}
		return err
	}
	err = ws.peer.receiveChunk(&pp.Message{
		Type:  pp.Piece,
		Index: r.Index,
		Begin: r.Begin,
		Piece: result.Bytes,
	})
	if err != nil {
		panic(err)
	}
	return err
}
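
// peerPieces returns the bitmap of pieces the webseed client reports having.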
func (me *webseedPeer) peerPieces() *roaring.Bitmap {
	return &me.client.Pieces
}
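
// peerHasAllPieces reports whether the web seed appears to have every piece. Before the info is
// available it is assumed to have everything, though that isn't known for sure yet.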
func (cn *webseedPeer) peerHasAllPieces() (all, known bool) {
	if !cn.peer.t.haveInfo() {
		return true, false
	}
	return cn.client.Pieces.GetCardinality() == uint64(cn.peer.t.numPieces()), true
}