FedP2P/webseed-peer.go

package torrent

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"strings"
	"sync"

	"github.com/anacrolix/log"

	"github.com/anacrolix/torrent/common"
	"github.com/anacrolix/torrent/metainfo"
	pp "github.com/anacrolix/torrent/peer_protocol"
	"github.com/anacrolix/torrent/segments"
	"github.com/anacrolix/torrent/webseed"
)
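
// webseedPeer adapts a webseed.Client (an HTTP web seed) to the peerImpl interface, so the rest of
// the client can schedule requests against it as if it were an ordinary peer connection.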
type webseedPeer struct {
	client         webseed.Client
	activeRequests map[Request]webseed.Request
	requesterCond  sync.Cond
	peer           Peer
}

var _ peerImpl = (*webseedPeer)(nil)

func (me *webseedPeer) connStatusString() string {
	return me.client.Url
}

func (ws *webseedPeer) String() string {
	return fmt.Sprintf("webseed peer for %q", ws.client.Url)
}
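
// onGotInfo finishes configuring the webseed client once the torrent's metadata is known: the
// segment index maps torrent-space byte ranges onto the torrent's files, which the client needs in
// order to build per-file HTTP requests.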
func (ws *webseedPeer) onGotInfo(info *metainfo.Info) {
	ws.client.FileIndex = segments.NewIndex(common.LengthIterFromUpvertedFiles(info.UpvertedFiles()))
	ws.client.Info = info
}
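
// writeInterested is a no-op: there is no interested/not-interested exchange with an HTTP server,
// so the "write" always succeeds.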
func (ws *webseedPeer) writeInterested(interested bool) bool {
	return true
}
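
// _cancel aborts the underlying HTTP request for r if one is active, removes the request from the
// peer's state, and asks for a request update if nothing is left outstanding.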
func (ws *webseedPeer) _cancel(r RequestIndex) bool {
	active, ok := ws.activeRequests[ws.peer.t.requestIndexToRequest(r)]
	if ok {
		active.Cancel()
		if !ws.peer.deleteRequest(r) {
			panic("cancelled webseed request should exist")
		}
		if ws.peer.actualRequestState.Requests.IsEmpty() {
			ws.peer.updateRequests("webseedPeer._cancel")
		}
	}
	return true
}
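
// intoSpec converts a wire-protocol Request (piece index, begin, length) into the flat byte offset
// and length within the torrent that the webseed client works with.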
func (ws *webseedPeer) intoSpec(r Request) webseed.RequestSpec {
	return webseed.RequestSpec{ws.peer.t.requestOffset(r), int64(r.Length)}
}
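
// _request doesn't start anything itself; it wakes the requester loop, which picks new work out of
// the peer's request state.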
func (ws *webseedPeer) _request(r Request) bool {
	ws.requesterCond.Signal()
	return true
}
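
// doRequest issues the HTTP request for r and records it in activeRequests. The requester lock is
// released while waiting on the result so that other activity can proceed in the meantime.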
func (ws *webseedPeer) doRequest(r Request) {
	webseedRequest := ws.client.NewRequest(ws.intoSpec(r))
	ws.activeRequests[r] = webseedRequest
	func() {
		ws.requesterCond.L.Unlock()
		defer ws.requesterCond.L.Lock()
		ws.requestResultHandler(r, webseedRequest)
	}()
	delete(ws.activeRequests, r)
}
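
// requester loops until the peer is closed, presumably on a goroutine of its own. Each pass starts
// at most one request that isn't already active; because doRequest releases the lock while the
// request completes, the scan restarts from the top afterwards. When there is nothing new to start
// it waits on the condition variable.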
func (ws *webseedPeer) requester() {
	ws.requesterCond.L.Lock()
	defer ws.requesterCond.L.Unlock()
start:
	for !ws.peer.closed.IsSet() {
		restart := false
		ws.peer.actualRequestState.Requests.Iterate(func(x uint32) bool {
			r := ws.peer.t.requestIndexToRequest(x)
			if _, ok := ws.activeRequests[r]; ok {
				return true
			}
			ws.doRequest(r)
			restart = true
			return false
		})
		if restart {
			goto start
		}
		ws.requesterCond.Wait()
	}
}

func (ws *webseedPeer) connectionFlags() string {
	return "WS"
}

// TODO: This is called when banning peers. Perhaps we want to be able to ban webseeds too. We could
// return bool if this is even possible, and if it isn't, skip to the next drop candidate.
func (ws *webseedPeer) drop() {}
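
// handleUpdateRequests recomputes the peer's actual request state straight away.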
func (ws *webseedPeer) handleUpdateRequests() {
	ws.peer.maybeUpdateActualRequestState()
}
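
// onClose cancels any in-flight HTTP requests and wakes the requester loop so it can notice the
// peer is closed and exit.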
func (ws *webseedPeer) onClose() {
	ws.peer.logger.WithLevel(log.Debug).Print("closing")
	for _, r := range ws.activeRequests {
		r.Cancel()
	}
	ws.requesterCond.Broadcast()
}
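
// requestResultHandler waits for the webseed request to finish and feeds the outcome back into the
// peer under the client lock: a successful body is delivered as a synthetic Piece message, while an
// error is treated as a rejection, or closes the peer outright for errors that look permanent.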
func (ws *webseedPeer) requestResultHandler(r Request, webseedRequest webseed.Request) {
	result := <-webseedRequest.Result
	// We do this here rather than inside receiveChunk, since we want to count errors too. I'm not
	// sure if we can divine which errors indicate cancellation on our end without hitting the
	// network though.
	ws.peer.doChunkReadStats(int64(len(result.Bytes)))
	ws.peer.t.cl.lock()
	defer ws.peer.t.cl.unlock()
	if result.Err != nil {
		if !errors.Is(result.Err, context.Canceled) {
			ws.peer.logger.Printf("Request %v rejected: %v", r, result.Err)
		}
		// We need to filter out temporary errors, but this is a nightmare in Go. Currently a bad
		// webseed URL can starve out the good ones due to the chunk selection algorithm.
		const closeOnAllErrors = false
		if closeOnAllErrors ||
			strings.Contains(result.Err.Error(), "unsupported protocol scheme") ||
			func() bool {
				var err webseed.ErrBadResponse
				if !errors.As(result.Err, &err) {
					return false
				}
				return err.Response.StatusCode == http.StatusNotFound
			}() {
			ws.peer.close()
		} else {
			ws.peer.remoteRejectedRequest(ws.peer.t.requestIndexFromRequest(r))
		}
	} else {
		err := ws.peer.receiveChunk(&pp.Message{
			Type:  pp.Piece,
			Index: r.Index,
			Begin: r.Begin,
			Piece: result.Bytes,
		})
		if err != nil {
			panic(err)
		}
	}
}