package webseed

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"log"
	"net/http"
	"strings"

	"github.com/RoaringBitmap/roaring"
	"github.com/anacrolix/torrent/common"
	"github.com/anacrolix/torrent/metainfo"
	"github.com/anacrolix/torrent/segments"
)

// RequestSpec is a byte extent (Start, Length) in the torrent's contiguous
// data space, aliased from segments.Extent.
type RequestSpec = segments.Extent

type requestPartResult struct {
	resp *http.Response
	err  error
}

type requestPart struct {
	req    *http.Request
	e      segments.Extent
	result chan requestPartResult
	start  func()
	// Wrap http response bodies for such things as download rate limiting.
	responseBodyWrapper ResponseBodyWrapper
}

type Request struct {
	cancel func()
	Result chan RequestResult
}

func (r Request) Cancel() {
	r.cancel()
}

type Spec struct {
	Urls      []string
	EncodeUrl func(string) string
}

type Client struct {
	HttpClient *http.Client
	Url        string
	fileIndex  segments.Index
	info       *metainfo.Info
	// The pieces we can request with the Url. We're more likely to ban/block at the file level,
	// given that's how requests are mapped to webseeds, but the torrent.Client works at the piece
	// level. We can map our file-level adjustments to the pieces here. This probably needs to be
	// private in the future, if Client ever starts removing pieces.
	Pieces              roaring.Bitmap
	ResponseBodyWrapper ResponseBodyWrapper
	PathEscaper         PathEscaper
}
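
// Illustrative sketch (not part of the original file): mapping a file-level
// ban back to the piece bitmap, as the comment in Client describes. The helper
// name and its parameters are hypothetical, and fixed-size pieces are assumed.
func examplePiecesForBannedFile(cl *Client, fileOffset, fileLength, pieceLength int64) {
	firstPiece := uint64(fileOffset / pieceLength)
	// End is exclusive; round the file's last byte up to a piece boundary.
	endPiece := uint64((fileOffset + fileLength + pieceLength - 1) / pieceLength)
	cl.Pieces.RemoveRange(firstPiece, endPiece)
}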

type ResponseBodyWrapper func(io.Reader) io.Reader
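
// Illustrative sketch (not part of the original file): a ResponseBodyWrapper
// that counts bytes as they are read, logging the total at EOF. A download
// rate limiter would wrap the body the same way, blocking inside Read. The
// names exampleCountingWrapper and countingReader are hypothetical; assign
// with cl.ResponseBodyWrapper = exampleCountingWrapper.
func exampleCountingWrapper(r io.Reader) io.Reader {
	return &countingReader{r: r}
}

type countingReader struct {
	r io.Reader
	n int64
}

func (me *countingReader) Read(p []byte) (int, error) {
	n, err := me.r.Read(p)
	me.n += int64(n)
	if err == io.EOF {
		log.Printf("webseed response body EOF after %v bytes", me.n)
	}
	return n, err
}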

// SetInfo sets the torrent info, builds the file segment index, and marks
// every piece as requestable. Non-conforming webseed URLs (multi-file torrents
// without a trailing slash) are ignored.
func (me *Client) SetInfo(info *metainfo.Info) {
	if !strings.HasSuffix(me.Url, "/") && info.IsDir() {
		// In my experience, this is a non-conforming webseed. For example the
		// http://ia600500.us.archive.org/1/items URLs in archive.org torrents.
		return
	}
	me.fileIndex = segments.NewIndex(common.LengthIterFromUpvertedFiles(info.UpvertedFiles()))
	me.info = info
	me.Pieces.AddRange(0, uint64(info.NumPieces()))
}
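
// Usage sketch (not part of the original file): minimal Client construction.
// The URL would come from a torrent's url-list; exampleNewClient is
// hypothetical.
func exampleNewClient(url string, info *metainfo.Info) *Client {
	cl := &Client{
		HttpClient: http.DefaultClient,
		Url:        url,
	}
	// Builds the file index and marks every piece as requestable.
	cl.SetInfo(info)
	return cl
}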

type RequestResult struct {
	Bytes []byte
	Err   error
}

// NewRequest maps the extent r onto the torrent's files and prepares one HTTP
// range request per file spanned. The returned Request's Result channel
// receives the concatenated bytes, or the first error, once the parts have
// been read; parts are started one at a time as earlier parts complete.
func (ws *Client) NewRequest(r RequestSpec) Request {
	ctx, cancel := context.WithCancel(context.Background())
	var requestParts []requestPart
	if !ws.fileIndex.Locate(r, func(i int, e segments.Extent) bool {
		req, err := NewRequestWithOpts(
			ws.Url, i, ws.info, e.Start, e.Length,
			ws.PathEscaper,
		)
		if err != nil {
			panic(err)
		}
		req = req.WithContext(ctx)
		part := requestPart{
			req:                 req,
			result:              make(chan requestPartResult, 1),
			e:                   e,
			responseBodyWrapper: ws.ResponseBodyWrapper,
		}
		part.start = func() {
			go func() {
				resp, err := ws.HttpClient.Do(req)
				part.result <- requestPartResult{
					resp: resp,
					err:  err,
				}
			}()
		}
		requestParts = append(requestParts, part)
		return true
	}) {
		panic("request out of file bounds")
	}
	req := Request{
		cancel: cancel,
		Result: make(chan RequestResult, 1),
	}
	go func() {
		b, err := readRequestPartResponses(ctx, requestParts)
		req.Result <- RequestResult{
			Bytes: b,
			Err:   err,
		}
	}()
	return req
}
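
// Usage sketch (not part of the original file): fetch one extent and block on
// the outcome. exampleFetchExtent is hypothetical; a caller wanting to abort
// early would invoke req.Cancel instead of waiting.
func exampleFetchExtent(cl *Client, start, length int64) ([]byte, error) {
	req := cl.NewRequest(RequestSpec{Start: start, Length: length})
	res := <-req.Result
	return res.Bytes, res.Err
}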

// ErrBadResponse describes an unexpected HTTP response from a webseed.
type ErrBadResponse struct {
	Msg      string
	Response *http.Response
}

func (me ErrBadResponse) Error() string {
	return me.Msg
}
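
// Illustrative sketch (not part of the original file): distinguishing failure
// modes with the standard errors helpers. exampleClassifyErr is hypothetical.
func exampleClassifyErr(err error) string {
	var bad ErrBadResponse
	switch {
	case errors.Is(err, ErrTooFast):
		return "rate limited: webseed returned 503"
	case errors.As(err, &bad):
		return fmt.Sprintf("bad webseed response: status %v", bad.Response.StatusCode)
	default:
		return err.Error()
	}
}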

// recvPartResult waits for a part's HTTP response and copies the requested
// byte range into buf, working around servers that ignore Range headers.
func recvPartResult(ctx context.Context, buf io.Writer, part requestPart) error {
	result := <-part.result
	// Make sure there are no further results coming; it should be a one-shot channel.
	close(part.result)
	if result.err != nil {
		return result.err
	}
	defer result.resp.Body.Close()
	var body io.Reader = result.resp.Body
	if part.responseBodyWrapper != nil {
		body = part.responseBodyWrapper(body)
	}
	// Prevent further accidental use.
	result.resp.Body = nil
	if ctx.Err() != nil {
		return ctx.Err()
	}
	switch result.resp.StatusCode {
	case http.StatusPartialContent:
		copied, err := io.Copy(buf, body)
		if err != nil {
			return err
		}
		if copied != part.e.Length {
			return fmt.Errorf("got %v bytes, expected %v", copied, part.e.Length)
		}
		return nil
	case http.StatusOK:
		// This number is based on
		// https://archive.org/download/BloodyPitOfHorror/BloodyPitOfHorror.asr.srt. It seems that
		// archive.org might be using a webserver implementation that refuses to do partial
		// responses to small files.
		if part.e.Start < 48<<10 {
			if part.e.Start != 0 {
				log.Printf("resp status ok but requested range [url=%q, range=%q]",
					part.req.URL,
					part.req.Header.Get("Range"))
			}
			// Instead of discarding, we could try receiving all the chunks present in the response
			// body. I don't know how one would handle multiple chunk requests resulting in an OK
			// response for the same file. The request algorithm might need to be smarter for that.
			discarded, _ := io.CopyN(io.Discard, body, part.e.Start)
			if discarded != 0 {
				log.Printf("discarded %v bytes in webseed request response part", discarded)
			}
			_, err := io.CopyN(buf, body, part.e.Length)
			return err
		} else {
			return ErrBadResponse{"resp status ok but requested range", result.resp}
		}
	case http.StatusServiceUnavailable:
		return ErrTooFast
	default:
		return ErrBadResponse{
			fmt.Sprintf("unhandled response status code (%v)", result.resp.StatusCode),
			result.resp,
		}
	}
}

// ErrTooFast is returned when a webseed responds with 503 Service Unavailable.
var ErrTooFast = errors.New("making requests too fast")

// readRequestPartResponses runs each part in turn, concatenating the bodies
// into a single buffer. Parts are started lazily, one at a time.
func readRequestPartResponses(ctx context.Context, parts []requestPart) (_ []byte, err error) {
	var buf bytes.Buffer
	for _, part := range parts {
		part.start()
		err = recvPartResult(ctx, &buf, part)
		if err != nil {
			err = fmt.Errorf("reading %q at %q: %w", part.req.URL, part.req.Header.Get("Range"), err)
			break
		}
	}
	return buf.Bytes(), err
}