2016-09-02 13:10:57 +08:00
|
|
|
package storage
|
|
|
|
|
|
|
|
import (
|
|
|
|
"io"
|
|
|
|
"os"
|
|
|
|
|
|
|
|
"github.com/anacrolix/missinggo"
|
2019-08-21 18:58:40 +08:00
|
|
|
|
2016-09-02 13:10:57 +08:00
|
|
|
"github.com/anacrolix/torrent/metainfo"
|
|
|
|
)
|
|
|
|
|
|
|
|
// Client wraps a ClientImpl, so that torrents opened through it are wrapped
// in the Torrent helper type (which in turn wraps pieces — see Piece).
type Client struct {
	// The underlying storage implementation all calls are delegated to.
	ci ClientImpl
}
|
|
|
|
|
|
|
|
func NewClient(cl ClientImpl) *Client {
|
|
|
|
return &Client{cl}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (cl Client) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (*Torrent, error) {
|
2017-06-01 20:57:08 +08:00
|
|
|
t, err := cl.ci.OpenTorrent(info, infoHash)
|
2020-05-21 10:17:10 +08:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return &Torrent{t}, nil
|
2016-09-02 13:10:57 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Torrent wraps a TorrentImpl so that pieces obtained from it come back as
// the Piece helper type rather than the bare PieceImpl.
type Torrent struct {
	TorrentImpl
}
|
|
|
|
|
|
|
|
func (t Torrent) Piece(p metainfo.Piece) Piece {
|
|
|
|
return Piece{t.TorrentImpl.Piece(p), p}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Piece pairs a PieceImpl with its metainfo description, enabling bounds
// checks on ReadAt/WriteAt and an io.WriterTo implementation.
type Piece struct {
	PieceImpl
	// The metainfo piece, consulted for the piece's length.
	mip metainfo.Piece
}
|
|
|
|
|
2020-11-06 07:36:49 +08:00
|
|
|
// Statically assert that Piece implements io.WriterTo (see Piece.WriteTo).
var _ io.WriterTo = Piece{}
|
|
|
|
|
|
|
|
// Why do we have this wrapper? Well PieceImpl doesn't implement io.Reader, so we can't let io.Copy
|
|
|
|
// and friends check for io.WriterTo and fallback for us since they expect an io.Reader.
|
|
|
|
func (p Piece) WriteTo(w io.Writer) (int64, error) {
|
|
|
|
if i, ok := p.PieceImpl.(io.WriterTo); ok {
|
|
|
|
return i.WriteTo(w)
|
2020-11-02 12:35:07 +08:00
|
|
|
}
|
|
|
|
n := p.mip.Length()
|
|
|
|
r := io.NewSectionReader(p, 0, n)
|
2020-11-06 07:36:49 +08:00
|
|
|
return io.CopyN(w, r, n)
|
2020-11-02 12:35:07 +08:00
|
|
|
}
|
|
|
|
|
2016-09-02 13:10:57 +08:00
|
|
|
func (p Piece) WriteAt(b []byte, off int64) (n int, err error) {
|
2018-01-12 07:45:19 +08:00
|
|
|
// Callers should not be writing to completed pieces, but it's too
|
|
|
|
// expensive to be checking this on every single write using uncached
|
|
|
|
// completions.
|
|
|
|
|
|
|
|
// c := p.Completion()
|
|
|
|
// if c.Ok && c.Complete {
|
|
|
|
// err = errors.New("piece already completed")
|
|
|
|
// return
|
|
|
|
// }
|
2016-09-02 13:10:57 +08:00
|
|
|
if off+int64(len(b)) > p.mip.Length() {
|
|
|
|
panic("write overflows piece")
|
|
|
|
}
|
2017-12-03 10:44:08 +08:00
|
|
|
b = missinggo.LimitLen(b, p.mip.Length()-off)
|
2016-09-02 13:10:57 +08:00
|
|
|
return p.PieceImpl.WriteAt(b, off)
|
|
|
|
}
|
|
|
|
|
|
|
|
// ReadAt reads from the piece at offset off, clamping the read to the piece
// boundary. A read that hits io.EOF before the end of the piece marks the
// piece not complete, on the assumption that expected data is missing.
func (p Piece) ReadAt(b []byte, off int64) (n int, err error) {
	if off < 0 {
		err = os.ErrInvalid
		return
	}
	// Reads starting at or past the end of the piece get io.EOF immediately.
	if off >= p.mip.Length() {
		err = io.EOF
		return
	}
	// Clamp the buffer so the underlying implementation is never asked to
	// read past the piece boundary.
	b = missinggo.LimitLen(b, p.mip.Length()-off)
	if len(b) == 0 {
		return
	}
	n, err = p.PieceImpl.ReadAt(b, off)
	if n > len(b) {
		// The implementation claims to have read more bytes than the buffer
		// holds: a contract violation in the PieceImpl.
		panic(n)
	}
	if n == 0 && err == nil {
		// io.ReaderAt contract: zero bytes with nil error would make
		// io.Copy (via the section reader in WriteTo) loop forever.
		panic("io.Copy will get stuck")
	}
	off += int64(n)
	// Doing this here may be inaccurate. There's legitimate reasons we may fail to read while the
	// data is still there, such as too many open files. There should probably be a specific error
	// to return if the data has been lost.
	if off < p.mip.Length() {
		if err == io.EOF {
			p.MarkNotComplete()
		}
	}
	return
}
|