Remove the InfoEx type, and don't generate its infohash on the fly
Fixes #106.
parent 7b2561cea8
commit 2a1cef7c9e
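For code that consumes this library, the change boils down to passing the verbatim bencoded info dictionary around instead of an InfoEx value, and hashing those bytes explicitly. A minimal migration sketch, not part of the commit; the package name, helper name, and file path are illustrative, and in practice TorrentSpecFromMetaInfo (updated below) already does the same thing:

package example

import (
    "github.com/anacrolix/torrent"
    "github.com/anacrolix/torrent/metainfo"
)

// addFromFile is an illustrative helper showing the post-commit TorrentSpec shape.
func addFromFile(cl *torrent.Client, path string) (*torrent.Torrent, error) {
    mi, err := metainfo.LoadFromFile(path)
    if err != nil {
        return nil, err
    }
    // Previously: Info: &mi.Info, InfoHash: mi.Info.Hash() (via InfoEx).
    // Now the raw bencoded info dict travels, and its hash is computed from those bytes.
    t, _, err := cl.AddTorrentSpec(&torrent.TorrentSpec{
        InfoBytes:   mi.InfoBytes,
        InfoHash:    mi.HashInfoBytes(),
        Trackers:    mi.AnnounceList,
        DisplayName: mi.UnmarshalInfo().Name,
    })
    return t, err
}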
client.go (13 changes)
@@ -1407,7 +1407,7 @@ type TorrentSpec struct {
     // The tiered tracker URIs.
     Trackers [][]string
     InfoHash metainfo.Hash
-    Info     *metainfo.InfoEx
+    InfoBytes []byte
     // The name to use if the Name field from the Info isn't available.
     DisplayName string
     // The chunk size to use for outbound requests. Defaults to 16KiB if not
@@ -1430,11 +1430,12 @@ func TorrentSpecFromMagnetURI(uri string) (spec *TorrentSpec, err error) {
 }
 
 func TorrentSpecFromMetaInfo(mi *metainfo.MetaInfo) (spec *TorrentSpec) {
+    info := mi.UnmarshalInfo()
     spec = &TorrentSpec{
         Trackers:    mi.AnnounceList,
-        Info:        &mi.Info,
-        DisplayName: mi.Info.Name,
-        InfoHash:    mi.Info.Hash(),
+        InfoBytes:   mi.InfoBytes,
+        DisplayName: info.Name,
+        InfoHash:    mi.HashInfoBytes(),
     }
     if spec.Trackers == nil && mi.Announce != "" {
         spec.Trackers = [][]string{{mi.Announce}}
@@ -1470,8 +1471,8 @@ func (cl *Client) AddTorrentSpec(spec *TorrentSpec) (t *Torrent, new bool, err e
     if spec.DisplayName != "" {
         t.SetDisplayName(spec.DisplayName)
     }
-    if spec.Info != nil {
-        err = t.SetInfoBytes(spec.Info.Bytes)
+    if spec.InfoBytes != nil {
+        err = t.SetInfoBytes(spec.InfoBytes)
         if err != nil {
             return
         }
@@ -88,14 +88,14 @@ func TestTorrentInitialState(t *testing.T) {
     dir, mi := testutil.GreetingTestTorrent()
     defer os.RemoveAll(dir)
     tor := &Torrent{
-        infoHash:          mi.Info.Hash(),
+        infoHash:          mi.HashInfoBytes(),
         pieceStateChanges: pubsub.NewPubSub(),
     }
     tor.chunkSize = 2
     tor.storageOpener = storage.NewFile("/dev/null")
     // Needed to lock for asynchronous piece verification.
     tor.cl = new(Client)
-    err := tor.setInfoBytes(mi.Info.Bytes)
+    err := tor.setInfoBytes(mi.InfoBytes)
     require.NoError(t, err)
     require.Len(t, tor.pieces, 3)
     tor.pendAllChunkSpecs(0)
@@ -492,7 +492,7 @@ func TestMergingTrackersByAddingSpecs(t *testing.T) {
 
 type badStorage struct{}
 
-func (bs badStorage) OpenTorrent(*metainfo.InfoEx) (storage.Torrent, error) {
+func (bs badStorage) OpenTorrent(*metainfo.Info, metainfo.Hash) (storage.Torrent, error) {
     return bs, nil
 }
 
@@ -536,26 +536,24 @@ func TestCompletedPieceWrongSize(t *testing.T) {
     cl, err := NewClient(&cfg)
     require.NoError(t, err)
     defer cl.Close()
-    ie := metainfo.InfoEx{
-        Info: metainfo.Info{
+    info := metainfo.Info{
         PieceLength: 15,
         Pieces:      make([]byte, 20),
         Files: []metainfo.FileInfo{
             metainfo.FileInfo{Path: []string{"greeting"}, Length: 13},
         },
-        },
     }
-    ie.UpdateBytes()
+    b, err := bencode.Marshal(info)
     tt, new, err := cl.AddTorrentSpec(&TorrentSpec{
-        Info:     &ie,
-        InfoHash: ie.Hash(),
+        InfoBytes: b,
+        InfoHash:  metainfo.HashBytes(b),
     })
     require.NoError(t, err)
     defer tt.Drop()
     assert.True(t, new)
     r := tt.NewReader()
     defer r.Close()
-    b, err := ioutil.ReadAll(r)
+    b, err = ioutil.ReadAll(r)
     assert.Len(t, b, 13)
     assert.NoError(t, err)
 }
@@ -681,7 +679,7 @@ func TestAddTorrentSpecMerging(t *testing.T) {
     dir, mi := testutil.GreetingTestTorrent()
     defer os.RemoveAll(dir)
     tt, new, err := cl.AddTorrentSpec(&TorrentSpec{
-        InfoHash: mi.Info.Hash(),
+        InfoHash: mi.HashInfoBytes(),
     })
     require.NoError(t, err)
     require.True(t, new)
@@ -698,7 +696,7 @@ func TestTorrentDroppedBeforeGotInfo(t *testing.T) {
     cl, _ := NewClient(&TestingConfig)
     defer cl.Close()
     tt, _, _ := cl.AddTorrentSpec(&TorrentSpec{
-        InfoHash: mi.Info.Hash(),
+        InfoHash: mi.HashInfoBytes(),
     })
     tt.Drop()
     assert.EqualValues(t, 0, len(cl.Torrents()))
@@ -709,7 +707,7 @@ func TestTorrentDroppedBeforeGotInfo(t *testing.T) {
     }
 }
 
-func writeTorrentData(ts storage.Torrent, info *metainfo.InfoEx, b []byte) {
+func writeTorrentData(ts storage.Torrent, info metainfo.Info, b []byte) {
     for i := range iter.N(info.NumPieces()) {
         n, _ := ts.Piece(info.Piece(i)).WriteAt(b, 0)
         b = b[n:]
@@ -725,13 +723,15 @@ func testAddTorrentPriorPieceCompletion(t *testing.T, alreadyCompleted bool, csf
     greetingDataTempDir, greetingMetainfo := testutil.GreetingTestTorrent()
     defer os.RemoveAll(greetingDataTempDir)
     filePieceStore := csf(fileCache)
-    greetingData, err := filePieceStore.OpenTorrent(&greetingMetainfo.Info)
+    info := greetingMetainfo.UnmarshalInfo()
+    ih := greetingMetainfo.HashInfoBytes()
+    greetingData, err := filePieceStore.OpenTorrent(&info, ih)
     require.NoError(t, err)
-    writeTorrentData(greetingData, &greetingMetainfo.Info, []byte(testutil.GreetingFileContents))
+    writeTorrentData(greetingData, info, []byte(testutil.GreetingFileContents))
     // require.Equal(t, len(testutil.GreetingFileContents), written)
     // require.NoError(t, err)
-    for i := 0; i < greetingMetainfo.Info.NumPieces(); i++ {
-        p := greetingMetainfo.Info.Piece(i)
+    for i := 0; i < info.NumPieces(); i++ {
+        p := info.Piece(i)
         if alreadyCompleted {
             err := greetingData.Piece(p).MarkComplete()
             assert.NoError(t, err)
@@ -871,17 +871,16 @@ func TestPeerInvalidHave(t *testing.T) {
     cl, err := NewClient(&TestingConfig)
     require.NoError(t, err)
     defer cl.Close()
-    ie := metainfo.InfoEx{
-        Info: metainfo.Info{
+    info := metainfo.Info{
         PieceLength: 1,
         Pieces:      make([]byte, 20),
         Files:       []metainfo.FileInfo{{Length: 1}},
-        },
     }
-    ie.UpdateBytes()
+    infoBytes, err := bencode.Marshal(info)
+    require.NoError(t, err)
     tt, _new, err := cl.AddTorrentSpec(&TorrentSpec{
-        Info:     &ie,
-        InfoHash: ie.Hash(),
+        InfoBytes: infoBytes,
+        InfoHash:  metainfo.HashBytes(infoBytes),
     })
     require.NoError(t, err)
     assert.True(t, _new)
@@ -901,7 +900,7 @@ func TestPieceCompletedInStorageButNotClient(t *testing.T) {
     seeder, err := NewClient(&TestingConfig)
     require.NoError(t, err)
     seeder.AddTorrentSpec(&TorrentSpec{
-        Info: &greetingMetainfo.Info,
+        InfoBytes: greetingMetainfo.InfoBytes,
     })
 }
 
@@ -980,7 +979,7 @@ func totalConns(tts []*Torrent) (ret int) {
 
 func TestSetMaxEstablishedConn(t *testing.T) {
     var tts []*Torrent
-    ih := testutil.GreetingMetaInfo().Info.Hash()
+    ih := testutil.GreetingMetaInfo().HashInfoBytes()
     cfg := TestingConfig
     for i := range iter.N(3) {
         cl, err := NewClient(&cfg)
@@ -29,7 +29,7 @@ func main() {
     <-t.GotInfo()
     mi := t.Metainfo()
     t.Drop()
-    f, err := os.Create(mi.Info.Name + ".torrent")
+    f, err := os.Create(mi.UnmarshalInfo().Name + ".torrent")
     if err != nil {
         log.Fatalf("error creating torrent metainfo file: %s", err)
     }
@@ -6,6 +6,7 @@ import (
 
     "github.com/anacrolix/tagflag"
 
+    "github.com/anacrolix/torrent/bencode"
     "github.com/anacrolix/torrent/metainfo"
 )
 
@@ -32,7 +33,14 @@ func main() {
         mi.AnnounceList = append(mi.AnnounceList, []string{a})
     }
     mi.SetDefaults()
-    err := mi.Info.BuildFromFilePath(args.Root)
+    info := metainfo.Info{
+        PieceLength: 256 * 1024,
+    }
+    err := info.BuildFromFilePath(args.Root)
     if err != nil {
         log.Fatal(err)
     }
+    mi.InfoBytes, err = bencode.Marshal(info)
+    if err != nil {
+        log.Fatal(err)
+    }
@@ -20,6 +20,6 @@ func main() {
         if err != nil {
             log.Fatal(err)
         }
-        fmt.Printf("%s: %s\n", mi.Info.Hash().HexString(), arg)
+        fmt.Printf("%s: %s\n", mi.HashInfoBytes().HexString(), arg)
     }
 }
@@ -17,6 +17,7 @@ func main() {
         fmt.Fprintf(os.Stderr, "error reading metainfo from stdin: %s", err)
         os.Exit(1)
     }
+    info := mi.UnmarshalInfo()
 
-    fmt.Fprintf(os.Stdout, "%s\n", mi.Magnet().String())
+    fmt.Fprintf(os.Stdout, "%s\n", mi.Magnet(info.Name, mi.HashInfoBytes()).String())
 }
@@ -29,16 +29,16 @@ func main() {
             log.Print(err)
             continue
         }
-        info := &metainfo.Info.Info
+        info := metainfo.UnmarshalInfo()
         if flags.JustName {
-            fmt.Printf("%s\n", metainfo.Info.Name)
+            fmt.Printf("%s\n", info.Name)
             continue
         }
         d := map[string]interface{}{
             "Name":        info.Name,
             "NumPieces":   info.NumPieces(),
             "PieceLength": info.PieceLength,
-            "InfoHash":    metainfo.Info.Hash().HexString(),
+            "InfoHash":    metainfo.HashInfoBytes().HexString(),
             "NumFiles":    len(info.UpvertedFiles()),
             "TotalLength": info.TotalLength(),
             "Announce":    metainfo.Announce,
@@ -49,22 +49,22 @@ func main() {
     if err != nil {
         log.Fatal(err)
     }
+    info := metaInfo.UnmarshalInfo()
     mMapSpan := &mmap_span.MMapSpan{}
-    if len(metaInfo.Info.Files) > 0 {
-        for _, file := range metaInfo.Info.Files {
-            filename := filepath.Join(append([]string{*dataPath, metaInfo.Info.Name}, file.Path...)...)
+    if len(info.Files) > 0 {
+        for _, file := range info.Files {
+            filename := filepath.Join(append([]string{*dataPath, info.Name}, file.Path...)...)
             goMMap := fileToMmap(filename, file.Length)
             mMapSpan.Append(goMMap)
         }
-        log.Println(len(metaInfo.Info.Files))
+        log.Println(len(info.Files))
     } else {
-        goMMap := fileToMmap(*dataPath, metaInfo.Info.Length)
+        goMMap := fileToMmap(*dataPath, info.Length)
         mMapSpan.Append(goMMap)
     }
     log.Println(mMapSpan.Size())
-    log.Println(len(metaInfo.Info.Pieces))
-    info := metaInfo.Info
-    for i := range iter.N(metaInfo.Info.NumPieces()) {
+    log.Println(len(info.Pieces))
+    for i := range iter.N(info.NumPieces()) {
         p := info.Piece(i)
         hash := sha1.New()
         _, err := io.Copy(hash, io.NewSectionReader(mMapSpan, p.Offset(), p.Length()))
@@ -49,7 +49,7 @@ type rootNode struct {
 
 type node struct {
     path     string
-    metadata *metainfo.InfoEx
+    metadata *metainfo.Info
     FS       *TorrentFS
     t        *torrent.Torrent
 }
@@ -97,7 +97,7 @@ func TestUnmountWedged(t *testing.T) {
     })
     require.NoError(t, err)
     defer client.Close()
-    _, err = client.AddTorrent(layout.Metainfo)
+    tt, err := client.AddTorrent(layout.Metainfo)
     require.NoError(t, err)
     fs := New(client)
     fuseConn, err := fuse.Mount(layout.MountDir)
@@ -124,7 +124,7 @@ func TestUnmountWedged(t *testing.T) {
     // "wedge" FUSE, requiring the fs object to be forcibly destroyed. The
     // read call will return with a FS error.
     go func() {
-        _, err := ioutil.ReadFile(filepath.Join(layout.MountDir, layout.Metainfo.Info.Name))
+        _, err := ioutil.ReadFile(filepath.Join(layout.MountDir, tt.Info().Name))
         if err == nil {
             t.Fatal("expected error reading greeting")
         }
@@ -169,7 +169,7 @@ func TestDownloadOnDemand(t *testing.T) {
     require.NoError(t, err)
    defer seeder.Close()
     testutil.ExportStatusWriter(seeder, "s")
-    _, err = seeder.AddMagnet(fmt.Sprintf("magnet:?xt=urn:btih:%s", layout.Metainfo.Info.Hash().HexString()))
+    _, err = seeder.AddMagnet(fmt.Sprintf("magnet:?xt=urn:btih:%s", layout.Metainfo.HashInfoBytes().HexString()))
     require.NoError(t, err)
     leecher, err := torrent.NewClient(&torrent.Config{
         DisableTrackers: true,
@@ -16,6 +16,7 @@ import (
 
     "github.com/anacrolix/missinggo"
 
+    "github.com/anacrolix/torrent/bencode"
     "github.com/anacrolix/torrent/metainfo"
 )
 
@@ -30,19 +31,25 @@ func CreateDummyTorrentData(dirName string) string {
     f.WriteString(GreetingFileContents)
     return f.Name()
 }
-func GreetingMetaInfo() (mi *metainfo.MetaInfo) {
-    mi = new(metainfo.MetaInfo)
-    mi.Info.Name = GreetingFileName
-    mi.Info.Length = int64(len(GreetingFileContents))
-    mi.Info.PieceLength = 5
-    err := mi.Info.GeneratePieces(func(metainfo.FileInfo) (io.ReadCloser, error) {
+
+func GreetingMetaInfo() *metainfo.MetaInfo {
+    info := metainfo.Info{
+        Name:        GreetingFileName,
+        Length:      int64(len(GreetingFileContents)),
+        PieceLength: 5,
+    }
+    err := info.GeneratePieces(func(metainfo.FileInfo) (io.ReadCloser, error) {
         return ioutil.NopCloser(strings.NewReader(GreetingFileContents)), nil
     })
     if err != nil {
         panic(err)
     }
-    mi.Info.UpdateBytes()
-    return
+    mi := &metainfo.MetaInfo{}
+    mi.InfoBytes, err = bencode.Marshal(info)
+    if err != nil {
+        panic(err)
+    }
+    return mi
 }
 
 // Gives a temporary directory containing the completed "greeting" torrent,
@@ -17,9 +17,11 @@ func TestHashPieceAfterStorageClosed(t *testing.T) {
     defer os.RemoveAll(td)
     cs := storage.NewFile(td)
     tt := &Torrent{}
-    tt.info = &testutil.GreetingMetaInfo().Info
+    mi := testutil.GreetingMetaInfo()
+    info := mi.UnmarshalInfo()
+    tt.info = &info
     tt.makePieces()
-    tt.storage, err = cs.OpenTorrent(tt.info)
+    tt.storage, err = cs.OpenTorrent(tt.info, mi.HashInfoBytes())
     require.NoError(t, err)
     require.NoError(t, tt.storage.Close())
     tt.hashPiece(0)
@@ -0,0 +1,156 @@
+package metainfo
+
+import (
+    "crypto/sha1"
+    "errors"
+    "fmt"
+    "io"
+    "log"
+    "os"
+    "path/filepath"
+    "strings"
+
+    "github.com/anacrolix/missinggo/slices"
+)
+
+// The info dictionary.
+type Info struct {
+    PieceLength int64      `bencode:"piece length"`
+    Pieces      []byte     `bencode:"pieces"`
+    Name        string     `bencode:"name"`
+    Length      int64      `bencode:"length,omitempty"`
+    Private     *bool      `bencode:"private,omitempty"`
+    Files       []FileInfo `bencode:"files,omitempty"`
+}
+
+// This is a helper that sets Files and Pieces from a root path and its
+// children.
+func (info *Info) BuildFromFilePath(root string) (err error) {
+    info.Name = filepath.Base(root)
+    info.Files = nil
+    err = filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
+        if err != nil {
+            return err
+        }
+        if fi.IsDir() {
+            // Directories are implicit in torrent files.
+            return nil
+        } else if path == root {
+            // The root is a file.
+            info.Length = fi.Size()
+            return nil
+        }
+        relPath, err := filepath.Rel(root, path)
+        log.Println(relPath, err)
+        if err != nil {
+            return fmt.Errorf("error getting relative path: %s", err)
+        }
+        info.Files = append(info.Files, FileInfo{
+            Path:   strings.Split(relPath, string(filepath.Separator)),
+            Length: fi.Size(),
+        })
+        return nil
+    })
+    if err != nil {
+        return
+    }
+    slices.Sort(info.Files, func(l, r FileInfo) bool {
+        return strings.Join(l.Path, "/") < strings.Join(r.Path, "/")
+    })
+    err = info.GeneratePieces(func(fi FileInfo) (io.ReadCloser, error) {
+        return os.Open(filepath.Join(root, strings.Join(fi.Path, string(filepath.Separator))))
+    })
+    if err != nil {
+        err = fmt.Errorf("error generating pieces: %s", err)
+    }
+    return
+}
+
+func (info *Info) writeFiles(w io.Writer, open func(fi FileInfo) (io.ReadCloser, error)) error {
+    for _, fi := range info.UpvertedFiles() {
+        r, err := open(fi)
+        if err != nil {
+            return fmt.Errorf("error opening %v: %s", fi, err)
+        }
+        wn, err := io.CopyN(w, r, fi.Length)
+        r.Close()
+        if wn != fi.Length || err != nil {
+            return fmt.Errorf("error hashing %v: %s", fi, err)
+        }
+    }
+    return nil
+}
+
+// Set info.Pieces by hashing info.Files.
+func (info *Info) GeneratePieces(open func(fi FileInfo) (io.ReadCloser, error)) error {
+    if info.PieceLength == 0 {
+        return errors.New("piece length must be non-zero")
+    }
+    pr, pw := io.Pipe()
+    go func() {
+        err := info.writeFiles(pw, open)
+        pw.CloseWithError(err)
+    }()
+    defer pr.Close()
+    var pieces []byte
+    for {
+        hasher := sha1.New()
+        wn, err := io.CopyN(hasher, pr, info.PieceLength)
+        if err == io.EOF {
+            err = nil
+        }
+        if err != nil {
+            return err
+        }
+        if wn == 0 {
+            break
+        }
+        pieces = hasher.Sum(pieces)
+        if wn < info.PieceLength {
+            break
+        }
+    }
+    info.Pieces = pieces
+    return nil
+}
+
+func (info *Info) TotalLength() (ret int64) {
+    if info.IsDir() {
+        for _, fi := range info.Files {
+            ret += fi.Length
+        }
+    } else {
+        ret = info.Length
+    }
+    return
+}
+
+func (info *Info) NumPieces() int {
+    if len(info.Pieces)%20 != 0 {
+        panic(len(info.Pieces))
+    }
+    return len(info.Pieces) / 20
+}
+
+func (info *Info) IsDir() bool {
+    return len(info.Files) != 0
+}
+
+// The files field, converted up from the old single-file in the parent info
+// dict if necessary. This is a helper to avoid having to conditionally handle
+// single and multi-file torrent infos.
+func (info *Info) UpvertedFiles() []FileInfo {
+    if len(info.Files) == 0 {
+        return []FileInfo{{
+            Length: info.Length,
+            // Callers should determine that Info.Name is the basename, and
+            // thus a regular file.
+            Path: nil,
+        }}
+    }
+    return info.Files
+}
+
+func (info *Info) Piece(index int) Piece {
+    return Piece{info, index}
+}
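With Info stripped of its cached bytes, producing a .torrent now means building an Info, bencoding it into MetaInfo.InfoBytes, and writing the MetaInfo out, which is the same flow the updated testutil.GreetingMetaInfo and cmd/torrent-create follow. A sketch under those assumptions (the ./data path and output name are illustrative):

package main

import (
    "log"
    "os"

    "github.com/anacrolix/torrent/bencode"
    "github.com/anacrolix/torrent/metainfo"
)

func main() {
    info := metainfo.Info{PieceLength: 256 * 1024}
    // Walks the directory, filling in Name, Files (or Length) and Pieces.
    if err := info.BuildFromFilePath("./data"); err != nil {
        log.Fatal(err)
    }
    var mi metainfo.MetaInfo
    mi.SetDefaults() // comment, created-by, creation date; no longer sets a piece length
    var err error
    mi.InfoBytes, err = bencode.Marshal(info)
    if err != nil {
        log.Fatal(err)
    }
    f, err := os.Create(info.Name + ".torrent")
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()
    if err := mi.Write(f); err != nil { // bencodes the whole metainfo
        log.Fatal(err)
    }
}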
@@ -0,0 +1,16 @@
+package metainfo
+
+import (
+    "testing"
+
+    "github.com/stretchr/testify/assert"
+
+    "github.com/anacrolix/torrent/bencode"
+)
+
+func TestMarshalInfo(t *testing.T) {
+    var info Info
+    b, err := bencode.Marshal(info)
+    assert.NoError(t, err)
+    assert.EqualValues(t, "d4:name0:12:piece lengthi0e6:pieceslee", string(b))
+}
@@ -1,47 +0,0 @@
-package metainfo
-
-import "github.com/anacrolix/torrent/bencode"
-
-// A wrapper around Info that exposes the Bytes directly, in case marshalling
-// and unmarshalling Info doesn't produce the same bytes.
-type InfoEx struct {
-    Info
-    // Set when unmarshalling, and used when marshalling. Call .UpdateBytes to
-    // set it by bencoding Info.
-    Bytes []byte
-}
-
-var (
-    _ bencode.Marshaler   = &InfoEx{}
-    _ bencode.Unmarshaler = &InfoEx{}
-)
-
-// Marshals .Info, and sets .Bytes with the result.
-func (ie *InfoEx) UpdateBytes() {
-    var err error
-    ie.Bytes, err = bencode.Marshal(&ie.Info)
-    if err != nil {
-        panic(err)
-    }
-}
-
-// Returns the SHA1 hash of .Bytes.
-func (ie *InfoEx) Hash() Hash {
-    return HashBytes(ie.Bytes)
-}
-
-func (ie *InfoEx) UnmarshalBencode(data []byte) error {
-    ie.Bytes = append([]byte(nil), data...)
-    return bencode.Unmarshal(data, &ie.Info)
-}
-
-func (ie *InfoEx) MarshalBencode() ([]byte, error) {
-    if ie.Bytes == nil {
-        ie.UpdateBytes()
-    }
-    return ie.Bytes, nil
-}
-
-func (info *InfoEx) Piece(i int) Piece {
-    return Piece{info, i}
-}
@@ -70,11 +70,11 @@ func TestParseMagnetURI(t *testing.T) {
 
 }
 
-func Test_Magnetize(t *testing.T) {
+func TestMagnetize(t *testing.T) {
     mi, err := LoadFromFile("../testdata/bootstrap.dat.torrent")
     require.NoError(t, err)
 
-    m := mi.Magnet()
+    m := mi.Magnet(mi.UnmarshalInfo().Name, mi.HashInfoBytes())
 
     assert.EqualValues(t, "bootstrap.dat", m.DisplayName)
 
@@ -1,20 +1,26 @@
 package metainfo
 
 import (
-    "crypto/sha1"
-    "errors"
     "fmt"
     "io"
-    "log"
     "os"
-    "path/filepath"
-    "strings"
     "time"
 
-    "github.com/anacrolix/missinggo/slices"
     "github.com/anacrolix/torrent/bencode"
 )
 
+type MetaInfo struct {
+    InfoBytes    bencode.Bytes `bencode:"info"`
+    Announce     string        `bencode:"announce,omitempty"`
+    AnnounceList [][]string    `bencode:"announce-list,omitempty"`
+    Nodes        []Node        `bencode:"nodes,omitempty"`
+    CreationDate int64         `bencode:"creation date,omitempty"`
+    Comment      string        `bencode:"comment,omitempty"`
+    CreatedBy    string        `bencode:"created by,omitempty"`
+    Encoding     string        `bencode:"encoding,omitempty"`
+    URLList      interface{}   `bencode:"url-list,omitempty"`
+}
+
 // Information specific to a single file inside the MetaInfo structure.
 type FileInfo struct {
     Length int64 `bencode:"length"`
@@ -43,158 +49,20 @@ func LoadFromFile(filename string) (*MetaInfo, error) {
     return Load(f)
 }
 
-// The info dictionary.
-type Info struct {
-    PieceLength int64      `bencode:"piece length"`
-    Pieces      []byte     `bencode:"pieces"`
-    Name        string     `bencode:"name"`
-    Length      int64      `bencode:"length,omitempty"`
-    Private     *bool      `bencode:"private,omitempty"`
-    Files       []FileInfo `bencode:"files,omitempty"`
-}
-
-// This is a helper that sets Files and Pieces from a root path and its
-// children.
-func (info *Info) BuildFromFilePath(root string) (err error) {
-    info.Name = filepath.Base(root)
-    info.Files = nil
-    err = filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
-        if err != nil {
-            return err
-        }
-        if fi.IsDir() {
-            // Directories are implicit in torrent files.
-            return nil
-        } else if path == root {
-            // The root is a file.
-            info.Length = fi.Size()
-            return nil
-        }
-        relPath, err := filepath.Rel(root, path)
-        log.Println(relPath, err)
-        if err != nil {
-            return fmt.Errorf("error getting relative path: %s", err)
-        }
-        info.Files = append(info.Files, FileInfo{
-            Path:   strings.Split(relPath, string(filepath.Separator)),
-            Length: fi.Size(),
-        })
-        return nil
-    })
-    if err != nil {
-        return
-    }
-    slices.Sort(info.Files, func(l, r FileInfo) bool {
-        return strings.Join(l.Path, "/") < strings.Join(r.Path, "/")
-    })
-    err = info.GeneratePieces(func(fi FileInfo) (io.ReadCloser, error) {
-        return os.Open(filepath.Join(root, strings.Join(fi.Path, string(filepath.Separator))))
-    })
-    if err != nil {
-        err = fmt.Errorf("error generating pieces: %s", err)
-    }
-    return
-}
-
-func (info *Info) writeFiles(w io.Writer, open func(fi FileInfo) (io.ReadCloser, error)) error {
-    for _, fi := range info.UpvertedFiles() {
-        r, err := open(fi)
-        if err != nil {
-            return fmt.Errorf("error opening %v: %s", fi, err)
-        }
-        wn, err := io.CopyN(w, r, fi.Length)
-        r.Close()
-        if wn != fi.Length || err != nil {
-            return fmt.Errorf("error hashing %v: %s", fi, err)
-        }
-    }
-    return nil
-}
-
-// Set info.Pieces by hashing info.Files.
-func (info *Info) GeneratePieces(open func(fi FileInfo) (io.ReadCloser, error)) error {
-    if info.PieceLength == 0 {
-        return errors.New("piece length must be non-zero")
-    }
-    pr, pw := io.Pipe()
-    go func() {
-        err := info.writeFiles(pw, open)
-        pw.CloseWithError(err)
-    }()
-    defer pr.Close()
-    var pieces []byte
-    for {
-        hasher := sha1.New()
-        wn, err := io.CopyN(hasher, pr, info.PieceLength)
-        if err == io.EOF {
-            err = nil
-        }
-        if err != nil {
-            return err
-        }
-        if wn == 0 {
-            break
-        }
-        pieces = hasher.Sum(pieces)
-        if wn < info.PieceLength {
-            break
-        }
-    }
-    info.Pieces = pieces
-    return nil
-}
-
-func (info *Info) TotalLength() (ret int64) {
-    if info.IsDir() {
-        for _, fi := range info.Files {
-            ret += fi.Length
-        }
-    } else {
-        ret = info.Length
-    }
-    return
-}
-
-func (info *Info) NumPieces() int {
-    if len(info.Pieces)%20 != 0 {
-        panic(len(info.Pieces))
-    }
-    return len(info.Pieces) / 20
-}
-
-func (info *Info) IsDir() bool {
-    return len(info.Files) != 0
-}
-
-// The files field, converted up from the old single-file in the parent info
-// dict if necessary. This is a helper to avoid having to conditionally handle
-// single and multi-file torrent infos.
-func (info *Info) UpvertedFiles() []FileInfo {
-    if len(info.Files) == 0 {
-        return []FileInfo{{
-            Length: info.Length,
-            // Callers should determine that Info.Name is the basename, and
-            // thus a regular file.
-            Path: nil,
-        }}
-    }
-    return info.Files
-}
-
-type MetaInfo struct {
-    Info         InfoEx        `bencode:"info"`
-    Announce     string        `bencode:"announce,omitempty"`
-    AnnounceList [][]string    `bencode:"announce-list,omitempty"`
-    Nodes        []Node        `bencode:"nodes,omitempty"`
-    CreationDate int64         `bencode:"creation date,omitempty"`
-    Comment      string        `bencode:"comment,omitempty"`
-    CreatedBy    string        `bencode:"created by,omitempty"`
-    Encoding     string        `bencode:"encoding,omitempty"`
-    URLList      interface{}   `bencode:"url-list,omitempty"`
-}
-
+func (mi MetaInfo) UnmarshalInfo() (info Info) {
+    err := bencode.Unmarshal(mi.InfoBytes, &info)
+    if err != nil {
+        panic(fmt.Sprintf("bad info bytes: %s", err))
+    }
+    return
+}
+
+func (mi MetaInfo) HashInfoBytes() (infoHash Hash) {
+    return HashBytes(mi.InfoBytes)
+}
+
 // Encode to bencoded form.
-func (mi *MetaInfo) Write(w io.Writer) error {
+func (mi MetaInfo) Write(w io.Writer) error {
     return bencode.NewEncoder(w).Encode(mi)
 }
 
@@ -203,11 +71,11 @@ func (mi *MetaInfo) SetDefaults() {
     mi.Comment = "yoloham"
     mi.CreatedBy = "github.com/anacrolix/torrent"
     mi.CreationDate = time.Now().Unix()
-    mi.Info.PieceLength = 256 * 1024
+    // mi.Info.PieceLength = 256 * 1024
 }
 
 // Creates a Magnet from a MetaInfo.
-func (mi *MetaInfo) Magnet() (m Magnet) {
+func (mi *MetaInfo) Magnet(displayName string, infoHash Hash) (m Magnet) {
     for _, tier := range mi.AnnounceList {
         for _, tracker := range tier {
             m.Trackers = append(m.Trackers, tracker)
@@ -216,7 +84,7 @@ func (mi *MetaInfo) Magnet() (m Magnet) {
     if m.Trackers == nil && mi.Announce != "" {
         m.Trackers = []string{mi.Announce}
     }
-    m.DisplayName = mi.Info.Name
-    m.InfoHash = mi.Info.Hash()
+    m.DisplayName = displayName
+    m.InfoHash = infoHash
     return
 }
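On the reading side, the info dict is now decoded on demand with UnmarshalInfo (which panics on malformed bytes in this version), the infohash is derived from the stored bytes with HashInfoBytes, and Magnet takes the display name and hash explicitly. A short usage sketch, with the file name being illustrative:

package main

import (
    "fmt"
    "log"

    "github.com/anacrolix/torrent/metainfo"
)

func main() {
    mi, err := metainfo.LoadFromFile("example.torrent")
    if err != nil {
        log.Fatal(err)
    }
    info := mi.UnmarshalInfo() // panics if InfoBytes aren't valid bencode
    fmt.Println(info.Name, info.TotalLength(), info.NumPieces())
    fmt.Println(mi.HashInfoBytes().HexString())
    fmt.Println(mi.Magnet(info.Name, mi.HashInfoBytes()).String())
}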
@@ -19,11 +19,12 @@ func testFile(t *testing.T, filename string) {
     mi, err := LoadFromFile(filename)
     require.NoError(t, err)
 
-    if len(mi.Info.Files) == 1 {
-        t.Logf("Single file: %s (length: %d)\n", mi.Info.Name, mi.Info.Files[0].Length)
+    info := mi.UnmarshalInfo()
+    if len(info.Files) == 1 {
+        t.Logf("Single file: %s (length: %d)\n", info.Name, info.Files[0].Length)
     } else {
-        t.Logf("Multiple files: %s\n", mi.Info.Name)
-        for _, f := range mi.Info.Files {
+        t.Logf("Multiple files: %s\n", info.Name)
+        for _, f := range info.Files {
             t.Logf(" - %s (length: %d)\n", path.Join(f.Path...), f.Length)
         }
     }
@@ -34,9 +35,9 @@ func testFile(t *testing.T, filename string) {
         }
     }
 
-    b, err := bencode.Marshal(&mi.Info.Info)
+    b, err := bencode.Marshal(&info)
     require.NoError(t, err)
-    assert.EqualValues(t, string(b), string(mi.Info.Bytes))
+    assert.EqualValues(t, string(b), string(mi.InfoBytes))
 }
 
 func TestFile(t *testing.T) {
@@ -96,3 +97,21 @@ func TestBuildFromFilePathOrder(t *testing.T) {
         Path: []string{"b"},
     }}, info.Files)
 }
+
+func testUnmarshal(t *testing.T, input string, expected *MetaInfo) {
+    var actual MetaInfo
+    err := bencode.Unmarshal([]byte(input), &actual)
+    if expected == nil {
+        assert.Error(t, err)
+        return
+    }
+    assert.NoError(t, err)
+    assert.EqualValues(t, *expected, actual)
+}
+
+func TestUnmarshal(t *testing.T) {
+    testUnmarshal(t, `de`, &MetaInfo{})
+    testUnmarshal(t, `d4:infoe`, &MetaInfo{})
+    testUnmarshal(t, `d4:infoabce`, nil)
+    testUnmarshal(t, `d4:infodee`, &MetaInfo{InfoBytes: []byte("de")})
+}
@@ -42,15 +42,16 @@ func TestNodesListPairsBEP5(t *testing.T) {
 }
 
 func testMarshalMetainfo(t *testing.T, expected string, mi *MetaInfo) {
-    b, err := bencode.Marshal(mi)
+    b, err := bencode.Marshal(*mi)
     assert.NoError(t, err)
     assert.EqualValues(t, expected, string(b))
 }
 
 func TestMarshalMetainfoNodes(t *testing.T) {
-    testMarshalMetainfo(t, "d4:infod4:name0:12:piece lengthi0e6:piecesleee", &MetaInfo{})
-    testMarshalMetainfo(t, "d4:infod4:name0:12:piece lengthi0e6:pieceslee5:nodesl12:1.2.3.4:555514:not a hostportee", &MetaInfo{
+    testMarshalMetainfo(t, "d4:infodee", &MetaInfo{InfoBytes: []byte("de")})
+    testMarshalMetainfo(t, "d4:infod2:hi5:theree5:nodesl12:1.2.3.4:555514:not a hostportee", &MetaInfo{
         Nodes:     []Node{"1.2.3.4:5555", "not a hostport"},
+        InfoBytes: []byte("d2:hi5:theree"),
     })
 }
 
@@ -3,7 +3,7 @@ package metainfo
 import "github.com/anacrolix/missinggo"
 
 type Piece struct {
-    Info *InfoEx
+    Info *Info
     i    int
 }
 
@@ -26,12 +26,3 @@ func (p Piece) Hash() (ret Hash) {
 func (p Piece) Index() int {
     return p.i
 }
-
-func (p Piece) Key() PieceKey {
-    return PieceKey{p.Info.Hash(), p.i}
-}
-
-type PieceKey struct {
-    Hash  Hash
-    Index int
-}
@@ -0,0 +1,6 @@
+package metainfo
+
+type PieceKey struct {
+    InfoHash Hash
+    Index    int
+}
@@ -7,8 +7,8 @@ import (
 )
 
 type pieceCompletion interface {
-    Get(metainfo.Piece) (bool, error)
-    Set(metainfo.Piece, bool) error
+    Get(metainfo.PieceKey) (bool, error)
+    Set(metainfo.PieceKey, bool) error
     Close()
 }
 
@@ -10,19 +10,19 @@ type mapPieceCompletion struct {
 
 func (mapPieceCompletion) Close() {}
 
-func (me *mapPieceCompletion) Get(p metainfo.Piece) (bool, error) {
-    _, ok := me.m[p.Key()]
+func (me *mapPieceCompletion) Get(pk metainfo.PieceKey) (bool, error) {
+    _, ok := me.m[pk]
     return ok, nil
 }
 
-func (me *mapPieceCompletion) Set(p metainfo.Piece, b bool) error {
+func (me *mapPieceCompletion) Set(pk metainfo.PieceKey, b bool) error {
     if b {
         if me.m == nil {
             me.m = make(map[metainfo.PieceKey]struct{})
         }
-        me.m[p.Key()] = struct{}{}
+        me.m[pk] = struct{}{}
     } else {
-        delete(me.m, p.Key())
+        delete(me.m, pk)
     }
     return nil
 }
@@ -26,17 +26,17 @@ func newDBPieceCompletion(dir string) (ret *dbPieceCompletion, err error) {
     return
 }
 
-func (me *dbPieceCompletion) Get(p metainfo.Piece) (ret bool, err error) {
-    row := me.db.QueryRow(`select exists(select * from completed where infohash=? and "index"=?)`, p.Info.Hash().HexString(), p.Index())
+func (me *dbPieceCompletion) Get(pk metainfo.PieceKey) (ret bool, err error) {
+    row := me.db.QueryRow(`select exists(select * from completed where infohash=? and "index"=?)`, pk.InfoHash.HexString(), pk.Index)
     err = row.Scan(&ret)
     return
 }
 
-func (me *dbPieceCompletion) Set(p metainfo.Piece, b bool) (err error) {
+func (me *dbPieceCompletion) Set(pk metainfo.PieceKey, b bool) (err error) {
     if b {
-        _, err = me.db.Exec(`insert into completed (infohash, "index") values (?, ?)`, p.Info.Hash().HexString(), p.Index())
+        _, err = me.db.Exec(`insert into completed (infohash, "index") values (?, ?)`, pk.InfoHash.HexString(), pk.Index)
     } else {
-        _, err = me.db.Exec(`delete from completed where infohash=? and "index"=?`, p.Info.Hash().HexString(), p.Index())
+        _, err = me.db.Exec(`delete from completed where infohash=? and "index"=?`, pk.InfoHash.HexString(), pk.Index)
     }
     return
 }
@@ -23,10 +23,11 @@ func NewFile(baseDir string) Client {
     }
 }
 
-func (fs *fileStorage) OpenTorrent(info *metainfo.InfoEx) (Torrent, error) {
+func (fs *fileStorage) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (Torrent, error) {
     return &fileTorrentStorage{
         fs,
-        &info.Info,
+        info,
+        infoHash,
         pieceCompletionForDir(fs.baseDir),
     }, nil
 }
@@ -35,6 +36,7 @@ func (fs *fileStorage) OpenTorrent(info *metainfo.InfoEx) (Torrent, error) {
 type fileTorrentStorage struct {
     fs         *fileStorage
     info       *metainfo.Info
+    infoHash   metainfo.Hash
     completion pieceCompletion
 }
 
@@ -9,32 +9,29 @@ import (
 )
 
 func TestExtentCompleteRequiredLengths(t *testing.T) {
-    info := &metainfo.InfoEx{
-        Info: metainfo.Info{
+    info := &metainfo.Info{
         Files: []metainfo.FileInfo{
             {Path: []string{"a"}, Length: 2},
             {Path: []string{"b"}, Length: 3},
         },
-        },
     }
-    assert.Empty(t, extentCompleteRequiredLengths(&info.Info, 0, 0))
+    assert.Empty(t, extentCompleteRequiredLengths(info, 0, 0))
     assert.EqualValues(t, []metainfo.FileInfo{
         {Path: []string{"a"}, Length: 1},
-    }, extentCompleteRequiredLengths(&info.Info, 0, 1))
+    }, extentCompleteRequiredLengths(info, 0, 1))
     assert.EqualValues(t, []metainfo.FileInfo{
         {Path: []string{"a"}, Length: 2},
-    }, extentCompleteRequiredLengths(&info.Info, 0, 2))
+    }, extentCompleteRequiredLengths(info, 0, 2))
     assert.EqualValues(t, []metainfo.FileInfo{
         {Path: []string{"a"}, Length: 2},
         {Path: []string{"b"}, Length: 1},
-    }, extentCompleteRequiredLengths(&info.Info, 0, 3))
+    }, extentCompleteRequiredLengths(info, 0, 3))
     assert.EqualValues(t, []metainfo.FileInfo{
         {Path: []string{"b"}, Length: 2},
-    }, extentCompleteRequiredLengths(&info.Info, 2, 2))
+    }, extentCompleteRequiredLengths(info, 2, 2))
     assert.EqualValues(t, []metainfo.FileInfo{
         {Path: []string{"b"}, Length: 3},
-    }, extentCompleteRequiredLengths(&info.Info, 4, 1))
-    assert.Len(t, extentCompleteRequiredLengths(&info.Info, 5, 0), 0)
-    assert.Panics(t, func() { extentCompleteRequiredLengths(&info.Info, 6, 1) })
-
+    }, extentCompleteRequiredLengths(info, 4, 1))
+    assert.Len(t, extentCompleteRequiredLengths(info, 5, 0), 0)
+    assert.Panics(t, func() { extentCompleteRequiredLengths(info, 6, 1) })
 }
@@ -14,14 +14,18 @@ type fileStoragePiece struct {
     r io.ReaderAt
 }
 
+func (me *fileStoragePiece) pieceKey() metainfo.PieceKey {
+    return metainfo.PieceKey{me.infoHash, me.p.Index()}
+}
+
 func (fs *fileStoragePiece) GetIsComplete() bool {
-    ret, err := fs.completion.Get(fs.p)
+    ret, err := fs.completion.Get(fs.pieceKey())
     if err != nil || !ret {
         return false
     }
     // If it's allegedly complete, check that its constituent files have the
     // necessary length.
-    for _, fi := range extentCompleteRequiredLengths(&fs.p.Info.Info, fs.p.Offset(), fs.p.Length()) {
+    for _, fi := range extentCompleteRequiredLengths(fs.p.Info, fs.p.Offset(), fs.p.Length()) {
         s, err := os.Stat(fs.fileInfoName(fi))
         if err != nil || s.Size() < fi.Length {
             ret = false
@@ -32,12 +36,12 @@ func (fs *fileStoragePiece) GetIsComplete() bool {
         return true
     }
     // The completion was wrong, fix it.
-    fs.completion.Set(fs.p, false)
+    fs.completion.Set(fs.pieceKey(), false)
     return false
 }
 
 func (fs *fileStoragePiece) MarkComplete() error {
-    fs.completion.Set(fs.p, true)
+    fs.completion.Set(fs.pieceKey(), true)
     return nil
 }
 
@@ -50,6 +54,6 @@ func (fsp *fileStoragePiece) ReadAt(b []byte, off int64) (n int, err error) {
     if off < 0 || off >= fsp.p.Length() {
         return
     }
-    fsp.completion.Set(fsp.p, false)
+    fsp.completion.Set(fsp.pieceKey(), false)
     return
 }
@@ -20,14 +20,12 @@ func TestShortFile(t *testing.T) {
     require.NoError(t, err)
     defer os.RemoveAll(td)
     s := NewFile(td)
-    info := &metainfo.InfoEx{
-        Info: metainfo.Info{
+    info := &metainfo.Info{
         Name:        "a",
         Length:      2,
         PieceLength: missinggo.MiB,
-        },
     }
-    ts, err := s.OpenTorrent(info)
+    ts, err := s.OpenTorrent(info, metainfo.Hash{})
     assert.NoError(t, err)
     f, err := os.Create(filepath.Join(td, "a"))
     err = f.Truncate(1)
@@ -8,7 +8,7 @@ import (
 
 // Represents data storage for an unspecified torrent.
 type Client interface {
-    OpenTorrent(info *metainfo.InfoEx) (Torrent, error)
+    OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (Torrent, error)
 }
 
 // Data storage bound to a torrent.
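Third-party storage backends have to adopt the widened OpenTorrent signature too: the infohash now arrives as an explicit argument rather than via InfoEx. A minimal conforming stub, modelled on the badStorage test double above; the package and type names are hypothetical:

package mystorage

import (
    "errors"

    "github.com/anacrolix/torrent/metainfo"
    "github.com/anacrolix/torrent/storage"
)

// nullClient exists only to show the new interface shape.
type nullClient struct{}

var _ storage.Client = nullClient{}

func (nullClient) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (storage.Torrent, error) {
    // A real backend would key its on-disk state by infoHash and size files
    // from info (piece length, file list); this stub just refuses.
    return nil, errors.New("not implemented")
}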
@@ -6,31 +6,26 @@ import (
     "testing"
 
     "github.com/anacrolix/missinggo/resource"
-    "github.com/anacrolix/torrent/metainfo"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
+
+    "github.com/anacrolix/torrent/metainfo"
 )
 
 // Two different torrents opened from the same storage. Closing one should not
 // break the piece completion on the other.
 func testIssue95(t *testing.T, c Client) {
-    i1 := &metainfo.InfoEx{
-        Bytes: []byte("a"),
-        Info: metainfo.Info{
+    i1 := &metainfo.Info{
         Files:  []metainfo.FileInfo{{Path: []string{"a"}}},
         Pieces: make([]byte, 20),
-        },
     }
-    t1, err := c.OpenTorrent(i1)
+    t1, err := c.OpenTorrent(i1, metainfo.HashBytes([]byte("a")))
     require.NoError(t, err)
-    i2 := &metainfo.InfoEx{
-        Bytes: []byte("b"),
-        Info: metainfo.Info{
+    i2 := &metainfo.Info{
         Files:  []metainfo.FileInfo{{Path: []string{"a"}}},
         Pieces: make([]byte, 20),
-        },
     }
-    t2, err := c.OpenTorrent(i2)
+    t2, err := c.OpenTorrent(i2, metainfo.HashBytes([]byte("b")))
     require.NoError(t, err)
     t2p := t2.Piece(i2.Piece(0))
     assert.NoError(t, t1.Close())
@@ -15,13 +15,11 @@ func testMarkedCompleteMissingOnRead(t *testing.T, csf func(string) Client) {
     require.NoError(t, err)
     defer os.RemoveAll(td)
     cs := csf(td)
-    info := &metainfo.InfoEx{
-        Info: metainfo.Info{
+    info := &metainfo.Info{
         PieceLength: 1,
         Files:       []metainfo.FileInfo{{Path: []string{"a"}, Length: 1}},
-        },
     }
-    ts, err := cs.OpenTorrent(info)
+    ts, err := cs.OpenTorrent(info, metainfo.Hash{})
     require.NoError(t, err)
     p := ts.Piece(info.Piece(0))
     require.NoError(t, p.MarkComplete())
@@ -23,8 +23,8 @@ func NewMMap(baseDir string) Client {
     }
 }
 
-func (s *mmapStorage) OpenTorrent(info *metainfo.InfoEx) (t Torrent, err error) {
-    span, err := mMapTorrent(&info.Info, s.baseDir)
+func (s *mmapStorage) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (t Torrent, err error) {
+    span, err := mMapTorrent(info, s.baseDir)
     t = &mmapTorrentStorage{
         span: span,
         pc:   pieceCompletionForDir(s.baseDir),
@@ -54,17 +54,22 @@ func (ts *mmapTorrentStorage) Close() error {
 type mmapStoragePiece struct {
     pc pieceCompletion
     p  metainfo.Piece
+    ih metainfo.Hash
     io.ReaderAt
     io.WriterAt
 }
 
+func (me mmapStoragePiece) pieceKey() metainfo.PieceKey {
+    return metainfo.PieceKey{me.ih, me.p.Index()}
+}
+
 func (sp mmapStoragePiece) GetIsComplete() (ret bool) {
-    ret, _ = sp.pc.Get(sp.p)
+    ret, _ = sp.pc.Get(sp.pieceKey())
     return
 }
 
 func (sp mmapStoragePiece) MarkComplete() error {
-    sp.pc.Set(sp.p, true)
+    sp.pc.Set(sp.pieceKey(), true)
     return nil
 }
 
@@ -25,7 +25,7 @@ type pieceFileTorrentStorage struct {
     s *pieceFileStorage
 }
 
-func (s *pieceFileStorage) OpenTorrent(info *metainfo.InfoEx) (Torrent, error) {
+func (s *pieceFileStorage) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (Torrent, error) {
     return &pieceFileTorrentStorage{s}, nil
 }
 
@@ -20,7 +20,7 @@ func NewResourcePieces(p resource.Provider) Client {
     }
 }
 
-func (s *piecePerResource) OpenTorrent(info *metainfo.InfoEx) (Torrent, error) {
+func (s *piecePerResource) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (Torrent, error) {
     return s, nil
 }
 
t.go (4 changes)

@@ -24,7 +24,7 @@ func (t *Torrent) GotInfo() <-chan struct{} {
 }
 
 // Returns the metainfo info dictionary, or nil if it's not yet available.
-func (t *Torrent) Info() *metainfo.InfoEx {
+func (t *Torrent) Info() *metainfo.Info {
     return t.info
 }
 
@@ -117,7 +117,7 @@ func (t *Torrent) Length() int64 {
 
 // Returns a run-time generated metainfo for the torrent that includes the
 // info bytes and announce-list as currently known to the client.
-func (t *Torrent) Metainfo() *metainfo.MetaInfo {
+func (t *Torrent) Metainfo() metainfo.MetaInfo {
     t.cl.mu.Lock()
     defer t.cl.mu.Unlock()
     return t.newMetaInfo()
torrent.go (29 changes)

@@ -62,7 +62,7 @@ type Torrent struct {
     metainfo metainfo.MetaInfo
 
     // The info dict. nil if we don't have it (yet).
-    info *metainfo.InfoEx
+    info *metainfo.Info
     // Active peer connections, running message stream loops.
     conns               []*connection
     maxEstablishedConns int
@@ -210,7 +210,7 @@ func infoPieceHashes(info *metainfo.Info) (ret []string) {
 }
 
 func (t *Torrent) makePieces() {
-    hashes := infoPieceHashes(&t.info.Info)
+    hashes := infoPieceHashes(t.info)
     t.pieces = make([]piece, len(hashes))
     for i, hash := range hashes {
         piece := &t.pieces[i]
@@ -226,24 +226,24 @@ func (t *Torrent) setInfoBytes(b []byte) error {
     if t.haveInfo() {
         return nil
     }
-    var ie *metainfo.InfoEx
-    err := bencode.Unmarshal(b, &ie)
+    if metainfo.HashBytes(b) != t.infoHash {
+        return errors.New("info bytes have wrong hash")
+    }
+    var info metainfo.Info
+    err := bencode.Unmarshal(b, &info)
     if err != nil {
         return fmt.Errorf("error unmarshalling info bytes: %s", err)
     }
-    if ie.Hash() != t.infoHash {
-        return errors.New("info bytes have wrong hash")
-    }
-    err = validateInfo(&ie.Info)
+    err = validateInfo(&info)
     if err != nil {
         return fmt.Errorf("bad info: %s", err)
     }
     defer t.updateWantPeersEvent()
-    t.info = ie
+    t.info = &info
     t.displayName = "" // Save a few bytes lol.
     t.cl.event.Broadcast()
     t.gotMetainfo.Set()
-    t.storage, err = t.storageOpener.OpenTorrent(t.info)
+    t.storage, err = t.storageOpener.OpenTorrent(t.info, t.infoHash)
     if err != nil {
         return fmt.Errorf("error opening torrent storage: %s", err)
     }
@@ -475,17 +475,14 @@ func (t *Torrent) announceList() (al [][]string) {
 
 // Returns a run-time generated MetaInfo that includes the info bytes and
 // announce-list as currently known to the client.
-func (t *Torrent) newMetaInfo() (mi *metainfo.MetaInfo) {
-    mi = &metainfo.MetaInfo{
+func (t *Torrent) newMetaInfo() metainfo.MetaInfo {
+    return metainfo.MetaInfo{
         CreationDate: time.Now().Unix(),
         Comment:      "dynamic metainfo from client",
         CreatedBy:    "go.torrent",
         AnnounceList: t.announceList(),
+        InfoBytes:    t.metadataBytes,
     }
-    if t.info != nil {
-        mi.Info = *t.info
-    }
-    return
 }
 
 func (t *Torrent) BytesMissing() int64 {
@@ -69,7 +69,7 @@ func torrentFileInfoHash(fileName string) (ih metainfo.Hash, ok bool) {
     if mi == nil {
         return
     }
-    ih = mi.Info.Hash()
+    ih = mi.HashInfoBytes()
     ok = true
     return
 }