Remove the InfoEx type, and don't generate its infohash on the fly

Fixes #106.
Matt Joiner 2016-08-26 20:29:05 +10:00
parent 7b2561cea8
commit 2a1cef7c9e
37 changed files with 402 additions and 378 deletions
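
For downstream code, the practical effect is that metainfo.MetaInfo now carries the raw bencoded info dict (InfoBytes), and the info dict is decoded or hashed explicitly rather than via InfoEx. A minimal migration sketch, assuming the usual import paths; the .torrent filename and the empty client config are illustrative, the calls themselves come from the APIs in this diff:

package main

import (
    "log"

    "github.com/anacrolix/torrent"
    "github.com/anacrolix/torrent/metainfo"
)

func main() {
    mi, err := metainfo.LoadFromFile("example.torrent") // illustrative path
    if err != nil {
        log.Fatal(err)
    }
    // Previously mi.Info was an InfoEx that could hash itself; now the info
    // dict is unmarshalled and hashed explicitly from the raw bytes.
    info := mi.UnmarshalInfo()
    ih := mi.HashInfoBytes()
    log.Printf("%s %s", ih.HexString(), info.Name)

    cl, err := torrent.NewClient(&torrent.Config{})
    if err != nil {
        log.Fatal(err)
    }
    defer cl.Close()
    // TorrentSpec takes the verbatim info bytes instead of *metainfo.InfoEx.
    t, _, err := cl.AddTorrentSpec(&torrent.TorrentSpec{
        Trackers:    mi.AnnounceList,
        InfoHash:    ih,
        InfoBytes:   mi.InfoBytes,
        DisplayName: info.Name,
    })
    if err != nil {
        log.Fatal(err)
    }
    <-t.GotInfo()
}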

View File

@@ -1407,7 +1407,7 @@ type TorrentSpec struct {
 	// The tiered tracker URIs.
 	Trackers [][]string
 	InfoHash metainfo.Hash
-	Info *metainfo.InfoEx
+	InfoBytes []byte
 	// The name to use if the Name field from the Info isn't available.
 	DisplayName string
 	// The chunk size to use for outbound requests. Defaults to 16KiB if not
@@ -1430,11 +1430,12 @@ func TorrentSpecFromMagnetURI(uri string) (spec *TorrentSpec, err error) {
 }
 
 func TorrentSpecFromMetaInfo(mi *metainfo.MetaInfo) (spec *TorrentSpec) {
+	info := mi.UnmarshalInfo()
 	spec = &TorrentSpec{
 		Trackers:    mi.AnnounceList,
-		Info:        &mi.Info,
-		DisplayName: mi.Info.Name,
-		InfoHash:    mi.Info.Hash(),
+		InfoBytes:   mi.InfoBytes,
+		DisplayName: info.Name,
+		InfoHash:    mi.HashInfoBytes(),
 	}
 	if spec.Trackers == nil && mi.Announce != "" {
 		spec.Trackers = [][]string{{mi.Announce}}
@@ -1470,8 +1471,8 @@ func (cl *Client) AddTorrentSpec(spec *TorrentSpec) (t *Torrent, new bool, err e
 	if spec.DisplayName != "" {
 		t.SetDisplayName(spec.DisplayName)
 	}
-	if spec.Info != nil {
-		err = t.SetInfoBytes(spec.Info.Bytes)
+	if spec.InfoBytes != nil {
+		err = t.SetInfoBytes(spec.InfoBytes)
 		if err != nil {
 			return
 		}

View File

@ -88,14 +88,14 @@ func TestTorrentInitialState(t *testing.T) {
dir, mi := testutil.GreetingTestTorrent() dir, mi := testutil.GreetingTestTorrent()
defer os.RemoveAll(dir) defer os.RemoveAll(dir)
tor := &Torrent{ tor := &Torrent{
infoHash: mi.Info.Hash(), infoHash: mi.HashInfoBytes(),
pieceStateChanges: pubsub.NewPubSub(), pieceStateChanges: pubsub.NewPubSub(),
} }
tor.chunkSize = 2 tor.chunkSize = 2
tor.storageOpener = storage.NewFile("/dev/null") tor.storageOpener = storage.NewFile("/dev/null")
// Needed to lock for asynchronous piece verification. // Needed to lock for asynchronous piece verification.
tor.cl = new(Client) tor.cl = new(Client)
err := tor.setInfoBytes(mi.Info.Bytes) err := tor.setInfoBytes(mi.InfoBytes)
require.NoError(t, err) require.NoError(t, err)
require.Len(t, tor.pieces, 3) require.Len(t, tor.pieces, 3)
tor.pendAllChunkSpecs(0) tor.pendAllChunkSpecs(0)
@ -492,7 +492,7 @@ func TestMergingTrackersByAddingSpecs(t *testing.T) {
type badStorage struct{} type badStorage struct{}
func (bs badStorage) OpenTorrent(*metainfo.InfoEx) (storage.Torrent, error) { func (bs badStorage) OpenTorrent(*metainfo.Info, metainfo.Hash) (storage.Torrent, error) {
return bs, nil return bs, nil
} }
@@ -536,26 +536,24 @@ func TestCompletedPieceWrongSize(t *testing.T) {
 	cl, err := NewClient(&cfg)
 	require.NoError(t, err)
 	defer cl.Close()
-	ie := metainfo.InfoEx{
-		Info: metainfo.Info{
-			PieceLength: 15,
-			Pieces:      make([]byte, 20),
-			Files: []metainfo.FileInfo{
-				metainfo.FileInfo{Path: []string{"greeting"}, Length: 13},
-			},
+	info := metainfo.Info{
+		PieceLength: 15,
+		Pieces:      make([]byte, 20),
+		Files: []metainfo.FileInfo{
+			metainfo.FileInfo{Path: []string{"greeting"}, Length: 13},
 		},
 	}
-	ie.UpdateBytes()
+	b, err := bencode.Marshal(info)
 	tt, new, err := cl.AddTorrentSpec(&TorrentSpec{
-		Info:     &ie,
-		InfoHash: ie.Hash(),
+		InfoBytes: b,
+		InfoHash:  metainfo.HashBytes(b),
 	})
 	require.NoError(t, err)
 	defer tt.Drop()
 	assert.True(t, new)
 	r := tt.NewReader()
 	defer r.Close()
-	b, err := ioutil.ReadAll(r)
+	b, err = ioutil.ReadAll(r)
 	assert.Len(t, b, 13)
 	assert.NoError(t, err)
 }
@ -681,7 +679,7 @@ func TestAddTorrentSpecMerging(t *testing.T) {
dir, mi := testutil.GreetingTestTorrent() dir, mi := testutil.GreetingTestTorrent()
defer os.RemoveAll(dir) defer os.RemoveAll(dir)
tt, new, err := cl.AddTorrentSpec(&TorrentSpec{ tt, new, err := cl.AddTorrentSpec(&TorrentSpec{
InfoHash: mi.Info.Hash(), InfoHash: mi.HashInfoBytes(),
}) })
require.NoError(t, err) require.NoError(t, err)
require.True(t, new) require.True(t, new)
@ -698,7 +696,7 @@ func TestTorrentDroppedBeforeGotInfo(t *testing.T) {
cl, _ := NewClient(&TestingConfig) cl, _ := NewClient(&TestingConfig)
defer cl.Close() defer cl.Close()
tt, _, _ := cl.AddTorrentSpec(&TorrentSpec{ tt, _, _ := cl.AddTorrentSpec(&TorrentSpec{
InfoHash: mi.Info.Hash(), InfoHash: mi.HashInfoBytes(),
}) })
tt.Drop() tt.Drop()
assert.EqualValues(t, 0, len(cl.Torrents())) assert.EqualValues(t, 0, len(cl.Torrents()))
@ -709,7 +707,7 @@ func TestTorrentDroppedBeforeGotInfo(t *testing.T) {
} }
} }
func writeTorrentData(ts storage.Torrent, info *metainfo.InfoEx, b []byte) { func writeTorrentData(ts storage.Torrent, info metainfo.Info, b []byte) {
for i := range iter.N(info.NumPieces()) { for i := range iter.N(info.NumPieces()) {
n, _ := ts.Piece(info.Piece(i)).WriteAt(b, 0) n, _ := ts.Piece(info.Piece(i)).WriteAt(b, 0)
b = b[n:] b = b[n:]
@ -725,13 +723,15 @@ func testAddTorrentPriorPieceCompletion(t *testing.T, alreadyCompleted bool, csf
greetingDataTempDir, greetingMetainfo := testutil.GreetingTestTorrent() greetingDataTempDir, greetingMetainfo := testutil.GreetingTestTorrent()
defer os.RemoveAll(greetingDataTempDir) defer os.RemoveAll(greetingDataTempDir)
filePieceStore := csf(fileCache) filePieceStore := csf(fileCache)
greetingData, err := filePieceStore.OpenTorrent(&greetingMetainfo.Info) info := greetingMetainfo.UnmarshalInfo()
ih := greetingMetainfo.HashInfoBytes()
greetingData, err := filePieceStore.OpenTorrent(&info, ih)
require.NoError(t, err) require.NoError(t, err)
writeTorrentData(greetingData, &greetingMetainfo.Info, []byte(testutil.GreetingFileContents)) writeTorrentData(greetingData, info, []byte(testutil.GreetingFileContents))
// require.Equal(t, len(testutil.GreetingFileContents), written) // require.Equal(t, len(testutil.GreetingFileContents), written)
// require.NoError(t, err) // require.NoError(t, err)
for i := 0; i < greetingMetainfo.Info.NumPieces(); i++ { for i := 0; i < info.NumPieces(); i++ {
p := greetingMetainfo.Info.Piece(i) p := info.Piece(i)
if alreadyCompleted { if alreadyCompleted {
err := greetingData.Piece(p).MarkComplete() err := greetingData.Piece(p).MarkComplete()
assert.NoError(t, err) assert.NoError(t, err)
@@ -871,17 +871,16 @@ func TestPeerInvalidHave(t *testing.T) {
 	cl, err := NewClient(&TestingConfig)
 	require.NoError(t, err)
 	defer cl.Close()
-	ie := metainfo.InfoEx{
-		Info: metainfo.Info{
-			PieceLength: 1,
-			Pieces:      make([]byte, 20),
-			Files:       []metainfo.FileInfo{{Length: 1}},
-		},
+	info := metainfo.Info{
+		PieceLength: 1,
+		Pieces:      make([]byte, 20),
+		Files:       []metainfo.FileInfo{{Length: 1}},
 	}
-	ie.UpdateBytes()
+	infoBytes, err := bencode.Marshal(info)
+	require.NoError(t, err)
 	tt, _new, err := cl.AddTorrentSpec(&TorrentSpec{
-		Info:     &ie,
-		InfoHash: ie.Hash(),
+		InfoBytes: infoBytes,
+		InfoHash:  metainfo.HashBytes(infoBytes),
 	})
 	require.NoError(t, err)
 	assert.True(t, _new)
@ -901,7 +900,7 @@ func TestPieceCompletedInStorageButNotClient(t *testing.T) {
seeder, err := NewClient(&TestingConfig) seeder, err := NewClient(&TestingConfig)
require.NoError(t, err) require.NoError(t, err)
seeder.AddTorrentSpec(&TorrentSpec{ seeder.AddTorrentSpec(&TorrentSpec{
Info: &greetingMetainfo.Info, InfoBytes: greetingMetainfo.InfoBytes,
}) })
} }
@ -980,7 +979,7 @@ func totalConns(tts []*Torrent) (ret int) {
func TestSetMaxEstablishedConn(t *testing.T) { func TestSetMaxEstablishedConn(t *testing.T) {
var tts []*Torrent var tts []*Torrent
ih := testutil.GreetingMetaInfo().Info.Hash() ih := testutil.GreetingMetaInfo().HashInfoBytes()
cfg := TestingConfig cfg := TestingConfig
for i := range iter.N(3) { for i := range iter.N(3) {
cl, err := NewClient(&cfg) cl, err := NewClient(&cfg)

View File

@ -29,7 +29,7 @@ func main() {
<-t.GotInfo() <-t.GotInfo()
mi := t.Metainfo() mi := t.Metainfo()
t.Drop() t.Drop()
f, err := os.Create(mi.Info.Name + ".torrent") f, err := os.Create(mi.UnmarshalInfo().Name + ".torrent")
if err != nil { if err != nil {
log.Fatalf("error creating torrent metainfo file: %s", err) log.Fatalf("error creating torrent metainfo file: %s", err)
} }

View File

@ -6,6 +6,7 @@ import (
"github.com/anacrolix/tagflag" "github.com/anacrolix/tagflag"
"github.com/anacrolix/torrent/bencode"
"github.com/anacrolix/torrent/metainfo" "github.com/anacrolix/torrent/metainfo"
) )
@ -32,7 +33,14 @@ func main() {
mi.AnnounceList = append(mi.AnnounceList, []string{a}) mi.AnnounceList = append(mi.AnnounceList, []string{a})
} }
mi.SetDefaults() mi.SetDefaults()
err := mi.Info.BuildFromFilePath(args.Root) info := metainfo.Info{
PieceLength: 256 * 1024,
}
err := info.BuildFromFilePath(args.Root)
if err != nil {
log.Fatal(err)
}
mi.InfoBytes, err = bencode.Marshal(info)
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }

View File

@ -20,6 +20,6 @@ func main() {
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
fmt.Printf("%s: %s\n", mi.Info.Hash().HexString(), arg) fmt.Printf("%s: %s\n", mi.HashInfoBytes().HexString(), arg)
} }
} }

View File

@ -17,6 +17,7 @@ func main() {
fmt.Fprintf(os.Stderr, "error reading metainfo from stdin: %s", err) fmt.Fprintf(os.Stderr, "error reading metainfo from stdin: %s", err)
os.Exit(1) os.Exit(1)
} }
info := mi.UnmarshalInfo()
fmt.Fprintf(os.Stdout, "%s\n", mi.Magnet().String()) fmt.Fprintf(os.Stdout, "%s\n", mi.Magnet(info.Name, mi.HashInfoBytes()).String())
} }

View File

@ -29,16 +29,16 @@ func main() {
log.Print(err) log.Print(err)
continue continue
} }
info := &metainfo.Info.Info info := metainfo.UnmarshalInfo()
if flags.JustName { if flags.JustName {
fmt.Printf("%s\n", metainfo.Info.Name) fmt.Printf("%s\n", info.Name)
continue continue
} }
d := map[string]interface{}{ d := map[string]interface{}{
"Name": info.Name, "Name": info.Name,
"NumPieces": info.NumPieces(), "NumPieces": info.NumPieces(),
"PieceLength": info.PieceLength, "PieceLength": info.PieceLength,
"InfoHash": metainfo.Info.Hash().HexString(), "InfoHash": metainfo.HashInfoBytes().HexString(),
"NumFiles": len(info.UpvertedFiles()), "NumFiles": len(info.UpvertedFiles()),
"TotalLength": info.TotalLength(), "TotalLength": info.TotalLength(),
"Announce": metainfo.Announce, "Announce": metainfo.Announce,

View File

@ -49,22 +49,22 @@ func main() {
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
info := metaInfo.UnmarshalInfo()
mMapSpan := &mmap_span.MMapSpan{} mMapSpan := &mmap_span.MMapSpan{}
if len(metaInfo.Info.Files) > 0 { if len(info.Files) > 0 {
for _, file := range metaInfo.Info.Files { for _, file := range info.Files {
filename := filepath.Join(append([]string{*dataPath, metaInfo.Info.Name}, file.Path...)...) filename := filepath.Join(append([]string{*dataPath, info.Name}, file.Path...)...)
goMMap := fileToMmap(filename, file.Length) goMMap := fileToMmap(filename, file.Length)
mMapSpan.Append(goMMap) mMapSpan.Append(goMMap)
} }
log.Println(len(metaInfo.Info.Files)) log.Println(len(info.Files))
} else { } else {
goMMap := fileToMmap(*dataPath, metaInfo.Info.Length) goMMap := fileToMmap(*dataPath, info.Length)
mMapSpan.Append(goMMap) mMapSpan.Append(goMMap)
} }
log.Println(mMapSpan.Size()) log.Println(mMapSpan.Size())
log.Println(len(metaInfo.Info.Pieces)) log.Println(len(info.Pieces))
info := metaInfo.Info for i := range iter.N(info.NumPieces()) {
for i := range iter.N(metaInfo.Info.NumPieces()) {
p := info.Piece(i) p := info.Piece(i)
hash := sha1.New() hash := sha1.New()
_, err := io.Copy(hash, io.NewSectionReader(mMapSpan, p.Offset(), p.Length())) _, err := io.Copy(hash, io.NewSectionReader(mMapSpan, p.Offset(), p.Length()))

View File

@ -49,7 +49,7 @@ type rootNode struct {
type node struct { type node struct {
path string path string
metadata *metainfo.InfoEx metadata *metainfo.Info
FS *TorrentFS FS *TorrentFS
t *torrent.Torrent t *torrent.Torrent
} }

View File

@ -97,7 +97,7 @@ func TestUnmountWedged(t *testing.T) {
}) })
require.NoError(t, err) require.NoError(t, err)
defer client.Close() defer client.Close()
_, err = client.AddTorrent(layout.Metainfo) tt, err := client.AddTorrent(layout.Metainfo)
require.NoError(t, err) require.NoError(t, err)
fs := New(client) fs := New(client)
fuseConn, err := fuse.Mount(layout.MountDir) fuseConn, err := fuse.Mount(layout.MountDir)
@ -124,7 +124,7 @@ func TestUnmountWedged(t *testing.T) {
// "wedge" FUSE, requiring the fs object to be forcibly destroyed. The // "wedge" FUSE, requiring the fs object to be forcibly destroyed. The
// read call will return with a FS error. // read call will return with a FS error.
go func() { go func() {
_, err := ioutil.ReadFile(filepath.Join(layout.MountDir, layout.Metainfo.Info.Name)) _, err := ioutil.ReadFile(filepath.Join(layout.MountDir, tt.Info().Name))
if err == nil { if err == nil {
t.Fatal("expected error reading greeting") t.Fatal("expected error reading greeting")
} }
@ -169,7 +169,7 @@ func TestDownloadOnDemand(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
defer seeder.Close() defer seeder.Close()
testutil.ExportStatusWriter(seeder, "s") testutil.ExportStatusWriter(seeder, "s")
_, err = seeder.AddMagnet(fmt.Sprintf("magnet:?xt=urn:btih:%s", layout.Metainfo.Info.Hash().HexString())) _, err = seeder.AddMagnet(fmt.Sprintf("magnet:?xt=urn:btih:%s", layout.Metainfo.HashInfoBytes().HexString()))
require.NoError(t, err) require.NoError(t, err)
leecher, err := torrent.NewClient(&torrent.Config{ leecher, err := torrent.NewClient(&torrent.Config{
DisableTrackers: true, DisableTrackers: true,

View File

@ -16,6 +16,7 @@ import (
"github.com/anacrolix/missinggo" "github.com/anacrolix/missinggo"
"github.com/anacrolix/torrent/bencode"
"github.com/anacrolix/torrent/metainfo" "github.com/anacrolix/torrent/metainfo"
) )
@@ -30,19 +31,25 @@ func CreateDummyTorrentData(dirName string) string {
 	f.WriteString(GreetingFileContents)
 	return f.Name()
 }
 
-func GreetingMetaInfo() (mi *metainfo.MetaInfo) {
-	mi = new(metainfo.MetaInfo)
-	mi.Info.Name = GreetingFileName
-	mi.Info.Length = int64(len(GreetingFileContents))
-	mi.Info.PieceLength = 5
-	err := mi.Info.GeneratePieces(func(metainfo.FileInfo) (io.ReadCloser, error) {
+func GreetingMetaInfo() *metainfo.MetaInfo {
+	info := metainfo.Info{
+		Name:        GreetingFileName,
+		Length:      int64(len(GreetingFileContents)),
+		PieceLength: 5,
+	}
+	err := info.GeneratePieces(func(metainfo.FileInfo) (io.ReadCloser, error) {
 		return ioutil.NopCloser(strings.NewReader(GreetingFileContents)), nil
 	})
 	if err != nil {
 		panic(err)
 	}
-	mi.Info.UpdateBytes()
-	return
+	mi := &metainfo.MetaInfo{}
+	mi.InfoBytes, err = bencode.Marshal(info)
+	if err != nil {
+		panic(err)
+	}
+	return mi
 }
// Gives a temporary directory containing the completed "greeting" torrent, // Gives a temporary directory containing the completed "greeting" torrent,

View File

@ -17,9 +17,11 @@ func TestHashPieceAfterStorageClosed(t *testing.T) {
defer os.RemoveAll(td) defer os.RemoveAll(td)
cs := storage.NewFile(td) cs := storage.NewFile(td)
tt := &Torrent{} tt := &Torrent{}
tt.info = &testutil.GreetingMetaInfo().Info mi := testutil.GreetingMetaInfo()
info := mi.UnmarshalInfo()
tt.info = &info
tt.makePieces() tt.makePieces()
tt.storage, err = cs.OpenTorrent(tt.info) tt.storage, err = cs.OpenTorrent(tt.info, mi.HashInfoBytes())
require.NoError(t, err) require.NoError(t, err)
require.NoError(t, tt.storage.Close()) require.NoError(t, tt.storage.Close())
tt.hashPiece(0) tt.hashPiece(0)

metainfo/info.go (new file, 156 lines)
View File

@ -0,0 +1,156 @@
package metainfo
import (
"crypto/sha1"
"errors"
"fmt"
"io"
"log"
"os"
"path/filepath"
"strings"
"github.com/anacrolix/missinggo/slices"
)
// The info dictionary.
type Info struct {
PieceLength int64 `bencode:"piece length"`
Pieces []byte `bencode:"pieces"`
Name string `bencode:"name"`
Length int64 `bencode:"length,omitempty"`
Private *bool `bencode:"private,omitempty"`
Files []FileInfo `bencode:"files,omitempty"`
}
// This is a helper that sets Files and Pieces from a root path and its
// children.
func (info *Info) BuildFromFilePath(root string) (err error) {
info.Name = filepath.Base(root)
info.Files = nil
err = filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
if fi.IsDir() {
// Directories are implicit in torrent files.
return nil
} else if path == root {
// The root is a file.
info.Length = fi.Size()
return nil
}
relPath, err := filepath.Rel(root, path)
log.Println(relPath, err)
if err != nil {
return fmt.Errorf("error getting relative path: %s", err)
}
info.Files = append(info.Files, FileInfo{
Path: strings.Split(relPath, string(filepath.Separator)),
Length: fi.Size(),
})
return nil
})
if err != nil {
return
}
slices.Sort(info.Files, func(l, r FileInfo) bool {
return strings.Join(l.Path, "/") < strings.Join(r.Path, "/")
})
err = info.GeneratePieces(func(fi FileInfo) (io.ReadCloser, error) {
return os.Open(filepath.Join(root, strings.Join(fi.Path, string(filepath.Separator))))
})
if err != nil {
err = fmt.Errorf("error generating pieces: %s", err)
}
return
}
func (info *Info) writeFiles(w io.Writer, open func(fi FileInfo) (io.ReadCloser, error)) error {
for _, fi := range info.UpvertedFiles() {
r, err := open(fi)
if err != nil {
return fmt.Errorf("error opening %v: %s", fi, err)
}
wn, err := io.CopyN(w, r, fi.Length)
r.Close()
if wn != fi.Length || err != nil {
return fmt.Errorf("error hashing %v: %s", fi, err)
}
}
return nil
}
// Set info.Pieces by hashing info.Files.
func (info *Info) GeneratePieces(open func(fi FileInfo) (io.ReadCloser, error)) error {
if info.PieceLength == 0 {
return errors.New("piece length must be non-zero")
}
pr, pw := io.Pipe()
go func() {
err := info.writeFiles(pw, open)
pw.CloseWithError(err)
}()
defer pr.Close()
var pieces []byte
for {
hasher := sha1.New()
wn, err := io.CopyN(hasher, pr, info.PieceLength)
if err == io.EOF {
err = nil
}
if err != nil {
return err
}
if wn == 0 {
break
}
pieces = hasher.Sum(pieces)
if wn < info.PieceLength {
break
}
}
info.Pieces = pieces
return nil
}
func (info *Info) TotalLength() (ret int64) {
if info.IsDir() {
for _, fi := range info.Files {
ret += fi.Length
}
} else {
ret = info.Length
}
return
}
func (info *Info) NumPieces() int {
if len(info.Pieces)%20 != 0 {
panic(len(info.Pieces))
}
return len(info.Pieces) / 20
}
func (info *Info) IsDir() bool {
return len(info.Files) != 0
}
// The files field, converted up from the old single-file in the parent info
// dict if necessary. This is a helper to avoid having to conditionally handle
// single and multi-file torrent infos.
func (info *Info) UpvertedFiles() []FileInfo {
if len(info.Files) == 0 {
return []FileInfo{{
Length: info.Length,
// Callers should determine that Info.Name is the basename, and
// thus a regular file.
Path: nil,
}}
}
return info.Files
}
func (info *Info) Piece(index int) Piece {
return Piece{info, index}
}
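
A short usage sketch for the new Info type above, mirroring the cmd/torrent-create change in this commit; the root path and output filename are illustrative, the calls follow the API shown here:

package main

import (
    "log"
    "os"

    "github.com/anacrolix/torrent/bencode"
    "github.com/anacrolix/torrent/metainfo"
)

func main() {
    // SetDefaults no longer chooses a piece length; the caller sets it on Info.
    info := metainfo.Info{PieceLength: 256 * 1024}
    if err := info.BuildFromFilePath("./data"); err != nil { // illustrative root
        log.Fatal(err)
    }
    var mi metainfo.MetaInfo
    mi.SetDefaults()
    var err error
    mi.InfoBytes, err = bencode.Marshal(info)
    if err != nil {
        log.Fatal(err)
    }
    f, err := os.Create("data.torrent") // illustrative output path
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()
    if err := mi.Write(f); err != nil {
        log.Fatal(err)
    }
}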

metainfo/info_test.go (new file, 16 lines)
View File

@ -0,0 +1,16 @@
package metainfo
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/anacrolix/torrent/bencode"
)
func TestMarshalInfo(t *testing.T) {
var info Info
b, err := bencode.Marshal(info)
assert.NoError(t, err)
assert.EqualValues(t, "d4:name0:12:piece lengthi0e6:pieceslee", string(b))
}

View File

@ -1,47 +0,0 @@
package metainfo
import "github.com/anacrolix/torrent/bencode"
// A wrapper around Info that exposes the Bytes directly, in case marshalling
// and unmarshalling Info doesn't produce the same bytes.
type InfoEx struct {
Info
// Set when unmarshalling, and used when marshalling. Call .UpdateBytes to
// set it by bencoding Info.
Bytes []byte
}
var (
_ bencode.Marshaler = &InfoEx{}
_ bencode.Unmarshaler = &InfoEx{}
)
// Marshals .Info, and sets .Bytes with the result.
func (ie *InfoEx) UpdateBytes() {
var err error
ie.Bytes, err = bencode.Marshal(&ie.Info)
if err != nil {
panic(err)
}
}
// Returns the SHA1 hash of .Bytes.
func (ie *InfoEx) Hash() Hash {
return HashBytes(ie.Bytes)
}
func (ie *InfoEx) UnmarshalBencode(data []byte) error {
ie.Bytes = append([]byte(nil), data...)
return bencode.Unmarshal(data, &ie.Info)
}
func (ie *InfoEx) MarshalBencode() ([]byte, error) {
if ie.Bytes == nil {
ie.UpdateBytes()
}
return ie.Bytes, nil
}
func (info *InfoEx) Piece(i int) Piece {
return Piece{info, i}
}

View File

@ -70,11 +70,11 @@ func TestParseMagnetURI(t *testing.T) {
} }
func Test_Magnetize(t *testing.T) { func TestMagnetize(t *testing.T) {
mi, err := LoadFromFile("../testdata/bootstrap.dat.torrent") mi, err := LoadFromFile("../testdata/bootstrap.dat.torrent")
require.NoError(t, err) require.NoError(t, err)
m := mi.Magnet() m := mi.Magnet(mi.UnmarshalInfo().Name, mi.HashInfoBytes())
assert.EqualValues(t, "bootstrap.dat", m.DisplayName) assert.EqualValues(t, "bootstrap.dat", m.DisplayName)

View File

@ -1,20 +1,26 @@
package metainfo package metainfo
import ( import (
"crypto/sha1"
"errors"
"fmt" "fmt"
"io" "io"
"log"
"os" "os"
"path/filepath"
"strings"
"time" "time"
"github.com/anacrolix/missinggo/slices"
"github.com/anacrolix/torrent/bencode" "github.com/anacrolix/torrent/bencode"
) )
type MetaInfo struct {
InfoBytes bencode.Bytes `bencode:"info"`
Announce string `bencode:"announce,omitempty"`
AnnounceList [][]string `bencode:"announce-list,omitempty"`
Nodes []Node `bencode:"nodes,omitempty"`
CreationDate int64 `bencode:"creation date,omitempty"`
Comment string `bencode:"comment,omitempty"`
CreatedBy string `bencode:"created by,omitempty"`
Encoding string `bencode:"encoding,omitempty"`
URLList interface{} `bencode:"url-list,omitempty"`
}
// Information specific to a single file inside the MetaInfo structure. // Information specific to a single file inside the MetaInfo structure.
type FileInfo struct { type FileInfo struct {
Length int64 `bencode:"length"` Length int64 `bencode:"length"`
@ -43,158 +49,20 @@ func LoadFromFile(filename string) (*MetaInfo, error) {
return Load(f) return Load(f)
} }
// The info dictionary. func (mi MetaInfo) UnmarshalInfo() (info Info) {
type Info struct { err := bencode.Unmarshal(mi.InfoBytes, &info)
PieceLength int64 `bencode:"piece length"`
Pieces []byte `bencode:"pieces"`
Name string `bencode:"name"`
Length int64 `bencode:"length,omitempty"`
Private *bool `bencode:"private,omitempty"`
Files []FileInfo `bencode:"files,omitempty"`
}
// This is a helper that sets Files and Pieces from a root path and its
// children.
func (info *Info) BuildFromFilePath(root string) (err error) {
info.Name = filepath.Base(root)
info.Files = nil
err = filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
if fi.IsDir() {
// Directories are implicit in torrent files.
return nil
} else if path == root {
// The root is a file.
info.Length = fi.Size()
return nil
}
relPath, err := filepath.Rel(root, path)
log.Println(relPath, err)
if err != nil {
return fmt.Errorf("error getting relative path: %s", err)
}
info.Files = append(info.Files, FileInfo{
Path: strings.Split(relPath, string(filepath.Separator)),
Length: fi.Size(),
})
return nil
})
if err != nil { if err != nil {
return panic(fmt.Sprintf("bad info bytes: %s", err))
}
slices.Sort(info.Files, func(l, r FileInfo) bool {
return strings.Join(l.Path, "/") < strings.Join(r.Path, "/")
})
err = info.GeneratePieces(func(fi FileInfo) (io.ReadCloser, error) {
return os.Open(filepath.Join(root, strings.Join(fi.Path, string(filepath.Separator))))
})
if err != nil {
err = fmt.Errorf("error generating pieces: %s", err)
} }
return return
} }
func (info *Info) writeFiles(w io.Writer, open func(fi FileInfo) (io.ReadCloser, error)) error { func (mi MetaInfo) HashInfoBytes() (infoHash Hash) {
for _, fi := range info.UpvertedFiles() { return HashBytes(mi.InfoBytes)
r, err := open(fi)
if err != nil {
return fmt.Errorf("error opening %v: %s", fi, err)
}
wn, err := io.CopyN(w, r, fi.Length)
r.Close()
if wn != fi.Length || err != nil {
return fmt.Errorf("error hashing %v: %s", fi, err)
}
}
return nil
}
// Set info.Pieces by hashing info.Files.
func (info *Info) GeneratePieces(open func(fi FileInfo) (io.ReadCloser, error)) error {
if info.PieceLength == 0 {
return errors.New("piece length must be non-zero")
}
pr, pw := io.Pipe()
go func() {
err := info.writeFiles(pw, open)
pw.CloseWithError(err)
}()
defer pr.Close()
var pieces []byte
for {
hasher := sha1.New()
wn, err := io.CopyN(hasher, pr, info.PieceLength)
if err == io.EOF {
err = nil
}
if err != nil {
return err
}
if wn == 0 {
break
}
pieces = hasher.Sum(pieces)
if wn < info.PieceLength {
break
}
}
info.Pieces = pieces
return nil
}
func (info *Info) TotalLength() (ret int64) {
if info.IsDir() {
for _, fi := range info.Files {
ret += fi.Length
}
} else {
ret = info.Length
}
return
}
func (info *Info) NumPieces() int {
if len(info.Pieces)%20 != 0 {
panic(len(info.Pieces))
}
return len(info.Pieces) / 20
}
func (info *Info) IsDir() bool {
return len(info.Files) != 0
}
// The files field, converted up from the old single-file in the parent info
// dict if necessary. This is a helper to avoid having to conditionally handle
// single and multi-file torrent infos.
func (info *Info) UpvertedFiles() []FileInfo {
if len(info.Files) == 0 {
return []FileInfo{{
Length: info.Length,
// Callers should determine that Info.Name is the basename, and
// thus a regular file.
Path: nil,
}}
}
return info.Files
}
type MetaInfo struct {
Info InfoEx `bencode:"info"`
Announce string `bencode:"announce,omitempty"`
AnnounceList [][]string `bencode:"announce-list,omitempty"`
Nodes []Node `bencode:"nodes,omitempty"`
CreationDate int64 `bencode:"creation date,omitempty"`
Comment string `bencode:"comment,omitempty"`
CreatedBy string `bencode:"created by,omitempty"`
Encoding string `bencode:"encoding,omitempty"`
URLList interface{} `bencode:"url-list,omitempty"`
} }
// Encode to bencoded form. // Encode to bencoded form.
func (mi *MetaInfo) Write(w io.Writer) error { func (mi MetaInfo) Write(w io.Writer) error {
return bencode.NewEncoder(w).Encode(mi) return bencode.NewEncoder(w).Encode(mi)
} }
@ -203,11 +71,11 @@ func (mi *MetaInfo) SetDefaults() {
mi.Comment = "yoloham" mi.Comment = "yoloham"
mi.CreatedBy = "github.com/anacrolix/torrent" mi.CreatedBy = "github.com/anacrolix/torrent"
mi.CreationDate = time.Now().Unix() mi.CreationDate = time.Now().Unix()
mi.Info.PieceLength = 256 * 1024 // mi.Info.PieceLength = 256 * 1024
} }
// Creates a Magnet from a MetaInfo. // Creates a Magnet from a MetaInfo.
func (mi *MetaInfo) Magnet() (m Magnet) { func (mi *MetaInfo) Magnet(displayName string, infoHash Hash) (m Magnet) {
for _, tier := range mi.AnnounceList { for _, tier := range mi.AnnounceList {
for _, tracker := range tier { for _, tracker := range tier {
m.Trackers = append(m.Trackers, tracker) m.Trackers = append(m.Trackers, tracker)
@ -216,7 +84,7 @@ func (mi *MetaInfo) Magnet() (m Magnet) {
if m.Trackers == nil && mi.Announce != "" { if m.Trackers == nil && mi.Announce != "" {
m.Trackers = []string{mi.Announce} m.Trackers = []string{mi.Announce}
} }
m.DisplayName = mi.Info.Name m.DisplayName = displayName
m.InfoHash = mi.Info.Hash() m.InfoHash = infoHash
return return
} }

View File

@ -19,11 +19,12 @@ func testFile(t *testing.T, filename string) {
mi, err := LoadFromFile(filename) mi, err := LoadFromFile(filename)
require.NoError(t, err) require.NoError(t, err)
if len(mi.Info.Files) == 1 { info := mi.UnmarshalInfo()
t.Logf("Single file: %s (length: %d)\n", mi.Info.Name, mi.Info.Files[0].Length) if len(info.Files) == 1 {
t.Logf("Single file: %s (length: %d)\n", info.Name, info.Files[0].Length)
} else { } else {
t.Logf("Multiple files: %s\n", mi.Info.Name) t.Logf("Multiple files: %s\n", info.Name)
for _, f := range mi.Info.Files { for _, f := range info.Files {
t.Logf(" - %s (length: %d)\n", path.Join(f.Path...), f.Length) t.Logf(" - %s (length: %d)\n", path.Join(f.Path...), f.Length)
} }
} }
@ -34,9 +35,9 @@ func testFile(t *testing.T, filename string) {
} }
} }
b, err := bencode.Marshal(&mi.Info.Info) b, err := bencode.Marshal(&info)
require.NoError(t, err) require.NoError(t, err)
assert.EqualValues(t, string(b), string(mi.Info.Bytes)) assert.EqualValues(t, string(b), string(mi.InfoBytes))
} }
func TestFile(t *testing.T) { func TestFile(t *testing.T) {
@ -96,3 +97,21 @@ func TestBuildFromFilePathOrder(t *testing.T) {
Path: []string{"b"}, Path: []string{"b"},
}}, info.Files) }}, info.Files)
} }
func testUnmarshal(t *testing.T, input string, expected *MetaInfo) {
var actual MetaInfo
err := bencode.Unmarshal([]byte(input), &actual)
if expected == nil {
assert.Error(t, err)
return
}
assert.NoError(t, err)
assert.EqualValues(t, *expected, actual)
}
func TestUnmarshal(t *testing.T) {
testUnmarshal(t, `de`, &MetaInfo{})
testUnmarshal(t, `d4:infoe`, &MetaInfo{})
testUnmarshal(t, `d4:infoabce`, nil)
testUnmarshal(t, `d4:infodee`, &MetaInfo{InfoBytes: []byte("de")})
}

View File

@ -42,15 +42,16 @@ func TestNodesListPairsBEP5(t *testing.T) {
} }
func testMarshalMetainfo(t *testing.T, expected string, mi *MetaInfo) { func testMarshalMetainfo(t *testing.T, expected string, mi *MetaInfo) {
b, err := bencode.Marshal(mi) b, err := bencode.Marshal(*mi)
assert.NoError(t, err) assert.NoError(t, err)
assert.EqualValues(t, expected, string(b)) assert.EqualValues(t, expected, string(b))
} }
func TestMarshalMetainfoNodes(t *testing.T) { func TestMarshalMetainfoNodes(t *testing.T) {
testMarshalMetainfo(t, "d4:infod4:name0:12:piece lengthi0e6:piecesleee", &MetaInfo{}) testMarshalMetainfo(t, "d4:infodee", &MetaInfo{InfoBytes: []byte("de")})
testMarshalMetainfo(t, "d4:infod4:name0:12:piece lengthi0e6:pieceslee5:nodesl12:1.2.3.4:555514:not a hostportee", &MetaInfo{ testMarshalMetainfo(t, "d4:infod2:hi5:theree5:nodesl12:1.2.3.4:555514:not a hostportee", &MetaInfo{
Nodes: []Node{"1.2.3.4:5555", "not a hostport"}, Nodes: []Node{"1.2.3.4:5555", "not a hostport"},
InfoBytes: []byte("d2:hi5:theree"),
}) })
} }

View File

@ -3,7 +3,7 @@ package metainfo
import "github.com/anacrolix/missinggo" import "github.com/anacrolix/missinggo"
type Piece struct { type Piece struct {
Info *InfoEx Info *Info
i int i int
} }
@ -26,12 +26,3 @@ func (p Piece) Hash() (ret Hash) {
func (p Piece) Index() int { func (p Piece) Index() int {
return p.i return p.i
} }
func (p Piece) Key() PieceKey {
return PieceKey{p.Info.Hash(), p.i}
}
type PieceKey struct {
Hash Hash
Index int
}

metainfo/piece_key.go (new file, 6 lines)
View File

@ -0,0 +1,6 @@
package metainfo
type PieceKey struct {
InfoHash Hash
Index int
}

View File

@ -7,8 +7,8 @@ import (
) )
type pieceCompletion interface { type pieceCompletion interface {
Get(metainfo.Piece) (bool, error) Get(metainfo.PieceKey) (bool, error)
Set(metainfo.Piece, bool) error Set(metainfo.PieceKey, bool) error
Close() Close()
} }
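
With Piece.Key and its on-the-fly infohash gone, completion state is keyed by the new metainfo.PieceKey, as the map-based implementation in the following file does. A tiny self-contained sketch of that keying; the bencoded string fed to HashBytes is just the empty-Info encoding from the TestMarshalInfo expectation earlier in this commit:

package main

import (
    "fmt"

    "github.com/anacrolix/torrent/metainfo"
)

func main() {
    // Completion is tracked per (infohash, piece index) pair.
    completed := map[metainfo.PieceKey]struct{}{}
    ih := metainfo.HashBytes([]byte("d4:name0:12:piece lengthi0e6:pieceslee"))
    pk := metainfo.PieceKey{InfoHash: ih, Index: 0}
    completed[pk] = struct{}{}
    _, ok := completed[pk]
    fmt.Println(ok) // true
}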

View File

@ -10,19 +10,19 @@ type mapPieceCompletion struct {
func (mapPieceCompletion) Close() {} func (mapPieceCompletion) Close() {}
func (me *mapPieceCompletion) Get(p metainfo.Piece) (bool, error) { func (me *mapPieceCompletion) Get(pk metainfo.PieceKey) (bool, error) {
_, ok := me.m[p.Key()] _, ok := me.m[pk]
return ok, nil return ok, nil
} }
func (me *mapPieceCompletion) Set(p metainfo.Piece, b bool) error { func (me *mapPieceCompletion) Set(pk metainfo.PieceKey, b bool) error {
if b { if b {
if me.m == nil { if me.m == nil {
me.m = make(map[metainfo.PieceKey]struct{}) me.m = make(map[metainfo.PieceKey]struct{})
} }
me.m[p.Key()] = struct{}{} me.m[pk] = struct{}{}
} else { } else {
delete(me.m, p.Key()) delete(me.m, pk)
} }
return nil return nil
} }

View File

@ -26,17 +26,17 @@ func newDBPieceCompletion(dir string) (ret *dbPieceCompletion, err error) {
return return
} }
func (me *dbPieceCompletion) Get(p metainfo.Piece) (ret bool, err error) { func (me *dbPieceCompletion) Get(pk metainfo.PieceKey) (ret bool, err error) {
row := me.db.QueryRow(`select exists(select * from completed where infohash=? and "index"=?)`, p.Info.Hash().HexString(), p.Index()) row := me.db.QueryRow(`select exists(select * from completed where infohash=? and "index"=?)`, pk.InfoHash.HexString(), pk.Index)
err = row.Scan(&ret) err = row.Scan(&ret)
return return
} }
func (me *dbPieceCompletion) Set(p metainfo.Piece, b bool) (err error) { func (me *dbPieceCompletion) Set(pk metainfo.PieceKey, b bool) (err error) {
if b { if b {
_, err = me.db.Exec(`insert into completed (infohash, "index") values (?, ?)`, p.Info.Hash().HexString(), p.Index()) _, err = me.db.Exec(`insert into completed (infohash, "index") values (?, ?)`, pk.InfoHash.HexString(), pk.Index)
} else { } else {
_, err = me.db.Exec(`delete from completed where infohash=? and "index"=?`, p.Info.Hash().HexString(), p.Index()) _, err = me.db.Exec(`delete from completed where infohash=? and "index"=?`, pk.InfoHash.HexString(), pk.Index)
} }
return return
} }

View File

@ -23,10 +23,11 @@ func NewFile(baseDir string) Client {
} }
} }
func (fs *fileStorage) OpenTorrent(info *metainfo.InfoEx) (Torrent, error) { func (fs *fileStorage) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (Torrent, error) {
return &fileTorrentStorage{ return &fileTorrentStorage{
fs, fs,
&info.Info, info,
infoHash,
pieceCompletionForDir(fs.baseDir), pieceCompletionForDir(fs.baseDir),
}, nil }, nil
} }
@ -35,6 +36,7 @@ func (fs *fileStorage) OpenTorrent(info *metainfo.InfoEx) (Torrent, error) {
type fileTorrentStorage struct { type fileTorrentStorage struct {
fs *fileStorage fs *fileStorage
info *metainfo.Info info *metainfo.Info
infoHash metainfo.Hash
completion pieceCompletion completion pieceCompletion
} }

View File

@ -9,32 +9,29 @@ import (
) )
func TestExtentCompleteRequiredLengths(t *testing.T) { func TestExtentCompleteRequiredLengths(t *testing.T) {
info := &metainfo.InfoEx{ info := &metainfo.Info{
Info: metainfo.Info{ Files: []metainfo.FileInfo{
Files: []metainfo.FileInfo{ {Path: []string{"a"}, Length: 2},
{Path: []string{"a"}, Length: 2}, {Path: []string{"b"}, Length: 3},
{Path: []string{"b"}, Length: 3},
},
}, },
} }
assert.Empty(t, extentCompleteRequiredLengths(&info.Info, 0, 0)) assert.Empty(t, extentCompleteRequiredLengths(info, 0, 0))
assert.EqualValues(t, []metainfo.FileInfo{ assert.EqualValues(t, []metainfo.FileInfo{
{Path: []string{"a"}, Length: 1}, {Path: []string{"a"}, Length: 1},
}, extentCompleteRequiredLengths(&info.Info, 0, 1)) }, extentCompleteRequiredLengths(info, 0, 1))
assert.EqualValues(t, []metainfo.FileInfo{ assert.EqualValues(t, []metainfo.FileInfo{
{Path: []string{"a"}, Length: 2}, {Path: []string{"a"}, Length: 2},
}, extentCompleteRequiredLengths(&info.Info, 0, 2)) }, extentCompleteRequiredLengths(info, 0, 2))
assert.EqualValues(t, []metainfo.FileInfo{ assert.EqualValues(t, []metainfo.FileInfo{
{Path: []string{"a"}, Length: 2}, {Path: []string{"a"}, Length: 2},
{Path: []string{"b"}, Length: 1}, {Path: []string{"b"}, Length: 1},
}, extentCompleteRequiredLengths(&info.Info, 0, 3)) }, extentCompleteRequiredLengths(info, 0, 3))
assert.EqualValues(t, []metainfo.FileInfo{ assert.EqualValues(t, []metainfo.FileInfo{
{Path: []string{"b"}, Length: 2}, {Path: []string{"b"}, Length: 2},
}, extentCompleteRequiredLengths(&info.Info, 2, 2)) }, extentCompleteRequiredLengths(info, 2, 2))
assert.EqualValues(t, []metainfo.FileInfo{ assert.EqualValues(t, []metainfo.FileInfo{
{Path: []string{"b"}, Length: 3}, {Path: []string{"b"}, Length: 3},
}, extentCompleteRequiredLengths(&info.Info, 4, 1)) }, extentCompleteRequiredLengths(info, 4, 1))
assert.Len(t, extentCompleteRequiredLengths(&info.Info, 5, 0), 0) assert.Len(t, extentCompleteRequiredLengths(info, 5, 0), 0)
assert.Panics(t, func() { extentCompleteRequiredLengths(&info.Info, 6, 1) }) assert.Panics(t, func() { extentCompleteRequiredLengths(info, 6, 1) })
} }

View File

@ -14,14 +14,18 @@ type fileStoragePiece struct {
r io.ReaderAt r io.ReaderAt
} }
func (me *fileStoragePiece) pieceKey() metainfo.PieceKey {
return metainfo.PieceKey{me.infoHash, me.p.Index()}
}
func (fs *fileStoragePiece) GetIsComplete() bool { func (fs *fileStoragePiece) GetIsComplete() bool {
ret, err := fs.completion.Get(fs.p) ret, err := fs.completion.Get(fs.pieceKey())
if err != nil || !ret { if err != nil || !ret {
return false return false
} }
// If it's allegedly complete, check that its constituent files have the // If it's allegedly complete, check that its constituent files have the
// necessary length. // necessary length.
for _, fi := range extentCompleteRequiredLengths(&fs.p.Info.Info, fs.p.Offset(), fs.p.Length()) { for _, fi := range extentCompleteRequiredLengths(fs.p.Info, fs.p.Offset(), fs.p.Length()) {
s, err := os.Stat(fs.fileInfoName(fi)) s, err := os.Stat(fs.fileInfoName(fi))
if err != nil || s.Size() < fi.Length { if err != nil || s.Size() < fi.Length {
ret = false ret = false
@ -32,12 +36,12 @@ func (fs *fileStoragePiece) GetIsComplete() bool {
return true return true
} }
// The completion was wrong, fix it. // The completion was wrong, fix it.
fs.completion.Set(fs.p, false) fs.completion.Set(fs.pieceKey(), false)
return false return false
} }
func (fs *fileStoragePiece) MarkComplete() error { func (fs *fileStoragePiece) MarkComplete() error {
fs.completion.Set(fs.p, true) fs.completion.Set(fs.pieceKey(), true)
return nil return nil
} }
@ -50,6 +54,6 @@ func (fsp *fileStoragePiece) ReadAt(b []byte, off int64) (n int, err error) {
if off < 0 || off >= fsp.p.Length() { if off < 0 || off >= fsp.p.Length() {
return return
} }
fsp.completion.Set(fsp.p, false) fsp.completion.Set(fsp.pieceKey(), false)
return return
} }

View File

@ -20,14 +20,12 @@ func TestShortFile(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
defer os.RemoveAll(td) defer os.RemoveAll(td)
s := NewFile(td) s := NewFile(td)
info := &metainfo.InfoEx{ info := &metainfo.Info{
Info: metainfo.Info{ Name: "a",
Name: "a", Length: 2,
Length: 2, PieceLength: missinggo.MiB,
PieceLength: missinggo.MiB,
},
} }
ts, err := s.OpenTorrent(info) ts, err := s.OpenTorrent(info, metainfo.Hash{})
assert.NoError(t, err) assert.NoError(t, err)
f, err := os.Create(filepath.Join(td, "a")) f, err := os.Create(filepath.Join(td, "a"))
err = f.Truncate(1) err = f.Truncate(1)

View File

@ -8,7 +8,7 @@ import (
// Represents data storage for an unspecified torrent. // Represents data storage for an unspecified torrent.
type Client interface { type Client interface {
OpenTorrent(info *metainfo.InfoEx) (Torrent, error) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (Torrent, error)
} }
// Data storage bound to a torrent. // Data storage bound to a torrent.

View File

@ -6,31 +6,26 @@ import (
"testing" "testing"
"github.com/anacrolix/missinggo/resource" "github.com/anacrolix/missinggo/resource"
"github.com/anacrolix/torrent/metainfo"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/anacrolix/torrent/metainfo"
) )
// Two different torrents opened from the same storage. Closing one should not // Two different torrents opened from the same storage. Closing one should not
// break the piece completion on the other. // break the piece completion on the other.
func testIssue95(t *testing.T, c Client) { func testIssue95(t *testing.T, c Client) {
i1 := &metainfo.InfoEx{ i1 := &metainfo.Info{
Bytes: []byte("a"), Files: []metainfo.FileInfo{{Path: []string{"a"}}},
Info: metainfo.Info{ Pieces: make([]byte, 20),
Files: []metainfo.FileInfo{{Path: []string{"a"}}},
Pieces: make([]byte, 20),
},
} }
t1, err := c.OpenTorrent(i1) t1, err := c.OpenTorrent(i1, metainfo.HashBytes([]byte("a")))
require.NoError(t, err) require.NoError(t, err)
i2 := &metainfo.InfoEx{ i2 := &metainfo.Info{
Bytes: []byte("b"), Files: []metainfo.FileInfo{{Path: []string{"a"}}},
Info: metainfo.Info{ Pieces: make([]byte, 20),
Files: []metainfo.FileInfo{{Path: []string{"a"}}},
Pieces: make([]byte, 20),
},
} }
t2, err := c.OpenTorrent(i2) t2, err := c.OpenTorrent(i2, metainfo.HashBytes([]byte("b")))
require.NoError(t, err) require.NoError(t, err)
t2p := t2.Piece(i2.Piece(0)) t2p := t2.Piece(i2.Piece(0))
assert.NoError(t, t1.Close()) assert.NoError(t, t1.Close())

View File

@ -15,13 +15,11 @@ func testMarkedCompleteMissingOnRead(t *testing.T, csf func(string) Client) {
require.NoError(t, err) require.NoError(t, err)
defer os.RemoveAll(td) defer os.RemoveAll(td)
cs := csf(td) cs := csf(td)
info := &metainfo.InfoEx{ info := &metainfo.Info{
Info: metainfo.Info{ PieceLength: 1,
PieceLength: 1, Files: []metainfo.FileInfo{{Path: []string{"a"}, Length: 1}},
Files: []metainfo.FileInfo{{Path: []string{"a"}, Length: 1}},
},
} }
ts, err := cs.OpenTorrent(info) ts, err := cs.OpenTorrent(info, metainfo.Hash{})
require.NoError(t, err) require.NoError(t, err)
p := ts.Piece(info.Piece(0)) p := ts.Piece(info.Piece(0))
require.NoError(t, p.MarkComplete()) require.NoError(t, p.MarkComplete())

View File

@ -23,8 +23,8 @@ func NewMMap(baseDir string) Client {
} }
} }
func (s *mmapStorage) OpenTorrent(info *metainfo.InfoEx) (t Torrent, err error) { func (s *mmapStorage) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (t Torrent, err error) {
span, err := mMapTorrent(&info.Info, s.baseDir) span, err := mMapTorrent(info, s.baseDir)
t = &mmapTorrentStorage{ t = &mmapTorrentStorage{
span: span, span: span,
pc: pieceCompletionForDir(s.baseDir), pc: pieceCompletionForDir(s.baseDir),
@ -54,17 +54,22 @@ func (ts *mmapTorrentStorage) Close() error {
type mmapStoragePiece struct { type mmapStoragePiece struct {
pc pieceCompletion pc pieceCompletion
p metainfo.Piece p metainfo.Piece
ih metainfo.Hash
io.ReaderAt io.ReaderAt
io.WriterAt io.WriterAt
} }
func (me mmapStoragePiece) pieceKey() metainfo.PieceKey {
return metainfo.PieceKey{me.ih, me.p.Index()}
}
func (sp mmapStoragePiece) GetIsComplete() (ret bool) { func (sp mmapStoragePiece) GetIsComplete() (ret bool) {
ret, _ = sp.pc.Get(sp.p) ret, _ = sp.pc.Get(sp.pieceKey())
return return
} }
func (sp mmapStoragePiece) MarkComplete() error { func (sp mmapStoragePiece) MarkComplete() error {
sp.pc.Set(sp.p, true) sp.pc.Set(sp.pieceKey(), true)
return nil return nil
} }

View File

@ -25,7 +25,7 @@ type pieceFileTorrentStorage struct {
s *pieceFileStorage s *pieceFileStorage
} }
func (s *pieceFileStorage) OpenTorrent(info *metainfo.InfoEx) (Torrent, error) { func (s *pieceFileStorage) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (Torrent, error) {
return &pieceFileTorrentStorage{s}, nil return &pieceFileTorrentStorage{s}, nil
} }

View File

@ -20,7 +20,7 @@ func NewResourcePieces(p resource.Provider) Client {
} }
} }
func (s *piecePerResource) OpenTorrent(info *metainfo.InfoEx) (Torrent, error) { func (s *piecePerResource) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (Torrent, error) {
return s, nil return s, nil
} }

t.go (4 changed lines)
View File

@ -24,7 +24,7 @@ func (t *Torrent) GotInfo() <-chan struct{} {
} }
// Returns the metainfo info dictionary, or nil if it's not yet available. // Returns the metainfo info dictionary, or nil if it's not yet available.
func (t *Torrent) Info() *metainfo.InfoEx { func (t *Torrent) Info() *metainfo.Info {
return t.info return t.info
} }
@ -117,7 +117,7 @@ func (t *Torrent) Length() int64 {
// Returns a run-time generated metainfo for the torrent that includes the // Returns a run-time generated metainfo for the torrent that includes the
// info bytes and announce-list as currently known to the client. // info bytes and announce-list as currently known to the client.
func (t *Torrent) Metainfo() *metainfo.MetaInfo { func (t *Torrent) Metainfo() metainfo.MetaInfo {
t.cl.mu.Lock() t.cl.mu.Lock()
defer t.cl.mu.Unlock() defer t.cl.mu.Unlock()
return t.newMetaInfo() return t.newMetaInfo()

View File

@ -62,7 +62,7 @@ type Torrent struct {
metainfo metainfo.MetaInfo metainfo metainfo.MetaInfo
// The info dict. nil if we don't have it (yet). // The info dict. nil if we don't have it (yet).
info *metainfo.InfoEx info *metainfo.Info
// Active peer connections, running message stream loops. // Active peer connections, running message stream loops.
conns []*connection conns []*connection
maxEstablishedConns int maxEstablishedConns int
@ -210,7 +210,7 @@ func infoPieceHashes(info *metainfo.Info) (ret []string) {
} }
func (t *Torrent) makePieces() { func (t *Torrent) makePieces() {
hashes := infoPieceHashes(&t.info.Info) hashes := infoPieceHashes(t.info)
t.pieces = make([]piece, len(hashes)) t.pieces = make([]piece, len(hashes))
for i, hash := range hashes { for i, hash := range hashes {
piece := &t.pieces[i] piece := &t.pieces[i]
@@ -226,24 +226,24 @@ func (t *Torrent) setInfoBytes(b []byte) error {
 	if t.haveInfo() {
 		return nil
 	}
-	var ie *metainfo.InfoEx
-	err := bencode.Unmarshal(b, &ie)
+	if metainfo.HashBytes(b) != t.infoHash {
+		return errors.New("info bytes have wrong hash")
+	}
+	var info metainfo.Info
+	err := bencode.Unmarshal(b, &info)
 	if err != nil {
 		return fmt.Errorf("error unmarshalling info bytes: %s", err)
 	}
-	if ie.Hash() != t.infoHash {
-		return errors.New("info bytes have wrong hash")
-	}
-	err = validateInfo(&ie.Info)
+	err = validateInfo(&info)
 	if err != nil {
 		return fmt.Errorf("bad info: %s", err)
 	}
 	defer t.updateWantPeersEvent()
-	t.info = ie
+	t.info = &info
 	t.displayName = "" // Save a few bytes lol.
 	t.cl.event.Broadcast()
 	t.gotMetainfo.Set()
-	t.storage, err = t.storageOpener.OpenTorrent(t.info)
+	t.storage, err = t.storageOpener.OpenTorrent(t.info, t.infoHash)
 	if err != nil {
 		return fmt.Errorf("error opening torrent storage: %s", err)
 	}
@@ -475,17 +475,14 @@ func (t *Torrent) announceList() (al [][]string) {
 
 // Returns a run-time generated MetaInfo that includes the info bytes and
 // announce-list as currently known to the client.
-func (t *Torrent) newMetaInfo() (mi *metainfo.MetaInfo) {
-	mi = &metainfo.MetaInfo{
+func (t *Torrent) newMetaInfo() metainfo.MetaInfo {
+	return metainfo.MetaInfo{
 		CreationDate: time.Now().Unix(),
 		Comment:      "dynamic metainfo from client",
 		CreatedBy:    "go.torrent",
 		AnnounceList: t.announceList(),
+		InfoBytes:    t.metadataBytes,
 	}
-	if t.info != nil {
-		mi.Info = *t.info
-	}
-	return
 }
 
 func (t *Torrent) BytesMissing() int64 {

View File

@ -69,7 +69,7 @@ func torrentFileInfoHash(fileName string) (ih metainfo.Hash, ok bool) {
if mi == nil { if mi == nil {
return return
} }
ih = mi.Info.Hash() ih = mi.HashInfoBytes()
ok = true ok = true
return return
} }