Ditch Data.WriteSectionTo, and fix cmd/torrent-verify

Matt Joiner 2016-02-18 11:45:31 +11:00
parent dcce061152
commit d5f888069f
7 changed files with 25 additions and 123 deletions
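
The pattern every caller converges on after this commit, shown as a standalone sketch: wrap any io.ReaderAt in io.NewSectionReader and io.Copy it into the hash, instead of requiring each storage backend to implement a bespoke WriteSectionTo. The hashSection helper and the in-memory data below are illustrative only and not part of the torrent package.

package main

import (
    "bytes"
    "crypto/sha1"
    "fmt"
    "io"
)

// hashSection hashes an arbitrary byte section of any io.ReaderAt by
// wrapping it in io.NewSectionReader and copying into the hash. This is
// the generic replacement for per-backend WriteSectionTo methods.
func hashSection(r io.ReaderAt, off, n int64) ([]byte, int64, error) {
    h := sha1.New()
    copied, err := io.Copy(h, io.NewSectionReader(r, off, n))
    return h.Sum(nil), copied, err
}

func main() {
    // Two fake 10-byte "pieces" backed by an in-memory io.ReaderAt.
    data := bytes.NewReader([]byte("0123456789abcdefghij"))
    for piece := int64(0); piece < 2; piece++ {
        sum, n, err := hashSection(data, piece*10, 10)
        fmt.Printf("piece %d: %x (%d bytes, err=%v)\n", piece, sum, n, err)
    }
}

This is the same shape of call that cmd/torrent-verify and torrent.hashPiece adopt in the hunks below.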

View File

@@ -436,12 +436,6 @@ func (me badData) WriteAt(b []byte, off int64) (int, error) {
    return 0, nil
}

func (me badData) WriteSectionTo(w io.Writer, off, n int64) (int64, error) {
    p := []byte(me.randomlyTruncatedDataString())
    written, err := w.Write(p)
    return int64(written), err
}

func (me badData) PieceComplete(piece int) bool {
    return true
}

View File

@@ -5,10 +5,12 @@ import (
    "crypto/sha1"
    "flag"
    "fmt"
    "io"
    "log"
    "os"
    "path/filepath"
    "github.com/bradfitz/iter"
    "github.com/edsrzf/mmap-go"
    "github.com/anacrolix/torrent/metainfo"
@@ -20,8 +22,11 @@ var (
    dataPath = flag.String("path", "/torrent/data", "path of the torrent data")
)

func fileToMmap(filename string, length int64, devZero *os.File) mmap.MMap {
func fileToMmap(filename string, length int64) mmap.MMap {
    osFile, err := os.Open(filename)
    if os.IsNotExist(err) {
        return nil
    }
    if err != nil {
        log.Fatal(err)
    }
@@ -38,40 +43,34 @@ func fileToMmap(filename string, length int64, devZero *os.File) mmap.MMap {
}

func main() {
    log.SetFlags(log.Flags() | log.Lshortfile)
    flag.Parse()
    metaInfo, err := metainfo.LoadFromFile(*torrentPath)
    if err != nil {
        log.Fatal(err)
    }
    devZero, err := os.Open("/dev/zero")
    if err != nil {
        log.Print(err)
    }
    defer devZero.Close()
    mMapSpan := &mmap_span.MMapSpan{}
    if len(metaInfo.Info.Files) > 0 {
        for _, file := range metaInfo.Info.Files {
            filename := filepath.Join(append([]string{*dataPath, metaInfo.Info.Name}, file.Path...)...)
            goMMap := fileToMmap(filename, file.Length, devZero)
            goMMap := fileToMmap(filename, file.Length)
            mMapSpan.Append(goMMap)
        }
        log.Println(len(metaInfo.Info.Files))
    } else {
        goMMap := fileToMmap(*dataPath, metaInfo.Info.Length, devZero)
        goMMap := fileToMmap(*dataPath, metaInfo.Info.Length)
        mMapSpan.Append(goMMap)
    }
    log.Println(mMapSpan.Size())
    log.Println(len(metaInfo.Info.Pieces))
    for piece := 0; piece < (len(metaInfo.Info.Pieces)+sha1.Size-1)/sha1.Size; piece++ {
        expectedHash := metaInfo.Info.Pieces[sha1.Size*piece : sha1.Size*(piece+1)]
        if len(expectedHash) == 0 {
            break
        }
    info := metaInfo.Info
    for i := range iter.N(metaInfo.Info.NumPieces()) {
        p := info.Piece(i)
        hash := sha1.New()
        _, err := mMapSpan.WriteSectionTo(hash, int64(piece)*metaInfo.Info.PieceLength, metaInfo.Info.PieceLength)
        _, err := io.Copy(hash, io.NewSectionReader(mMapSpan, p.Offset(), p.Length()))
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(piece, bytes.Equal(hash.Sum(nil), expectedHash))
        fmt.Printf("%d: %x: %v\n", i, p.Hash(), bytes.Equal(hash.Sum(nil), p.Hash()))
    }
}

View File

@@ -8,8 +8,6 @@ type Data interface {
    io.WriterAt
    // Bro, do you even io.Closer?
    Close()
    // If the data isn't available, err should be io.ErrUnexpectedEOF.
    WriteSectionTo(w io.Writer, off, n int64) (written int64, err error)
    // We believe the piece data will pass a hash check.
    PieceCompleted(index int) error
    // Returns true if the piece is complete.
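
With WriteSectionTo gone, this is roughly what a storage backend still has to provide. The sketch below is reconstructed from the hunk above; io.ReaderAt is not visible in this hunk but is implied by the io.NewSectionReader(t.data, ...) call in torrent.go, so treat it as an approximation rather than the canonical declaration.

package data

import "io"

// Approximate shape of the Data interface after this commit.
type Data interface {
    io.ReaderAt // implied: io.NewSectionReader(t.data, ...) needs ReadAt
    io.WriterAt
    // Bro, do you even io.Closer?
    Close()
    // We believe the piece data will pass a hash check.
    PieceCompleted(index int) error
    // Returns true if the piece is complete.
    PieceComplete(piece int) bool
}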

View File

@@ -41,6 +41,9 @@ func (me data) ReadAt(p []byte, off int64) (n int, err error) {
        }
        var f *os.File
        f, err = os.Open(me.fileInfoName(fi))
        if os.IsNotExist(err) {
            err = io.ErrUnexpectedEOF
        }
        if err != nil {
            return
        }
@@ -92,46 +95,6 @@ func (me data) WriteAt(p []byte, off int64) (n int, err error) {
    return
}

func (me data) WriteSectionTo(w io.Writer, off, n int64) (written int64, err error) {
    for _, fi := range me.info.UpvertedFiles() {
        if off >= fi.Length {
            off -= fi.Length
            continue
        }
        n1 := fi.Length - off
        if n1 > n {
            n1 = n
        }
        var f *os.File
        f, err = os.Open(me.fileInfoName(fi))
        if os.IsNotExist(err) {
            err = io.ErrUnexpectedEOF
        }
        if err != nil {
            return
        }
        var w1 int64
        w1, err = io.Copy(w, io.NewSectionReader(f, off, n1))
        f.Close()
        written += w1
        if w1 != n1 {
            if err == nil || err == io.EOF {
                err = io.ErrUnexpectedEOF
            }
            return
        } else {
            err = nil
        }
        off = 0
        n -= n1
        if n == 0 {
            return
        }
    }
    err = io.EOF
    return
}

func (me data) fileInfoName(fi metainfo.FileInfo) string {
    return filepath.Join(append([]string{me.loc, me.info.Name}, fi.Path...)...)
}
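
Mapping os.IsNotExist to io.ErrUnexpectedEOF in ReadAt matters because, with WriteSectionTo removed, "data not available" can only surface through the io.ReaderAt path, and torrent.go's hashPiece (last hunk below) checks for exactly that sentinel. A minimal sketch of the propagation, using a hypothetical missingData type that is not from this repository:

package main

import (
    "crypto/sha1"
    "fmt"
    "io"
)

// missingData stands in for storage whose backing file does not exist:
// like data/file's ReadAt after this commit, it reports io.ErrUnexpectedEOF
// rather than an os-level "not exist" error.
type missingData struct{}

func (missingData) ReadAt(p []byte, off int64) (int, error) {
    return 0, io.ErrUnexpectedEOF
}

func main() {
    hash := sha1.New()
    n, err := io.Copy(hash, io.NewSectionReader(missingData{}, 0, 1<<18))
    // io.Copy stops short and returns the sentinel, which hashPiece treats
    // as "piece data not available" rather than a hard failure.
    fmt.Println(n, err == io.ErrUnexpectedEOF)
}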

View File

@@ -73,38 +73,6 @@ func (me *data) pieceReader(p metainfo.Piece, off int64) (ret io.ReadCloser, err
    return me.store.getPieceRange(p, off, p.Length()-off)
}

func (me *data) WriteSectionTo(w io.Writer, off, n int64) (written int64, err error) {
    i := int(off / me.info.PieceLength)
    off %= me.info.PieceLength
    for n != 0 {
        if i >= me.info.NumPieces() {
            err = io.EOF
            break
        }
        p := me.info.Piece(i)
        if off >= p.Length() {
            err = io.EOF
            break
        }
        var pr io.ReadCloser
        pr, err = me.pieceReader(p, off)
        if err != nil {
            return
        }
        var n1 int64
        n1, err = io.CopyN(w, pr, n)
        pr.Close()
        written += n1
        n -= n1
        if err != nil {
            return
        }
        off = 0
        i++
    }
    return
}

func (me *data) PieceCompleted(index int) (err error) {
    return me.store.pieceCompleted(me.info.Piece(index))
}

View File

@@ -48,24 +48,6 @@ func (me MMapSpan) ReadAt(p []byte, off int64) (n int, err error) {
    return
}

func (me MMapSpan) WriteSectionTo(w io.Writer, off, n int64) (written int64, err error) {
    me.ApplyTo(off, func(intervalOffset int64, interval sizer) (stop bool) {
        var _n int
        p := (*interval.(segment).MMap)[intervalOffset:]
        if n < int64(len(p)) {
            p = p[:n]
        }
        _n, err = w.Write(p)
        written += int64(_n)
        n -= int64(_n)
        if err != nil {
            return true
        }
        return n == 0
    })
    return
}

func (me MMapSpan) WriteAt(p []byte, off int64) (n int, err error) {
    me.ApplyTo(off, func(iOff int64, i sizer) (stop bool) {
        mMap := i.(segment)

View File

@@ -669,22 +669,20 @@ func (t *torrent) pieceLength(piece int) (len_ pp.Integer) {
    return
}

func (t *torrent) hashPiece(piece int) (ps pieceSum) {
func (t *torrent) hashPiece(piece int) (ret pieceSum) {
    hash := pieceHash.New()
    p := &t.Pieces[piece]
    p.waitNoPendingWrites()
    pl := t.Info.Piece(int(piece)).Length()
    n, err := t.data.WriteSectionTo(hash, int64(piece)*t.Info.PieceLength, pl)
    if err != nil {
        if err != io.ErrUnexpectedEOF {
            log.Printf("error hashing piece with %T: %s", t.data, err)
        }
    ip := t.Info.Piece(piece)
    pl := ip.Length()
    n, err := io.Copy(hash, io.NewSectionReader(t.data, ip.Offset(), pl))
    if n == pl {
        missinggo.CopyExact(&ret, hash.Sum(nil))
        return
    }
    if n != pl {
        panic(fmt.Sprintf("%T: %d != %d", t.data, n, pl))
    if err != io.ErrUnexpectedEOF {
        log.Printf("unexpected error hashing piece with %T: %s", t.data, err)
    }
    missinggo.CopyExact(ps[:], hash.Sum(nil))
    return
}