Finally make Torrent.torrent private
A lot of code was using unintentionally exported stuff from the embedded *torrent in Torrent.
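The root cause is method promotion: embedding `*torrent` in `Torrent` promotes the embedded type's exported methods onto the outer type, so callers could reach torrent internals through the public `Torrent` handle without going through the `Client`'s locking. A minimal standalone sketch of the difference (hypothetical names, not the library's code):

    package main

    import "fmt"

    type inner struct{ length int64 }

    // Length is exported even though inner itself is not.
    func (i *inner) Length() int64 { return i.length }

    // Embedding *inner promotes Length onto Outer's public method set.
    type Outer struct {
        *inner
    }

    // A named field keeps the method off the outer type entirely.
    type OuterPrivate struct {
        inner *inner
    }

    func main() {
        o := Outer{&inner{42}}
        fmt.Println(o.Length()) // 42: promoted, reachable by any caller

        p := OuterPrivate{&inner{42}}
        fmt.Println(p.inner.Length()) // only reachable explicitly, within the package
    }

This is why the diff below mechanically rewrites `t.foo()` to `t.torrent.foo()` throughout: once the field is named rather than embedded, nothing is promoted.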
This commit is contained in:
parent 8696f32e58
commit 89a6a20af6
client.go

@@ -242,7 +242,7 @@ func (cl *Client) WriteStatus(_w io.Writer) {
 		}
 		fmt.Fprint(w, "\n")
 		if t.haveInfo() {
-			fmt.Fprintf(w, "%f%% of %d bytes", 100*(1-float32(t.bytesLeft())/float32(t.Length())), t.Length())
+			fmt.Fprintf(w, "%f%% of %d bytes", 100*(1-float32(t.bytesLeft())/float32(t.length)), t.length)
 		} else {
 			w.WriteString("<missing metainfo>")
 		}

@@ -2052,7 +2052,7 @@ func (t Torrent) Files() (ret []File) {
 func (t Torrent) SetRegionPriority(off, len int64) {
 	t.cl.mu.Lock()
 	defer t.cl.mu.Unlock()
-	pieceSize := int64(t.usualPieceSize())
+	pieceSize := int64(t.torrent.usualPieceSize())
 	for i := off / pieceSize; i*pieceSize < off+len; i++ {
 		t.cl.raisePiecePriority(t.torrent, int(i), PiecePriorityNormal)
 	}

@@ -2071,13 +2071,13 @@ func (t Torrent) AddPeers(pp []Peer) error {
 func (t Torrent) DownloadAll() {
 	t.cl.mu.Lock()
 	defer t.cl.mu.Unlock()
-	for i := range iter.N(t.numPieces()) {
+	for i := range iter.N(t.torrent.numPieces()) {
 		t.cl.raisePiecePriority(t.torrent, i, PiecePriorityNormal)
 	}
 	// Nice to have the first and last pieces sooner for various interactive
 	// purposes.
 	t.cl.raisePiecePriority(t.torrent, 0, PiecePriorityReadahead)
-	t.cl.raisePiecePriority(t.torrent, t.numPieces()-1, PiecePriorityReadahead)
+	t.cl.raisePiecePriority(t.torrent, t.torrent.numPieces()-1, PiecePriorityReadahead)
 }

 // Returns nil metainfo if it isn't in the cache. Checks that the retrieved
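The DownloadAll hunk keeps the existing prioritization idea intact: every piece goes to normal priority, then the first and last pieces are bumped to readahead, since interactive consumers (media players probing headers and indexes at the ends of a file) benefit from having the boundaries early. A standalone sketch of that ordering, using made-up priority constants rather than the library's types:

    package main

    import "fmt"

    const (
        priNormal    = 1
        priReadahead = 2
    )

    func main() {
        const numPieces = 8
        pri := make([]int, numPieces)
        for i := range pri {
            pri[i] = priNormal // everything is wanted...
        }
        pri[0] = priReadahead           // ...but fetch the boundaries first:
        pri[numPieces-1] = priReadahead // headers/indexes often live there
        fmt.Println(pri) // [2 1 1 1 1 1 1 2]
    }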
client_test.go

@@ -292,7 +292,7 @@ func TestClientTransfer(t *testing.T) {
 	// TODO: The piece state publishing is kinda jammed in here until I have a
 	// more thorough test.
 	go func() {
-		s := leecherGreeting.pieceStateChanges.Subscribe()
+		s := leecherGreeting.torrent.pieceStateChanges.Subscribe()
 		defer s.Close()
 		for i := range s.Values {
 			log.Print(i)

@@ -412,8 +412,8 @@ func TestMergingTrackersByAddingSpecs(t *testing.T) {
 	if new {
 		t.FailNow()
 	}
-	assert.EqualValues(t, T.Trackers[0][0].URL(), "http://a")
-	assert.EqualValues(t, T.Trackers[1][0].URL(), "udp://b")
+	assert.EqualValues(t, T.torrent.Trackers[0][0].URL(), "http://a")
+	assert.EqualValues(t, T.torrent.Trackers[1][0].URL(), "udp://b")
 }

 type badData struct{}
file.go (4 changes)

@@ -54,7 +54,7 @@ type FilePieceState struct {

 // Returns the state of pieces in this file.
 func (f *File) State() (ret []FilePieceState) {
-	pieceSize := int64(f.t.usualPieceSize())
+	pieceSize := int64(f.t.torrent.usualPieceSize())
 	off := f.offset % pieceSize
 	remaining := f.length
 	for i := int(f.offset / pieceSize); ; i++ {

@@ -66,7 +66,7 @@ func (f *File) State() (ret []FilePieceState) {
 			len1 = remaining
 		}
 		f.t.cl.mu.RLock()
-		ps := f.t.pieceState(i)
+		ps := f.t.torrent.pieceState(i)
 		f.t.cl.mu.RUnlock()
 		ret = append(ret, FilePieceState{len1, ps})
 		off = 0
reader.go (18 changes)

@@ -42,30 +42,30 @@ func (r *Reader) readable(off int64) (ret bool) {
 	// defer func() {
 	// 	log.Println("readable", ret)
 	// }()
-	if r.t.isClosed() {
+	if r.t.torrent.isClosed() {
 		return true
 	}
-	req, ok := r.t.offsetRequest(off)
+	req, ok := r.t.torrent.offsetRequest(off)
 	if !ok {
 		panic(off)
 	}
 	if r.responsive {
-		return r.t.haveChunk(req)
+		return r.t.torrent.haveChunk(req)
 	}
-	return r.t.pieceComplete(int(req.Index))
+	return r.t.torrent.pieceComplete(int(req.Index))
 }

 // How many bytes are available to read. Max is the most we could require.
 func (r *Reader) available(off, max int64) (ret int64) {
 	for max > 0 {
-		req, ok := r.t.offsetRequest(off)
+		req, ok := r.t.torrent.offsetRequest(off)
 		if !ok {
 			break
 		}
-		if !r.t.haveChunk(req) {
+		if !r.t.torrent.haveChunk(req) {
 			break
 		}
-		len1 := int64(req.Length) - (off - r.t.requestOffset(req))
+		len1 := int64(req.Length) - (off - r.t.torrent.requestOffset(req))
 		max -= len1
 		ret += len1
 		off += len1

@@ -136,12 +136,12 @@ again:
 			tp.noPendingWrites.Wait()
 		}
 		tp.pendingWritesMutex.Unlock()
-		n, err = dataReadAt(r.t.data, b1, pos)
+		n, err = dataReadAt(r.t.torrent.data, b1, pos)
 		if n != 0 {
 			err = nil
 			return
 		}
-		if r.t.isClosed() {
+		if r.t.torrent.isClosed() {
 			if err == nil {
 				err = errors.New("torrent closed")
 			}
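Reader.available above walks the torrent chunk by chunk from an offset, stopping at the first missing chunk and summing what is contiguously present. A self-contained sketch of that scan, simplified to fixed-size chunks and a boolean have-map (the real code works in request units and does not clamp to max):

    package main

    import "fmt"

    const chunkSize = 4

    // available reports how many contiguous bytes starting at off are
    // present, up to max, given which fixed-size chunks we already have.
    func available(have []bool, off, max int64) (ret int64) {
        for max > 0 {
            i := off / chunkSize
            if i >= int64(len(have)) || !have[i] {
                break // first gap ends the contiguous run
            }
            len1 := chunkSize - off%chunkSize // bytes left in this chunk
            if len1 > max {
                len1 = max
            }
            max -= len1
            ret += len1
            off += len1
        }
        return
    }

    func main() {
        have := []bool{true, true, false, true}
        fmt.Println(available(have, 2, 100)) // 6: rest of chunk 0, plus chunk 1
    }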
t.go (27 changes)

@@ -11,8 +11,8 @@ import (

 // The public handle to a live torrent within a Client.
 type Torrent struct {
-	cl *Client
-	*torrent
+	cl      *Client
+	torrent *torrent
 }

 // The torrent's infohash. This is fixed and cannot change. It uniquely

@@ -48,13 +48,13 @@ func (t Torrent) NewReader() (ret *Reader) {
 // same state. The sum of the state run lengths is the number of pieces
 // in the torrent.
 func (t Torrent) PieceStateRuns() []PieceStateRun {
-	t.stateMu.Lock()
-	defer t.stateMu.Unlock()
+	t.torrent.stateMu.Lock()
+	defer t.torrent.stateMu.Unlock()
 	return t.torrent.pieceStateRuns()
 }

 func (t Torrent) NumPieces() int {
-	return t.numPieces()
+	return t.torrent.numPieces()
 }

 // Drop the torrent from the client, and close it.

@@ -68,7 +68,7 @@ func (t Torrent) Drop() {
 func (t Torrent) BytesCompleted() int64 {
 	t.cl.mu.RLock()
 	defer t.cl.mu.RUnlock()
-	return t.bytesCompleted()
+	return t.torrent.bytesCompleted()
 }

 // The subscription emits as (int) the index of pieces as their state changes.

@@ -92,3 +92,18 @@ func (t Torrent) SetDisplayName(dn string) {
 	defer t.cl.mu.Unlock()
 	t.torrent.setDisplayName(dn)
 }
+
+func (t Torrent) Name() string {
+	t.cl.mu.Lock()
+	defer t.cl.mu.Unlock()
+	return t.torrent.Name()
+}
+
+func (t Torrent) Length() int64 {
+	select {
+	case <-t.GotInfo():
+		return t.torrent.length
+	default:
+		return -1
+	}
+}
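The new exported Length accessor added above uses the closed-channel idiom: GotInfo returns a channel that is closed once metainfo arrives, so a select with a default branch answers "do we know the length yet?" without blocking. A minimal standalone sketch of the same pattern (hypothetical names):

    package main

    import "fmt"

    type handle struct {
        gotInfo chan struct{} // closed once metainfo is known
        length  int64
    }

    func (h *handle) Length() int64 {
        select {
        case <-h.gotInfo:
            return h.length // safe to read after the channel closes
        default:
            return -1 // metainfo not available yet; don't block
        }
    }

    func main() {
        h := &handle{gotInfo: make(chan struct{})}
        fmt.Println(h.Length()) // -1: channel still open

        h.length = 1 << 20
        close(h.gotInfo) // a closed channel always receives immediately
        fmt.Println(h.Length()) // 1048576
    }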
torrent.go (10 changes)

@@ -543,10 +543,6 @@ func (t *torrent) numPiecesCompleted() (num int) {
 	return
 }

-func (t *torrent) Length() int64 {
-	return t.length
-}
-
 func (t *torrent) isClosed() bool {
 	select {
 	case <-t.closing:

@@ -573,13 +569,13 @@ func (t *torrent) close() (err error) {
 }

 func (t *torrent) requestOffset(r request) int64 {
-	return torrentRequestOffset(t.Length(), int64(t.usualPieceSize()), r)
+	return torrentRequestOffset(t.length, int64(t.usualPieceSize()), r)
 }

 // Return the request that would include the given offset into the torrent
 // data. Returns !ok if there is no such request.
 func (t *torrent) offsetRequest(off int64) (req request, ok bool) {
-	return torrentOffsetRequest(t.Length(), t.Info.PieceLength, int64(t.chunkSize), off)
+	return torrentOffsetRequest(t.length, t.Info.PieceLength, int64(t.chunkSize), off)
 }

 func (t *torrent) writeChunk(piece int, begin int64, data []byte) (err error) {

@@ -650,7 +646,7 @@ func (t *torrent) pieceLength(piece int) (len_ pp.Integer) {
 		return
 	}
 	if int(piece) == t.numPieces()-1 {
-		len_ = pp.Integer(t.Length() % t.Info.PieceLength)
+		len_ = pp.Integer(t.length % t.Info.PieceLength)
 	}
 	if len_ == 0 {
 		len_ = pp.Integer(t.Info.PieceLength)
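The pieceLength hunk shows the rule the swap from t.Length() to t.length has to preserve: the last piece holds whatever remains after the full pieces, and a zero remainder means the length divides evenly, so the piece is full-sized. A worked sketch of that arithmetic in plain int64 math (not the library's pp.Integer types):

    package main

    import "fmt"

    // pieceLength mirrors the remainder rule above: the final piece gets
    // length % pieceSize, and a zero remainder falls back to a full piece.
    func pieceLength(piece, numPieces int, length, pieceSize int64) int64 {
        if piece == numPieces-1 {
            if rem := length % pieceSize; rem != 0 {
                return rem
            }
        }
        return pieceSize
    }

    func main() {
        // 2,500,000 bytes in 1 MiB pieces: two full pieces, then a
        // 2,500,000 - 2*1,048,576 = 402,848-byte tail.
        fmt.Println(pieceLength(2, 3, 2500000, 1<<20)) // 402848
        fmt.Println(pieceLength(0, 3, 2500000, 1<<20)) // 1048576
    }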