package torrent
import (
	"math/rand"
	"sync"

	"github.com/bradfitz/iter"

	pp "github.com/anacrolix/torrent/peer_protocol"
)
// piecePriority describes the importance of obtaining a particular piece.
type piecePriority byte

// Raise bumps the priority up to maybe if maybe is higher than the current
// value; a lower or equal value leaves the priority unchanged.
func (me *piecePriority) Raise(maybe piecePriority) {
	if maybe > *me {
		*me = maybe
	}
}
const (
PiecePriorityNone piecePriority = iota // Not wanted.
PiecePriorityNormal // Wanted.
PiecePriorityReadahead // May be required soon.
PiecePriorityNext // Succeeds a piece where a read occurred.
PiecePriorityNow // A read occurred in this piece.
2015-04-08 00:14:35 +08:00
)
type piece struct {
2015-06-16 14:57:47 +08:00
// The completed piece SHA1 hash, from the metainfo "pieces" field.
Hash pieceSum
// Chunks we've written to since the last check. The chunk offset and
// length can be determined by the request chunkSize in use.
DirtyChunks []bool
Hashing bool
QueuedForHash bool
EverHashed bool
PublicPieceState PieceState
priority piecePriority
pendingWritesMutex sync.Mutex
pendingWrites int
noPendingWrites sync.Cond
2015-04-08 00:14:35 +08:00
}
// pendingChunk reports whether the chunk described by cs still needs to be
// downloaded, i.e. it has not been marked dirty since the last hash check.
// Chunks beyond the tracked range are considered pending.
func (p *piece) pendingChunk(cs chunkSpec, chunkSize pp.Integer) bool {
	i := chunkIndex(cs, chunkSize)
	if i < len(p.DirtyChunks) {
		return !p.DirtyChunks[i]
	}
	return true
}
// hasDirtyChunks reports whether at least one chunk has been written to
// since the last hash check.
func (p *piece) hasDirtyChunks() bool {
	for i := range p.DirtyChunks {
		if p.DirtyChunks[i] {
			return true
		}
	}
	return false
}
// numDirtyChunks counts the chunks written to since the last hash check.
func (p *piece) numDirtyChunks() (ret int) {
	for i := range p.DirtyChunks {
		if p.DirtyChunks[i] {
			ret++
		}
	}
	return ret
}
// unpendChunkIndex marks chunk i as dirty (written), growing DirtyChunks
// with false entries as needed so index i is addressable.
func (p *piece) unpendChunkIndex(i int) {
	if short := i + 1 - len(p.DirtyChunks); short > 0 {
		p.DirtyChunks = append(p.DirtyChunks, make([]bool, short)...)
	}
	p.DirtyChunks[i] = true
}
// pendChunkIndex clears the dirty flag for chunk i, making it pending
// again. Indices beyond the tracked range are already pending; no-op.
func (p *piece) pendChunkIndex(i int) {
	if i < len(p.DirtyChunks) {
		p.DirtyChunks[i] = false
	}
}
// chunkIndexSpec returns the chunkSpec for the chunk at index within a
// piece of the given length; the final chunk is truncated to fit.
func chunkIndexSpec(index int, pieceLength, chunkSize pp.Integer) chunkSpec {
	begin := pp.Integer(index) * chunkSize
	length := chunkSize
	if begin+length > pieceLength {
		length = pieceLength - begin
	}
	return chunkSpec{begin, length}
}
func (p *piece) shuffledPendingChunkSpecs(t *torrent, piece int) (css []chunkSpec) {
// defer func() {
// log.Println(piece, css)
// }()
numPending := t.pieceNumPendingChunks(piece)
if numPending == 0 {
2015-04-08 00:14:35 +08:00
return
}
css = make([]chunkSpec, 0, numPending)
for ci := range iter.N(t.pieceNumChunks(piece)) {
if ci >= len(p.DirtyChunks) || !p.DirtyChunks[ci] {
css = append(css, t.chunkIndexSpec(ci, piece))
}
2015-04-08 00:14:35 +08:00
}
if len(css) <= 1 {
return
}
for i := range css {
j := rand.Intn(i + 1)
css[i], css[j] = css[j], css[i]
}
return
}
// incrementPendingWrites records that a write to this piece has started.
func (p *piece) incrementPendingWrites() {
	p.pendingWritesMutex.Lock()
	defer p.pendingWritesMutex.Unlock()
	p.pendingWrites++
}
// decrementPendingWrites records that a write to this piece has finished,
// waking waiters when the count reaches zero. Panics if the count is
// already zero. The explicit Unlock (rather than defer) preserves the
// original behavior of panicking while the mutex is held.
func (p *piece) decrementPendingWrites() {
	p.pendingWritesMutex.Lock()
	switch p.pendingWrites {
	case 0:
		panic("assertion")
	case 1:
		p.pendingWrites = 0
		p.noPendingWrites.Broadcast()
	default:
		p.pendingWrites--
	}
	p.pendingWritesMutex.Unlock()
}
// waitNoPendingWrites blocks until no writes to this piece are in flight.
// noPendingWrites.L must have been set to pendingWritesMutex elsewhere for
// Wait to release and reacquire the lock correctly.
func (p *piece) waitNoPendingWrites() {
	p.pendingWritesMutex.Lock()
	defer p.pendingWritesMutex.Unlock()
	for p.pendingWrites != 0 {
		p.noPendingWrites.Wait()
	}
}