Reduce the diff to master and add peerRequesting feature const

Matt Joiner 2021-09-20 15:09:28 +10:00
parent 1d2d1a9cde
commit 510877ea43
4 changed files with 106 additions and 60 deletions

View File

@@ -305,7 +305,9 @@ func NewClient(cfg *ClientConfig) (cl *Client, err error) {
         },
     }
-    //go cl.requester()
+    if !peerRequesting {
+        go cl.requester()
+    }
     return
 }
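
Note: peerRequesting is a package-level const (declared in the requesting changes below), so this gate is resolved at compile time and the disabled branch is dead code. A minimal standalone sketch of the pattern, with hypothetical names (newScheduler and startClient are illustrative, not from this repository):

package main

import "fmt"

// Flip this const to switch code paths; the compiler eliminates the dead branch.
const newScheduler = true

func startClient() {
    if !newScheduler {
        // Never started while newScheduler is true, mirroring how cl.requester()
        // is skipped when peerRequesting is enabled.
        go fmt.Println("legacy client-wide scheduler loop")
    }
}

func main() {
    startClient()
}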

View File

@@ -655,10 +655,14 @@ func (cn *PeerConn) postBitfield() {
 }
 
 func (cn *PeerConn) updateRequests() {
-    if cn.actualRequestState.Requests.GetCardinality() != 0 {
+    if peerRequesting {
+        if cn.actualRequestState.Requests.GetCardinality() != 0 {
+            return
+        }
+        cn.tickleWriter()
         return
     }
-    cn.tickleWriter()
+    cn.t.cl.tickleRequester()
 }
 
 // Emits the indices in the Bitmaps bms in order, never repeating any index.
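
With peerRequesting enabled, a connection that has no outstanding requests wakes its own writer; otherwise it falls back to nudging the client-wide requester via tickleRequester. Below is a minimal sketch of that kind of non-blocking "tickle" wake-up; worker, tickle, and the buffered channel are illustrative assumptions, not the library's actual chansync types:

package main

import (
    "fmt"
    "time"
)

// worker models a goroutine that recomputes request state when poked.
type worker struct {
    wake chan struct{} // capacity 1 so repeated tickles coalesce into one pass
}

func newWorker() *worker {
    w := &worker{wake: make(chan struct{}, 1)}
    go w.loop()
    return w
}

// tickle wakes the worker without blocking the caller.
func (w *worker) tickle() {
    select {
    case w.wake <- struct{}{}:
    default: // a wake-up is already pending; drop this one
    }
}

func (w *worker) loop() {
    for range w.wake {
        fmt.Println("recomputing requests")
    }
}

func main() {
    w := newWorker()
    w.tickle()
    w.tickle() // coalesces with the first tickle
    time.Sleep(100 * time.Millisecond)
}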

View File

@@ -10,6 +10,9 @@ import (
     request_strategy "github.com/anacrolix/torrent/request-strategy"
 )
 
+// Calculate requests individually for each peer.
+const peerRequesting = true
+
 func (cl *Client) requester() {
     for {
         update := func() chansync.Signaled {
@@ -43,7 +46,9 @@ func (cl *Client) getRequestStrategyInput() request_strategy.Input {
     ts := make([]request_strategy.Torrent, 0, len(cl.torrents))
     for _, t := range cl.torrents {
         if !t.haveInfo() {
-            // This would be removed if metadata is handled here.
+            // This would be removed if metadata is handled here. We have to guard against not
+            // knowing the piece size. If we have no info, we have no pieces too, so the end result
+            // is the same.
             continue
         }
         rst := request_strategy.Torrent{
@@ -126,72 +131,107 @@ type RequestIndex = request_strategy.RequestIndex
 type chunkIndexType = request_strategy.ChunkIndex
 
 func (p *Peer) applyNextRequestState() bool {
-    if p.actualRequestState.Requests.GetCardinality() > uint64(p.nominalMaxRequests()/2) {
-        return true
-    }
-    type piece struct {
-        index   int
-        endGame bool
-    }
-    var pieceOrder []piece
-    request_strategy.GetRequestablePieces(
-        p.t.cl.getRequestStrategyInput(),
-        func(t *request_strategy.Torrent, rsp *request_strategy.Piece, pieceIndex int) {
-            if t.InfoHash != p.t.infoHash {
-                return
-            }
-            if !p.peerHasPiece(pieceIndex) {
-                return
-            }
-            pieceOrder = append(pieceOrder, piece{
-                index:   pieceIndex,
-                endGame: rsp.Priority == PiecePriorityNow,
-            })
-        },
-    )
-    more := true
-    interested := false
-    for _, endGameIter := range []bool{false, true} {
-        for _, piece := range pieceOrder {
-            tp := p.t.piece(piece.index)
-            tp.iterUndirtiedChunks(func(cs chunkIndexType) {
-                req := cs + tp.requestIndexOffset()
-                if !piece.endGame && !endGameIter && p.t.pendingRequests[req] > 0 {
-                    return
-                }
-                interested = true
-                more = p.setInterested(true)
-                if !more {
-                    return
-                }
-                if maxRequests(p.actualRequestState.Requests.GetCardinality()) >= p.nominalMaxRequests() {
-                    return
-                }
-                if p.peerChoking && !p.peerAllowedFast.Contains(bitmap.BitIndex(piece.index)) {
-                    return
-                }
-                var err error
-                more, err = p.request(req)
-                if err != nil {
-                    panic(err)
-                }
-            })
-            if interested && maxRequests(p.actualRequestState.Requests.GetCardinality()) >= p.nominalMaxRequests() {
-                break
-            }
-            if !more {
-                break
-            }
-        }
-        if !more {
-            break
-        }
-    }
-    if !more {
-        return false
-    }
-    if !interested {
-        p.setInterested(false)
-    }
+    if peerRequesting {
+        if p.actualRequestState.Requests.GetCardinality() > uint64(p.nominalMaxRequests()/2) {
+            return true
+        }
+        type piece struct {
+            index   int
+            endGame bool
+        }
+        var pieceOrder []piece
+        request_strategy.GetRequestablePieces(
+            p.t.cl.getRequestStrategyInput(),
+            func(t *request_strategy.Torrent, rsp *request_strategy.Piece, pieceIndex int) {
+                if t.InfoHash != p.t.infoHash {
+                    return
+                }
+                if !p.peerHasPiece(pieceIndex) {
+                    return
+                }
+                pieceOrder = append(pieceOrder, piece{
+                    index:   pieceIndex,
+                    endGame: rsp.Priority == PiecePriorityNow,
+                })
+            },
+        )
+        more := true
+        interested := false
+        for _, endGameIter := range []bool{false, true} {
+            for _, piece := range pieceOrder {
+                tp := p.t.piece(piece.index)
+                tp.iterUndirtiedChunks(func(cs chunkIndexType) {
+                    req := cs + tp.requestIndexOffset()
+                    if !piece.endGame && !endGameIter && p.t.pendingRequests[req] > 0 {
+                        return
+                    }
+                    interested = true
+                    more = p.setInterested(true)
+                    if !more {
+                        return
+                    }
+                    if maxRequests(p.actualRequestState.Requests.GetCardinality()) >= p.nominalMaxRequests() {
+                        return
+                    }
+                    if p.peerChoking && !p.peerAllowedFast.Contains(bitmap.BitIndex(piece.index)) {
+                        return
+                    }
+                    var err error
+                    more, err = p.request(req)
+                    if err != nil {
+                        panic(err)
+                    }
+                })
+                if interested && maxRequests(p.actualRequestState.Requests.GetCardinality()) >= p.nominalMaxRequests() {
+                    break
+                }
+                if !more {
+                    break
+                }
+            }
+            if !more {
+                break
+            }
+        }
+        if !more {
+            return false
+        }
+        if !interested {
+            p.setInterested(false)
+        }
+        return more
+    }
+    next := p.nextRequestState
+    current := p.actualRequestState
+    if !p.setInterested(next.Interested) {
+        return false
+    }
+    more := true
+    current.Requests.Iterate(func(req uint32) bool {
+        if !next.Requests.Contains(req) {
+            more = p.cancel(req)
+            return more
+        }
+        return true
+    })
+    if !more {
+        return false
+    }
+    next.Requests.Iterate(func(req uint32) bool {
+        // This could happen if the peer chokes us between the next state being generated, and us
+        // trying to transmit the state.
+        if p.peerChoking && !p.peerAllowedFast.Contains(bitmap.BitIndex(req/p.t.chunksPerRegularPiece())) {
+            return true
+        }
+        var err error
+        more, err = p.request(req)
+        if err != nil {
+            panic(err)
+        } /* else {
+            log.Print(req)
+        } */
+        return more
+    })
     return more
 }
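
Both branches above lean on a flat request-index space: the per-peer path builds indices as cs + tp.requestIndexOffset(), and the restored fallback path recovers a piece index with req / p.t.chunksPerRegularPiece() for the allowed-fast check. A worked sketch of that arithmetic with assumed sizes (16 KiB chunks, 256 KiB regular pieces); treating requestIndexOffset as pieceIndex times chunks-per-piece is an assumption for illustration, not taken from this diff:

package main

import "fmt"

const (
    chunkSize             = 16 << 10                                // 16 KiB, the usual request size
    pieceSize             = 256 << 10                               // assumed regular piece length
    chunksPerRegularPiece = (pieceSize + chunkSize - 1) / chunkSize // 16
)

func main() {
    pieceIndex, chunkIndexInPiece := 3, 5
    // Flattened request index: each regular piece contributes a fixed-size block
    // of indices, so piece 3, chunk 5 lands at 3*16 + 5 = 53.
    req := pieceIndex*chunksPerRegularPiece + chunkIndexInPiece
    fmt.Println(req) // 53
    // Dividing back out recovers the piece index, as the peerChoking/allowed-fast
    // check in the fallback path does.
    fmt.Println(req / chunksPerRegularPiece) // 3
}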

View File

@@ -842,14 +842,14 @@ func (t *Torrent) bitfield() (bf []bool) {
     return
 }
 
-func (t *Torrent) chunksPerRegularPiece() uint32 {
-    return uint32((pp.Integer(t.usualPieceSize()) + t.chunkSize - 1) / t.chunkSize)
-}
-
 func (t *Torrent) pieceNumChunks(piece pieceIndex) chunkIndexType {
     return chunkIndexType((t.pieceLength(piece) + t.chunkSize - 1) / t.chunkSize)
 }
 
+func (t *Torrent) chunksPerRegularPiece() uint32 {
+    return uint32((pp.Integer(t.usualPieceSize()) + t.chunkSize - 1) / t.chunkSize)
+}
+
 func (t *Torrent) pendAllChunkSpecs(pieceIndex pieceIndex) {
     t.pieces[pieceIndex]._dirtyChunks.Clear()
 }
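
Both helpers use the same ceiling division; chunksPerRegularPiece works from the usual piece size, while pieceNumChunks uses the actual piece length, which matters for a shorter final piece. A quick check of the rounding with assumed sizes:

package main

import "fmt"

// ceilDiv rounds the quotient up, matching (n + d - 1) / d in the diff above.
func ceilDiv(n, d uint32) uint32 { return (n + d - 1) / d }

func main() {
    const chunkSize = 16 << 10               // 16 KiB chunks
    fmt.Println(ceilDiv(256<<10, chunkSize)) // regular 256 KiB piece: 16 chunks
    fmt.Println(ceilDiv(40<<10, chunkSize))  // short 40 KiB final piece: 3 chunks, not 2
}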