Don't always force piece reallocation
The balancing for unforced pieces may not be exactly what we want yet.
parent 95d808d3c5
commit 1a27e14391
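In rough terms, "forcing reallocation" means ordering the peers for a piece purely by how many requests each already holds in it, so chunks stripe evenly across peers on every pass; when reallocation isn't forced, a peer that already holds a given request now wins the tie and keeps it. A toy sketch of that ordering difference (made-up types and fields, not the package's real ones):

```go
package main

import (
	"fmt"
	"sort"
)

// peer is a stand-in for the real peersForPiece entries.
type peer struct {
	name            string
	requestsInPiece int
	hasThisRequest  bool
}

// order sorts so that index 0 is the preferred peer for the next chunk.
func order(peers []peer, alwaysReallocate bool) {
	sort.Slice(peers, func(i, j int) bool {
		if alwaysReallocate {
			// Forced: stripe evenly by existing request count.
			return peers[i].requestsInPiece < peers[j].requestsInPiece
		}
		// Unforced: a peer already holding this request keeps it.
		return peers[i].hasThisRequest && !peers[j].hasThisRequest
	})
}

func main() {
	peers := []peer{{"a", 3, true}, {"b", 0, false}}
	order(peers, true)
	fmt.Println(peers[0].name) // b: fewest requests in the piece wins when forced
	peers = []peer{{"a", 3, true}, {"b", 0, false}}
	order(peers, false)
	fmt.Println(peers[0].name) // a: the current holder wins when unforced
}
```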
```diff
@@ -74,6 +74,7 @@ func (me *peersForPieceRequests) addNextRequest(r Request) {
 type requestablePiece struct {
 	index             pieceIndex
 	t                 Torrent
+	alwaysReallocate  bool
 	NumPendingChunks  int
 	IterPendingChunks ChunksIter
 }
```
```diff
@@ -137,6 +138,7 @@ func getRequestablePieces(input Input) (ret []requestablePiece) {
 			t:                 piece.t.Torrent,
 			NumPendingChunks:  piece.NumPendingChunks,
 			IterPendingChunks: piece.iterPendingChunksWrapper,
+			alwaysReallocate:  piece.Priority >= types.PiecePriorityNext,
 		})
 	}
 	return
```
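The flag is derived from piece priority: only priorities at or above `PiecePriorityNext` force reallocation. Assuming the priority constants in the `types` package are the torrent package's usual ascending set (None, Normal, High, Readahead, Next, Now; only `PiecePriorityNext` itself is confirmed by the diff), the threshold selects just the two read-driven urgency levels:

```go
package main

import (
	"fmt"

	"github.com/anacrolix/torrent/types"
)

func main() {
	// Assumed ordering: Normal < Readahead < Next < Now. Only Next and Now
	// clear the threshold, so only read-urgent pieces force reallocation.
	for _, p := range []types.PiecePriority{
		types.PiecePriorityNormal,
		types.PiecePriorityReadahead,
		types.PiecePriorityNext,
		types.PiecePriorityNow,
	} {
		fmt.Printf("priority %d forces reallocation: %t\n", p, p >= types.PiecePriorityNext)
	}
}
```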
```diff
@@ -201,23 +203,42 @@ func allocatePendingChunks(p requestablePiece, peers []*requestsPeer) {
 			}
 		}
 	}()
-	sortPeersForPiece := func(byHasRequest *Request) {
+	sortPeersForPiece := func(req *Request) {
 		sort.Slice(peersForPiece, func(i, j int) bool {
-			ml := multiless.New().Int(
-				peersForPiece[i].requestsInPiece,
-				peersForPiece[j].requestsInPiece,
-			).Int(
+			byHasRequest := func() multiless.Computation {
+				ml := multiless.New()
+				if req != nil {
+					_, iHas := peersForPiece[i].nextState.Requests[*req]
+					_, jHas := peersForPiece[j].nextState.Requests[*req]
+					ml = ml.Bool(jHas, iHas)
+				}
+				return ml
+			}()
+			ml := multiless.New()
+			// We always "reallocate", that is force even striping amongst peers that are either on
+			// the last piece they can contribute to, or for pieces marked for this behaviour.
+			// Striping prevents starving peers of requests, and will always re-balance to the
+			// fastest known peers.
+			if !p.alwaysReallocate {
+				ml = ml.Bool(
+					peersForPiece[j].requestablePiecesRemaining == 1,
+					peersForPiece[i].requestablePiecesRemaining == 1)
+			}
+			if p.alwaysReallocate || peersForPiece[j].requestablePiecesRemaining == 1 {
+				ml = ml.Int(
+					peersForPiece[i].requestsInPiece,
+					peersForPiece[j].requestsInPiece)
+			} else {
+				ml = ml.AndThen(byHasRequest)
+			}
+			ml = ml.Int(
 				peersForPiece[i].requestablePiecesRemaining,
 				peersForPiece[j].requestablePiecesRemaining,
 			).Float64(
 				peersForPiece[j].DownloadRate,
 				peersForPiece[i].DownloadRate,
 			)
-			if byHasRequest != nil {
-				_, iHas := peersForPiece[i].nextState.Requests[*byHasRequest]
-				_, jHas := peersForPiece[j].nextState.Requests[*byHasRequest]
-				ml = ml.Bool(jHas, iHas)
-			}
+			ml = ml.AndThen(byHasRequest)
 			return ml.Int64(
 				int64(peersForPiece[j].Age), int64(peersForPiece[i].Age),
 				// TODO: Probably peer priority can come next
```
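The refactor lifts the old `byHasRequest` pointer check into a self-contained `multiless.Computation` that can be spliced into the chain at different points: when striping is forced, it is only consulted after request counts and download rate; otherwise it comes first, so existing holders keep their requests. A minimal sketch of how such a chain resolves, assuming the library's `Less()` terminal (the chaining calls themselves all appear in the diff above; earlier clauses win, later ones only break ties):

```go
package main

import (
	"fmt"

	"github.com/anacrolix/multiless"
)

func main() {
	// Two peers as compared in sortPeersForPiece: i holds fewer requests in
	// the piece, j downloads faster.
	iRequests, jRequests := 2, 4
	iRate, jRate := 100.0, 200.0
	ml := multiless.New().
		// Fewer requests in the piece sorts first.
		Int(iRequests, jRequests).
		// Consulted only on a tie. Rates are passed j-first, as in the
		// diff above, so the faster peer sorts first.
		Float64(jRate, iRate)
	fmt.Println(ml.Less()) // true: the request-count clause decides before rate
}
```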
```diff
@@ -187,7 +187,9 @@ func TestDontStealUnnecessarily(t *testing.T) {
 	// Slower than the stealers, but has all requests already.
 	stealee := basePeer
 	stealee.DownloadRate = 1
-	keepReqs := requestSetFromSlice(r(0, 0), r(0, 1), r(0, 2))
+	keepReqs := requestSetFromSlice(
+		r(3, 2), r(3, 4), r(3, 6), r(3, 8),
+		r(4, 0), r(4, 1), r(4, 7), r(4, 8))
 	stealee.HasExistingRequest = func(r Request) bool {
 		_, ok := keepReqs[r]
 		return ok
```
```diff
@@ -197,12 +199,41 @@ func TestDontStealUnnecessarily(t *testing.T) {
 	firstStealer.Id = intPeerId(2)
 	secondStealer := basePeer
 	secondStealer.Id = intPeerId(3)
+	secondStealer.HasPiece = func(i pieceIndex) bool {
+		switch i {
+		case 1, 3:
+			return true
+		default:
+			return false
+		}
+	}
 	results := Run(Input{Torrents: []Torrent{{
-		Pieces: []Piece{{
-			Request:           true,
-			NumPendingChunks:  9,
-			IterPendingChunks: chunkIterRange(9),
-		}},
+		Pieces: []Piece{
+			{
+				Request:           true,
+				NumPendingChunks:  0,
+				IterPendingChunks: chunkIterRange(9),
+			},
+			{
+				Request:           true,
+				NumPendingChunks:  7,
+				IterPendingChunks: chunkIterRange(7),
+			},
+			{
+				Request:           true,
+				NumPendingChunks:  0,
+				IterPendingChunks: chunkIterRange(0),
+			},
+			{
+				Request:           true,
+				NumPendingChunks:  9,
+				IterPendingChunks: chunkIterRange(9),
+			},
+			{
+				Request:           true,
+				NumPendingChunks:  9,
+				IterPendingChunks: chunkIterRange(9),
+			}},
 		Peers: []Peer{
 			firstStealer,
 			stealee,
```
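With this setup the second stealer can only serve pieces 1 and 3 (7 and 9 pending chunks respectively), while the first stealer and the stealee, whose HasPiece presumably accepts every piece, compete for piece 4's 9 chunks; pieces 0 and 2 have nothing pending. That geometry drives the revised expectations below.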
```diff
@@ -215,10 +246,10 @@ func TestDontStealUnnecessarily(t *testing.T) {
 		c.Check(results[p].Requests, qt.HasLen, l)
 		c.Check(results[p].Interested, qt.Equals, l > 0)
 	}
-	check(firstStealer.Id, 3)
-	check(secondStealer.Id, 3)
+	check(firstStealer.Id, 5)
+	check(secondStealer.Id, 7+9)
 	c.Check(results[stealee.Id], qt.ContentEquals, PeerNextRequestState{
 		Interested: true,
-		Requests:   keepReqs,
+		Requests:   requestSetFromSlice(r(4, 0), r(4, 1), r(4, 7), r(4, 8)),
 	})
 }
```
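The new totals check out: of the 25 pending chunks (0 + 7 + 0 + 9 + 9), the second stealer takes all of pieces 1 and 3 (7 + 9 = 16), the stealee keeps only its four piece-4 requests (the piece-3 entries of keepReqs are stolen), and the first stealer picks up the remaining 9 − 4 = 5 chunks of piece 4.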