Add the DropMutuallyCompletePeers ClientConfig field
commit ded6c19edb (parent ae9d5bce18)
@@ -527,7 +527,9 @@ func TestTorrentDownloadAllThenCancel(t *testing.T) {
 // Ensure that it's an error for a peer to send an invalid have message.
 func TestPeerInvalidHave(t *testing.T) {
-    cl, err := NewClient(TestingConfig())
+    cfg := TestingConfig()
+    cfg.DropMutuallyCompletePeers = false
+    cl, err := NewClient(cfg)
     require.NoError(t, err)
     defer cl.Close()
     info := metainfo.Info{

@@ -548,6 +550,7 @@ func TestPeerInvalidHave(t *testing.T) {
     cn := &PeerConn{peer: peer{
         t: tt,
     }}
+    cn.peerImpl = cn
     assert.NoError(t, cn.peerSentHave(0))
     assert.Error(t, cn.peerSentHave(1))
 }
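One note on the test change: cn.peerImpl = cn is new wiring. peerSentHave now reaches maybeDropMutuallyCompletePeer (added below in torrent.go), and that path appears to dispatch through the peer's peerImpl, so the minimal fixture has to point it back at the PeerConn.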
@@ -124,6 +124,10 @@ type ClientConfig struct {
     // Don't add connections that have the same peer ID as an existing
     // connection for a given Torrent.
     DropDuplicatePeerIds bool
+    // Drop peers that are complete if we are also complete and have no use for the peer. This is a
+    // bit of a special case, since a peer could also be useless if they're just not interested, or
+    // we don't intend to obtain all of a torrent's data.
+    DropMutuallyCompletePeers bool

     ConnTracker *conntrack.Instance
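For orientation, a minimal sketch of how application code might opt out of the new behaviour once this field lands. It assumes the library's existing NewDefaultClientConfig and NewClient constructors under the import path github.com/anacrolix/torrent; only the field assignment is new in this commit:

    package main

    import (
        "log"

        "github.com/anacrolix/torrent"
    )

    func main() {
        cfg := torrent.NewDefaultClientConfig()
        // The default becomes true in this commit (see the
        // NewDefaultClientConfig hunk below); disable it to keep
        // connections to peers we are mutually complete with.
        cfg.DropMutuallyCompletePeers = false
        cl, err := torrent.NewClient(cfg)
        if err != nil {
            log.Fatal(err)
        }
        defer cl.Close()
    }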
@@ -170,6 +174,7 @@ func NewDefaultClientConfig() *ClientConfig {
         DownloadRateLimiter:       unlimited,
         ConnTracker:               conntrack.NewInstance(),
         DisableAcceptRateLimiting: true,
+        DropMutuallyCompletePeers: true,
         HeaderObfuscationPolicy: HeaderObfuscationPolicy{
             Preferred:        true,
             RequirePreferred: false,
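Because the default is true, existing clients start dropping mutually complete peers on upgrade; callers that want to keep those connections open must set DropMutuallyCompletePeers to false explicitly, as the tests in this commit do.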
@@ -843,6 +843,7 @@ func (cn *PeerConn) peerPiecesChanged() {
             cn.updateRequests()
         }
     }
+    cn.t.maybeDropMutuallyCompletePeer(&cn.peer)
 }

 func (cn *PeerConn) raisePeerMinPieces(newMin pieceIndex) {
@@ -860,6 +861,7 @@ func (cn *PeerConn) peerSentHave(piece pieceIndex) error {
     }
     cn.raisePeerMinPieces(piece + 1)
     cn._peerPieces.Set(bitmap.BitIndex(piece), true)
+    cn.t.maybeDropMutuallyCompletePeer(&cn.peer)
     if cn.updatePiecePriority(piece) {
         cn.updateRequests()
     }
@@ -53,6 +53,8 @@ func testClientTransfer(t *testing.T, ps testClientTransferParams) {
     // Create seeder and a Torrent.
     cfg := torrent.TestingConfig()
     cfg.Seed = true
+    // Some test instances don't like this being on, even when there's no cache involved.
+    cfg.DropMutuallyCompletePeers = false
     if ps.SeederUploadRateLimiter != nil {
         cfg.UploadRateLimiter = ps.SeederUploadRateLimiter
     }
@@ -84,6 +86,8 @@ func testClientTransfer(t *testing.T, ps testClientTransferParams) {
     require.NoError(t, err)
     defer os.RemoveAll(leecherDataDir)
     cfg = torrent.TestingConfig()
+    // See the seeder client config comment.
+    cfg.DropMutuallyCompletePeers = false
     if ps.LeecherStorage == nil {
         cfg.DataDir = leecherDataDir
     } else {
@@ -142,7 +146,12 @@ func testClientTransfer(t *testing.T, ps testClientTransferParams) {
     assertReadAllGreeting(t, r)
     assert.NotEmpty(t, seederTorrent.PeerConns())
     leecherPeerConns := leecherTorrent.PeerConns()
-    assert.NotEmpty(t, leecherPeerConns)
+    if cfg.DropMutuallyCompletePeers {
+        // I don't think we can assume it will be empty already, due to timing.
+        //assert.Empty(t, leecherPeerConns)
+    } else {
+        assert.NotEmpty(t, leecherPeerConns)
+    }
     foundSeeder := false
     for _, pc := range leecherPeerConns {
         completed := pc.PeerPieces().Len()
torrent.go (+21)
@@ -826,6 +826,26 @@ func (t *Torrent) havePiece(index pieceIndex) bool {
     return t.haveInfo() && t.pieceComplete(index)
 }

+func (t *Torrent) maybeDropMutuallyCompletePeer(
+    // I'm not sure about taking peer here, not all peer implementations actually drop. Maybe that's okay?
+    p *peer,
+) {
+    if !t.cl.config.DropMutuallyCompletePeers {
+        return
+    }
+    if !t.haveAllPieces() {
+        return
+    }
+    if all, known := p.peerHasAllPieces(); !(known && all) {
+        return
+    }
+    if p.useful() {
+        return
+    }
+    log.Printf("dropping %v, which is mutually complete", p)
+    p.drop()
+}
+
 func (t *Torrent) haveChunk(r request) (ret bool) {
     // defer func() {
     //     log.Println("have chunk", r, ret)
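The method is a chain of early-return gates. Condensed into one predicate it reads as below; shouldDropMutuallyComplete is a hypothetical in-package restatement using the same names as the diff, not part of the change:

    // Drop only when every gate passes: the feature is on, both sides are
    // complete (and the peer's completeness is actually known), and the
    // connection is otherwise useless.
    func shouldDropMutuallyComplete(t *Torrent, p *peer) bool {
        all, known := p.peerHasAllPieces()
        return t.cl.config.DropMutuallyCompletePeers &&
            t.haveAllPieces() &&
            known && all &&
            !p.useful()
    }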
@@ -1808,6 +1828,7 @@ func (t *Torrent) onPieceCompleted(piece pieceIndex) {
     t.cancelRequestsForPiece(piece)
     for conn := range t.conns {
         conn.have(piece)
+        t.maybeDropMutuallyCompletePeer(&conn.peer)
     }
 }
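This hook covers the local side of the condition: the peerSentHave and peerPiecesChanged hooks above fire when the remote peer becomes complete, while onPieceCompleted re-checks every connection when we complete a piece, including our last one.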