Remove requests as soon as chunk data is received

Note that this breaks the backpressure on webseed responses again, and should be fixed shortly.
Matt Joiner 2021-01-28 16:36:35 +11:00
parent ca1497ad58
commit 1ac5811990
3 changed files with 16 additions and 5 deletions
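The backpressure mentioned in the commit message is not spelled out on this page; one plausible reading is that a request stays counted against PeerMaxRequests until it is removed, so deleting it the moment the chunk data arrives, before the data has been written out, frees a request slot early and lets webseed fetching run ahead of storage. A toy, self-contained sketch of that reading (invented types, not the library's):

package main

import "fmt"

type request struct{ index, begin int }

type peer struct {
	maxRequests int
	requests    map[request]struct{}
}

// canRequest reports whether another request may be issued right now.
func (p *peer) canRequest() bool { return len(p.requests) < p.maxRequests }

// receive models this commit's behaviour: the request is deleted the moment its data
// arrives, before the (possibly slow) write to storage, so a request slot frees early.
func (p *peer) receive(r request, write func()) {
	delete(p.requests, r)
	write()
}

func main() {
	p := &peer{maxRequests: 2, requests: map[request]struct{}{}}
	p.requests[request{0, 0}] = struct{}{}
	p.requests[request{0, 1}] = struct{}{}
	fmt.Println("can issue another request:", p.canRequest()) // false: both slots held
	p.receive(request{0, 0}, func() { /* pretend the chunk is still being written */ })
	fmt.Println("can issue another request:", p.canRequest()) // true: slot freed before the write finished
}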


@@ -1365,7 +1365,14 @@ func (c *Peer) receiveChunk(msg *pp.Message) error {
torrent.Add("chunks received due to allowed fast", 1)
}
defer func() {
// TODO: This needs to happen immediately, to prevent cancels occurring asynchronously when have
// actually already received the piece, while we have the Client unlocked to write the data out.
{
if _, ok := c.requests[req]; ok {
for _, f := range c.callbacks.ReceivedRequested {
f(PeerMessageEvent{c, msg})
}
}
// Request has been satisfied.
if c.deleteRequest(req) {
if c.expectingChunks() {
@@ -1374,7 +1381,7 @@ func (c *Peer) receiveChunk(msg *pp.Message) error {
} else {
torrent.Add("chunks received unwanted", 1)
}
}()
}
// Do we actually want this chunk?
if t.haveChunk(req) {
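The hunk above has lost its add/remove markers, which makes the direction of the change hard to read; an interpretation consistent with the commit title is that the ReceivedRequested callbacks and deleteRequest now run inline as soon as the chunk arrives, instead of inside the deferred function that closes in the second hunk. A toy sketch of that inline shape, with stand-ins invented here for Peer, Request and the callbacks:

package main

import "fmt"

type request struct{ index, begin int }

type peer struct {
	requests          map[request]struct{}
	receivedRequested []func(request)
}

// deleteRequest removes the request if it was outstanding, reporting whether it was.
func (p *peer) deleteRequest(r request) bool {
	if _, ok := p.requests[r]; !ok {
		return false
	}
	delete(p.requests, r)
	return true
}

// receiveChunk fires the "received requested" callbacks while the request is still
// recorded, deletes it immediately (not in a defer), and only then would hand the
// data off to be written.
func (p *peer) receiveChunk(r request, data []byte) {
	if _, ok := p.requests[r]; ok {
		for _, f := range p.receivedRequested {
			f(r)
		}
	}
	if !p.deleteRequest(r) {
		fmt.Println("chunk received unwanted")
	}
	_ = data // writing the chunk out would happen here, after the bookkeeping
}

func main() {
	p := &peer{
		requests: map[request]struct{}{{index: 0, begin: 0}: {}},
		receivedRequested: []func(request){
			func(r request) { fmt.Println("requested chunk arrived:", r) },
		},
	}
	p.receiveChunk(request{0, 0}, []byte("chunk data"))
	p.receiveChunk(request{0, 16384}, nil) // never requested: counted as unwanted
}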


@@ -2117,10 +2117,13 @@ func (t *Torrent) addWebSeed(url string) {
Network: "http",
reconciledHandshakeStats: true,
peerSentHaveAll: true,
PeerMaxRequests: maxRequests,
RemoteAddr: remoteAddrFromUrl(url),
// TODO: Raise this limit, and instead limit concurrent fetches.
PeerMaxRequests: maxRequests,
RemoteAddr: remoteAddrFromUrl(url),
},
client: webseed.Client{
// TODO: Investigate a MaxConnsPerHost in the transport for this, possibly in a global
// Client.
HttpClient: http.DefaultClient,
Url: url,
},
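The second TODO names MaxConnsPerHost as a possible transport-level way to limit concurrent fetches. A sketch of what that could look like with the standard library (an illustration of the TODO only; the commit itself keeps http.DefaultClient):

package main

import "net/http"

// newWebseedHTTPClient sketches the transport-level cap the TODO mentions: bound
// concurrent fetches to one webseed host with MaxConnsPerHost instead of (or alongside)
// a raised PeerMaxRequests. The limit of 10 is illustrative only.
func newWebseedHTTPClient() *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			// Counts dialing, active and idle connections to a host; further
			// requests block until one frees up.
			MaxConnsPerHost: 10,
		},
	}
}

func main() {
	_ = newWebseedHTTPClient() // would stand in for http.DefaultClient in the literal above
}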


@@ -13,7 +13,8 @@ import (
)
type webseedPeer struct {
client webseed.Client
client webseed.Client
// TODO: Remove finished entries from this.
requests map[Request]webseed.Request
peer Peer
}
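The new requests map carries a TODO to drop finished entries. A small sketch of that cleanup, again using stand-in types rather than the library's webseedPeer and webseed.Request:

package main

import "fmt"

type request struct{ index, begin int }

// webseedRequest stands in for webseed.Request; only its lifetime matters here.
type webseedRequest struct{}

type webseedPeer struct {
	requests map[request]webseedRequest
}

// requestFinished is the cleanup the TODO asks for: once a fetch completes or is
// cancelled, drop its entry so the map doesn't grow without bound.
func (ws *webseedPeer) requestFinished(r request) {
	delete(ws.requests, r)
}

func main() {
	ws := &webseedPeer{requests: map[request]webseedRequest{}}
	r := request{index: 0, begin: 0}
	ws.requests[r] = webseedRequest{} // recorded when the fetch is issued
	// ... the HTTP fetch runs and the data is delivered to the peer ...
	ws.requestFinished(r)
	fmt.Println("tracked webseed requests:", len(ws.requests)) // 0
}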