diff --git a/client.go b/client.go
index 8544d4e3..9300b24b 100644
--- a/client.go
+++ b/client.go
@@ -969,8 +969,9 @@ func (cl *Client) runHandshookConn(c *PeerConn, t *Torrent) error {
 	return nil
 }
 
-// If peer requests are buffered on read, this instructs the amount of memory that might be used to
-// cache pending writes. Assuming 512KiB cached for sending, for 16KiB chunks.
+// Maximum pending requests we allow peers to send us. If peer requests are buffered on read, this
+// instructs the amount of memory that might be used to cache pending writes. Assuming 512KiB
+// (1<<19) cached for sending, for 16KiB (1<<14) chunks.
 const localClientReqq = 1 << 5
 
 // See the order given in Transmission's tr_peerMsgsNew.
diff --git a/global.go b/global.go
index 1a09b06e..e06d9323 100644
--- a/global.go
+++ b/global.go
@@ -9,7 +9,6 @@ import (
 
 const (
 	pieceHash        = crypto.SHA1
-	maxRequests      = 250    // Maximum pending requests we allow peers to send us.
 	defaultChunkSize = 0x4000 // 16KiB
 )
 
diff --git a/peerconn.go b/peerconn.go
index a5d078e0..71ac5f3d 100644
--- a/peerconn.go
+++ b/peerconn.go
@@ -989,7 +989,8 @@ func (c *PeerConn) onReadRequest(r Request) error {
 		}
 		return nil
 	}
-	if len(c.peerRequests) >= maxRequests {
+	// TODO: What if they've already requested this?
+	if len(c.peerRequests) >= localClientReqq {
 		torrent.Add("requests received while queue full", 1)
 		if c.fastEnabled() {
 			c.reject(r)
@@ -1010,7 +1011,7 @@ func (c *PeerConn) onReadRequest(r Request) error {
 		return errors.New("bad Request")
 	}
 	if c.peerRequests == nil {
-		c.peerRequests = make(map[Request]*peerRequestState, maxRequests)
+		c.peerRequests = make(map[Request]*peerRequestState, localClientReqq)
 	}
 	value := &peerRequestState{}
 	c.peerRequests[r] = value
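
For reference, a minimal standalone sketch (not part of the patch) of the memory bound the new comment describes: with at most localClientReqq (1<<5) pending peer requests and defaultChunkSize (0x4000, 16KiB) chunks, at most 512KiB (1<<19) of chunk data might be buffered for sending per connection. The constant names mirror the patch; the program itself is illustrative only.

// reqq_bound_sketch.go: illustrative only, not part of the patch.
package main

import "fmt"

const (
	localClientReqq  = 1 << 5 // max pending requests we allow a peer to queue, as in client.go
	defaultChunkSize = 0x4000 // 16KiB per chunk, as in global.go
)

func main() {
	// Worst case: the peer's entire request queue is buffered for writing.
	maxBuffered := localClientReqq * defaultChunkSize
	fmt.Printf("%d requests x %d bytes = %d bytes (%d KiB) buffered per peer\n",
		localClientReqq, defaultChunkSize, maxBuffered, maxBuffered>>10)
	// Output: 32 requests x 16384 bytes = 524288 bytes (512 KiB) buffered per peer
}

This is also why onReadRequest can size its peerRequests map with localClientReqq: the queue-full branch rejects further requests (when the fast extension is enabled) or drops them, so the map should not need to grow past that cap.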