Sleep webseed peers after unhandled errors
parent 61a303cbf7
commit 14cf045b6a

@@ -26,7 +26,7 @@ type peerImpl interface {
 	// Rebuke the peer
 	ban()
 	String() string
-	connStatusString() string
+	peerImplStatusLines() []string

 	// All if the peer should have everything, known if we know that for a fact. For example, we can
 	// guess at how many pieces are in a torrent, and assume they have all pieces based on them
@@ -171,8 +171,8 @@ type PeerConn struct {
 	peerSentHaveAll bool
 }

-func (cn *PeerConn) connStatusString() string {
-	return fmt.Sprintf("%+-55q %s %s", cn.PeerID, cn.PeerExtensionBytes, cn.connString)
+func (cn *PeerConn) peerImplStatusLines() []string {
+	return []string{fmt.Sprintf("%+-55q %s %s", cn.PeerID, cn.PeerExtensionBytes, cn.connString)}
 }

 func (cn *Peer) updateExpectingChunks() {
@@ -389,7 +389,7 @@ func (cn *Peer) writeStatus(w io.Writer, t *Torrent) {
 	if cn.closed.IsSet() {
 		fmt.Fprint(w, "CLOSED: ")
 	}
-	fmt.Fprintln(w, cn.connStatusString())
+	fmt.Fprintln(w, strings.Join(cn.peerImplStatusLines(), "\n"))
 	prio, err := cn.peerPriority()
 	prioStr := fmt.Sprintf("%08x", prio)
 	if err != nil {
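The interface method changes from a single connStatusString to peerImplStatusLines, so an implementation can report several facts and the caller joins them for display. A minimal standalone sketch of that pattern, using made-up types rather than the library's own:

package main

import (
	"fmt"
	"strings"
)

// statusLiner mirrors the reshaped peerImpl method: implementations return
// any number of status lines instead of one preformatted string.
type statusLiner interface {
	peerImplStatusLines() []string
}

// fakeWebseed is a stand-in implementation for the sketch.
type fakeWebseed struct {
	url     string
	lastErr string
}

func (f fakeWebseed) peerImplStatusLines() []string {
	return []string{
		f.url,
		fmt.Sprintf("last unhandled error: %v", f.lastErr),
	}
}

func main() {
	var p statusLiner = fakeWebseed{url: "http://example.invalid/seed", lastErr: "never"}
	// The caller flattens the lines again, as Peer.writeStatus now does with strings.Join.
	fmt.Println(strings.Join(p.peerImplStatusLines(), "\n"))
}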
@@ -770,9 +770,14 @@ func (t *Torrent) writeStatus(w io.Writer) {
 		}
 		return worseConn(i, j)
 	})
+	var buf bytes.Buffer
 	for i, c := range peers {
 		fmt.Fprintf(w, "%2d. ", i+1)
-		c.writeStatus(w, t)
+		buf.Reset()
+		c.writeStatus(&buf, t)
+		w.Write(bytes.TrimRight(
+			bytes.ReplaceAll(buf.Bytes(), []byte("\n"), []byte("\n    ")),
+			" "))
 	}
 }

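Torrent.writeStatus now renders each peer into a scratch buffer so the peer's own newlines can be re-indented under its numbered entry, then trims the padding left on otherwise-blank lines. A rough standalone sketch of that buffering step (the four-space indent matching the "%2d. " prefix is my reading of the collapsed whitespace above):

package main

import (
	"bytes"
	"fmt"
	"os"
)

func main() {
	// Stand-ins for what each peer's writeStatus would emit.
	statuses := []string{
		"peer A\nflags: eD\nrequests: 3\n",
		"peer B\nflags: -\n",
	}
	var buf bytes.Buffer
	for i, s := range statuses {
		fmt.Fprintf(os.Stdout, "%2d. ", i+1)
		buf.Reset()
		buf.WriteString(s) // stands in for c.writeStatus(&buf, t)
		// Indent continuation lines under the numbered prefix, then drop the
		// spaces that the replacement leaves on the trailing empty line.
		os.Stdout.Write(bytes.TrimRight(
			bytes.ReplaceAll(buf.Bytes(), []byte("\n"), []byte("\n    ")),
			" "))
	}
}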
@@ -2425,7 +2430,6 @@ func (t *Torrent) addWebSeed(url string, opts ...AddWebSeedsOpt) {
 			},
 		},
 		activeRequests: make(map[Request]webseed.Request, maxRequests),
-		maxRequests:    maxRequests,
 	}
 	ws.peer.initRequestState()
 	for _, opt := range opts {
@@ -16,20 +16,27 @@ import (
 	"github.com/anacrolix/torrent/webseed"
 )

+const (
+	webseedPeerUnhandledErrorSleep   = 5 * time.Second
+	webseedPeerCloseOnUnhandledError = false
+)
+
 type webseedPeer struct {
 	// First field for stats alignment.
 	peer           Peer
 	client         webseed.Client
 	activeRequests map[Request]webseed.Request
 	requesterCond  sync.Cond
-	// Number of requester routines.
-	maxRequests int
+	lastUnhandledErr time.Time
 }

 var _ peerImpl = (*webseedPeer)(nil)

-func (me *webseedPeer) connStatusString() string {
-	return me.client.Url
+func (me *webseedPeer) peerImplStatusLines() []string {
+	return []string{
+		me.client.Url,
+		fmt.Sprintf("last unhandled error: %v", eventAgeString(me.lastUnhandledErr)),
+	}
 }

 func (ws *webseedPeer) String() string {
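eventAgeString is not part of this diff; for the new status line to be useful it presumably renders how long ago lastUnhandledErr was set, with a sensible result for the zero value. A hypothetical helper with that behavior, purely for illustration (not the library's actual definition):

package main

import (
	"fmt"
	"time"
)

// eventAgeString here is a guess at the helper's behavior: "never" for the
// zero time, otherwise the age of the event.
func eventAgeString(t time.Time) string {
	if t.IsZero() {
		return "never"
	}
	return fmt.Sprintf("%.2fs ago", time.Since(t).Seconds())
}

func main() {
	var lastUnhandledErr time.Time
	fmt.Println("last unhandled error:", eventAgeString(lastUnhandledErr)) // "never"
	lastUnhandledErr = time.Now().Add(-3 * time.Second)
	fmt.Println("last unhandled error:", eventAgeString(lastUnhandledErr)) // roughly "3.00s ago"
}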
@@ -86,6 +93,7 @@ func (ws *webseedPeer) requester(i int) {
 	defer ws.requesterCond.L.Unlock()
start:
 	for !ws.peer.closed.IsSet() {
+		// Restart is set if we don't need to wait for the requestCond before trying again.
 		restart := false
 		ws.peer.requestState.Requests.Iterate(func(x RequestIndex) bool {
 			r := ws.peer.t.requestIndexToRequest(x)
|
@ -101,6 +109,7 @@ start:
|
||||||
if errors.Is(err, webseed.ErrTooFast) {
|
if errors.Is(err, webseed.ErrTooFast) {
|
||||||
time.Sleep(time.Duration(rand.Int63n(int64(10 * time.Second))))
|
time.Sleep(time.Duration(rand.Int63n(int64(10 * time.Second))))
|
||||||
}
|
}
|
||||||
|
time.Sleep(time.Until(ws.lastUnhandledErr.Add(webseedPeerUnhandledErrorSleep)))
|
||||||
ws.requesterCond.L.Lock()
|
ws.requesterCond.L.Lock()
|
||||||
return false
|
return false
|
||||||
})
|
})
|
||||||
|
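The added time.Sleep makes every error pass wait out the five-second window measured from lastUnhandledErr. When there has been no recent unhandled error, that deadline is already in the past, time.Until returns a non-positive duration, and Sleep returns immediately, so the common path is unaffected. A small sketch of that property (the window is shortened in the second case just so the demo finishes quickly):

package main

import (
	"fmt"
	"time"
)

const unhandledErrorSleep = 5 * time.Second // mirrors webseedPeerUnhandledErrorSleep

func main() {
	// Case 1: no recent error (zero time). The deadline is far in the past,
	// time.Until is negative, and Sleep returns immediately.
	var lastErr time.Time
	start := time.Now()
	time.Sleep(time.Until(lastErr.Add(unhandledErrorSleep)))
	fmt.Println("no recent error, slept for", time.Since(start).Round(time.Millisecond))

	// Case 2: an error just happened, so we sleep out the remainder of the window.
	lastErr = time.Now()
	start = time.Now()
	time.Sleep(time.Until(lastErr.Add(100 * time.Millisecond))) // shortened window for the demo
	fmt.Println("recent error, slept for", time.Since(start).Round(time.Millisecond))
}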
@@ -172,8 +181,13 @@ func (ws *webseedPeer) requestResultHandler(r Request, webseedRequest webseed.Re
 			// cfg := spew.NewDefaultConfig()
 			// cfg.DisableMethods = true
 			// cfg.Dump(result.Err)
-			log.Printf("closing %v", ws)
-			ws.peer.close()
+			if webseedPeerCloseOnUnhandledError {
+				log.Printf("closing %v", ws)
+				ws.peer.close()
+			} else {
+				ws.lastUnhandledErr = time.Now()
+			}
 		}
 		if !ws.peer.remoteRejectedRequest(ws.peer.t.requestIndexFromRequest(r)) {
 			panic("invalid reject")
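With webseedPeerCloseOnUnhandledError defaulting to false, an unhandled webseed error no longer tears the peer down; it only stamps lastUnhandledErr, which throttles the requester loop (above) and surfaces in the status lines. A condensed sketch of just that branch, with stand-in types since the real handler is a method on webseedPeer:

package main

import (
	"errors"
	"fmt"
	"time"
)

const closeOnUnhandledError = false // mirrors webseedPeerCloseOnUnhandledError

type fakePeer struct {
	closed           bool
	lastUnhandledErr time.Time
}

func (p *fakePeer) handleUnhandledError(err error) {
	if closeOnUnhandledError {
		fmt.Printf("closing peer after error: %v\n", err)
		p.closed = true
	} else {
		// Keep the peer; record when the error happened so requesters back off.
		p.lastUnhandledErr = time.Now()
	}
}

func main() {
	p := &fakePeer{}
	p.handleUnhandledError(errors.New("unexpected status code"))
	fmt.Println("closed:", p.closed, "last unhandled error:", p.lastUnhandledErr.Format(time.RFC3339))
}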