// FedP2P/conn_stats.go

package torrent

import (
	"encoding/json"
	"fmt"
	"io"
	"reflect"
	"sync/atomic"

	pp "github.com/anacrolix/torrent/peer_protocol"
)

// Various connection-level metrics. At the Torrent level these are
// aggregates. Chunks are messages with data payloads. Data is actual torrent
// content without any overhead. Useful is something we needed locally.
// Unwanted is something we didn't ask for (but may still be useful). Written
// is things sent to the peer, and Read is stuff received from them.
type ConnStats struct {
	// Total bytes on the wire. Includes handshakes and encryption.
	BytesWritten     Count
	BytesWrittenData Count

	BytesRead           Count
	BytesReadData       Count
	BytesReadUsefulData Count

	ChunksWritten      Count
	ChunksRead         Count
	ChunksReadUseful   Count
	ChunksReadWasted   Count
	MetadataChunksRead Count

	// Number of pieces data was written to, that subsequently passed verification.
	PiecesDirtiedGood Count
	// Number of pieces data was written to, that subsequently failed verification. Note that a
	// connection may not have been the sole dirtier of a piece.
	PiecesDirtiedBad Count
}
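
// An illustrative reading of these counters (an assumption, not part of the
// original file; cs names an arbitrary *ConnStats): since BytesWritten covers
// everything on the wire while BytesWrittenData covers only chunk payloads,
// write-side protocol and encryption overhead can be estimated by subtracting
// the two.
//
//	overhead := cs.BytesWritten.Int64() - cs.BytesWrittenData.Int64()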

// Copy returns a snapshot of the stats, loading each counter atomically.
func (me *ConnStats) Copy() (ret ConnStats) {
	for i := 0; i < reflect.TypeOf(ConnStats{}).NumField(); i++ {
		n := reflect.ValueOf(me).Elem().Field(i).Addr().Interface().(*Count).Int64()
		reflect.ValueOf(&ret).Elem().Field(i).Addr().Interface().(*Count).Add(n)
	}
	return
}
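
// A minimal usage sketch (an assumption; logConnStats is hypothetical): since
// every field is updated atomically, Copy can take a snapshot for display
// without any additional locking.
//
//	func logConnStats(cs *ConnStats) {
//		snap := cs.Copy()
//		fmt.Printf("wrote %v bytes, read %v bytes\n", snap.BytesWritten.Int64(), snap.BytesRead.Int64())
//	}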

// Count is an int64 counter that is safe for concurrent use.
type Count struct {
	n int64
}

var _ fmt.Stringer = (*Count)(nil)

func (me *Count) Add(n int64) {
	atomic.AddInt64(&me.n, n)
}

func (me *Count) Int64() int64 {
	return atomic.LoadInt64(&me.n)
}

func (me *Count) String() string {
	return fmt.Sprintf("%v", me.Int64())
}

func (me *Count) MarshalJSON() ([]byte, error) {
	// Load the value atomically to avoid a data race with concurrent Add calls.
	return json.Marshal(me.Int64())
}
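
// A small illustration (an assumption, not taken from this file; the numbers
// are made up): because *Count implements json.Marshaler, marshalling a
// pointer to a ConnStats snapshot yields a flat object of integers.
//
//	snap := cs.Copy()
//	b, _ := json.Marshal(&snap) // e.g. {"BytesWritten":1234,"BytesWrittenData":1200,...}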

func (cs *ConnStats) wroteMsg(msg *pp.Message) {
	// TODO: Track messages and not just chunks.
	switch msg.Type {
	case pp.Piece:
		cs.ChunksWritten.Add(1)
		cs.BytesWrittenData.Add(int64(len(msg.Piece)))
	}
}

func (cs *ConnStats) readMsg(msg *pp.Message) {
	// We want to also handle extended metadata pieces here, but we wouldn't
	// have decoded the extended payload yet.
	switch msg.Type {
	case pp.Piece:
		cs.ChunksRead.Add(1)
		cs.BytesReadData.Add(int64(len(msg.Piece)))
	}
}

func (cs *ConnStats) incrementPiecesDirtiedGood() {
	cs.PiecesDirtiedGood.Add(1)
}

func (cs *ConnStats) incrementPiecesDirtiedBad() {
	cs.PiecesDirtiedBad.Add(1)
}

// add returns a function that bumps the Count selected by f by n.
func add(n int64, f func(*ConnStats) *Count) func(*ConnStats) {
	return func(cs *ConnStats) {
		p := f(cs)
		p.Add(n)
	}
}
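
// A usage sketch (an assumption; the call sites live outside this file, and
// statsToUpdate is hypothetical): the returned closure lets the same field be
// incremented on several ConnStats values, for example a connection's own
// stats and its torrent's aggregate.
//
//	bump := add(1, func(cs *ConnStats) *Count { return &cs.ChunksReadUseful })
//	for _, cs := range statsToUpdate {
//		bump(cs)
//	}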

// connStatsReadWriter wraps a peer connection's io.ReadWriter, reporting the
// number of raw bytes moved in each direction back to the owning PeerConn.
type connStatsReadWriter struct {
	rw io.ReadWriter
	c  *PeerConn
}

func (me connStatsReadWriter) Write(b []byte) (n int, err error) {
	n, err = me.rw.Write(b)
	me.c.wroteBytes(int64(n))
	return
}

func (me connStatsReadWriter) Read(b []byte) (n int, err error) {
	n, err = me.rw.Read(b)
	me.c.readBytes(int64(n))
	return
}
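
// A wiring sketch (an assumption; the actual hookup happens elsewhere in the
// package, and both names below are placeholders): the wrapper is meant to sit
// between the raw connection and the protocol codec so that all traffic, not
// just chunk payloads, is counted.
//
//	rw := connStatsReadWriter{rw: netConn, c: peerConn}
//	// read from and write to rw instead of netConn from here on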