diff --git a/bad_storage.go b/bad_storage.go index f984c639..fc15beb9 100644 --- a/bad_storage.go +++ b/bad_storage.go @@ -15,11 +15,9 @@ type badStorage struct{} var _ storage.ClientImpl = badStorage{} func (bs badStorage) OpenTorrent(*metainfo.Info, metainfo.Hash) (storage.TorrentImpl, error) { - return bs, nil -} - -func (bs badStorage) Close() error { - return nil + return storage.TorrentImpl{ + Piece: bs.Piece, + }, nil } func (bs badStorage) Piece(p metainfo.Piece) storage.PieceImpl { diff --git a/bencode/decode.go b/bencode/decode.go index 8b22fa73..43bb9ce6 100644 --- a/bencode/decode.go +++ b/bencode/decode.go @@ -205,68 +205,70 @@ func (d *Decoder) parseString(v reflect.Value) error { // Info for parsing a dict value. type dictField struct { - Value reflect.Value // Storage for the parsed value. - // True if field value should be parsed into Value. If false, the value - // should be parsed and discarded. - Ok bool - Set func() // Call this after parsing into Value. - IgnoreUnmarshalTypeError bool + Type reflect.Type + Get func(value reflect.Value) func(reflect.Value) + Tags tag } // Returns specifics for parsing a dict field value. -func getDictField(dict reflect.Value, key string) dictField { +func getDictField(dict reflect.Type, key string) dictField { // get valuev as a map value or as a struct field switch dict.Kind() { case reflect.Map: - value := reflect.New(dict.Type().Elem()).Elem() return dictField{ - Value: value, - Ok: true, - Set: func() { - if dict.IsNil() { - dict.Set(reflect.MakeMap(dict.Type())) + Type: dict.Elem(), + Get: func(mapValue reflect.Value) func(reflect.Value) { + return func(value reflect.Value) { + if mapValue.IsNil() { + mapValue.Set(reflect.MakeMap(dict)) + } + // Assigns the value into the map. + //log.Printf("map type: %v", mapValue.Type()) + mapValue.SetMapIndex(reflect.ValueOf(key).Convert(dict.Key()), value) } - // Assigns the value into the map. 
- dict.SetMapIndex(reflect.ValueOf(key).Convert(dict.Type().Key()), value) }, } case reflect.Struct: - sf, ok := getStructFieldForKey(dict.Type(), key) - if !ok { - return dictField{} - } - if sf.r.PkgPath != "" { - panic(&UnmarshalFieldError{ - Key: key, - Type: dict.Type(), - Field: sf.r, - }) - } - return dictField{ - Value: dict.FieldByIndex(sf.r.Index), - Ok: true, - Set: func() {}, - IgnoreUnmarshalTypeError: sf.tag.IgnoreUnmarshalTypeError(), - } + return getStructFieldForKey(dict, key) + //if sf.r.PkgPath != "" { + // panic(&UnmarshalFieldError{ + // Key: key, + // Type: dict.Type(), + // Field: sf.r, + // }) + //} default: + panic("unimplemented") return dictField{} } } -type structField struct { - r reflect.StructField - tag tag -} - var ( structFieldsMu sync.Mutex - structFields = map[reflect.Type]map[string]structField{} + structFields = map[reflect.Type]map[string]dictField{} ) -func parseStructFields(struct_ reflect.Type, each func(string, structField)) { - for i, n := 0, struct_.NumField(); i < n; i++ { +func parseStructFields(struct_ reflect.Type, each func(key string, df dictField)) { + for _i, n := 0, struct_.NumField(); _i < n; _i++ { + i := _i f := struct_.Field(i) if f.Anonymous { + t := f.Type + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + parseStructFields(t, func(key string, df dictField) { + innerGet := df.Get + df.Get = func(value reflect.Value) func(reflect.Value) { + anonPtr := value.Field(i) + if anonPtr.Kind() == reflect.Ptr && anonPtr.IsNil() { + anonPtr.Set(reflect.New(f.Type.Elem())) + anonPtr = anonPtr.Elem() + } + return innerGet(anonPtr) + } + each(key, df) + }) continue } tagStr := f.Tag.Get("bencode") @@ -278,25 +280,35 @@ func parseStructFields(struct_ reflect.Type, each func(string, structField)) { if key == "" { key = f.Name } - each(key, structField{f, tag}) + each(key, dictField{f.Type, func(value reflect.Value) func(reflect.Value) { + return value.Field(i).Set + }, tag}) } } func saveStructFields(struct_ reflect.Type) { - m := make(map[string]structField) - parseStructFields(struct_, func(key string, sf structField) { + m := make(map[string]dictField) + parseStructFields(struct_, func(key string, sf dictField) { m[key] = sf }) structFields[struct_] = m } -func getStructFieldForKey(struct_ reflect.Type, key string) (f structField, ok bool) { +func getStructFieldForKey(struct_ reflect.Type, key string) (f dictField) { structFieldsMu.Lock() if _, ok := structFields[struct_]; !ok { saveStructFields(struct_) } - f, ok = structFields[struct_][key] + f, ok := structFields[struct_][key] structFieldsMu.Unlock() + if !ok { + var discard interface{} + return dictField{ + Type: reflect.TypeOf(discard), + Get: func(reflect.Value) func(reflect.Value) { return func(reflect.Value) {} }, + Tags: nil, + } + } return } @@ -314,31 +326,33 @@ func (d *Decoder) parseDict(v reflect.Value) error { return nil } - df := getDictField(v, keyStr) + df := getDictField(v.Type(), keyStr) // now we need to actually parse it - if df.Ok { - // log.Printf("parsing ok struct field for key %q", keyStr) - ok, err = d.parseValue(df.Value) - } else { + if df.Type == nil { // Discard the value, there's nowhere to put it. 
var if_ interface{} if_, ok = d.parseValueInterface() if if_ == nil { - err = fmt.Errorf("error parsing value for key %q", keyStr) + return fmt.Errorf("error parsing value for key %q", keyStr) } + if !ok { + return fmt.Errorf("missing value for key %q", keyStr) + } + continue } + setValue := reflect.New(df.Type).Elem() + //log.Printf("parsing into %v", setValue.Type()) + ok, err = d.parseValue(setValue) if err != nil { - if _, ok := err.(*UnmarshalTypeError); !ok || !df.IgnoreUnmarshalTypeError { + if _, ok := err.(*UnmarshalTypeError); !ok || !df.Tags.IgnoreUnmarshalTypeError() { return fmt.Errorf("parsing value for key %q: %s", keyStr, err) } } if !ok { return fmt.Errorf("missing value for key %q", keyStr) } - if df.Ok { - df.Set() - } + df.Get(v)(setValue) } } diff --git a/bencode/decode_test.go b/bencode/decode_test.go index 4b72edbb..056a399a 100644 --- a/bencode/decode_test.go +++ b/bencode/decode_test.go @@ -7,6 +7,7 @@ import ( "reflect" "testing" + qt "github.com/frankban/quicktest" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -144,7 +145,7 @@ func TestIgnoreUnmarshalTypeError(t *testing.T) { }{} require.Error(t, Unmarshal([]byte("d6:Normal5:helloe"), &s)) assert.NoError(t, Unmarshal([]byte("d6:Ignore5:helloe"), &s)) - require.Nil(t, Unmarshal([]byte("d6:Ignorei42ee"), &s)) + qt.Assert(t, Unmarshal([]byte("d6:Ignorei42ee"), &s), qt.IsNil) assert.EqualValues(t, 42, s.Ignore) } diff --git a/bencode/encode.go b/bencode/encode.go index 443c11e7..05153a8c 100644 --- a/bencode/encode.go +++ b/bencode/encode.go @@ -133,13 +133,16 @@ func (e *Encoder) reflectValue(v reflect.Value) { e.reflectString(v.String()) case reflect.Struct: e.writeString("d") - for _, ef := range encodeFields(v.Type()) { - field_value := v.Field(ef.i) - if ef.omit_empty && isEmptyValue(field_value) { + for _, ef := range getEncodeFields(v.Type()) { + fieldValue := ef.i(v) + if !fieldValue.IsValid() { + continue + } + if ef.omitEmpty && isEmptyValue(fieldValue) { continue } e.reflectString(ef.tag) - e.reflectValue(field_value) + e.reflectValue(fieldValue) } e.writeString("e") case reflect.Map: @@ -190,9 +193,9 @@ func (e *Encoder) reflectValue(v reflect.Value) { } type encodeField struct { - i int - tag string - omit_empty bool + i func(v reflect.Value) reflect.Value + tag string + omitEmpty bool } type encodeFieldsSortType []encodeField @@ -206,31 +209,55 @@ var ( encodeFieldsCache = make(map[reflect.Type][]encodeField) ) -func encodeFields(t reflect.Type) []encodeField { +func getEncodeFields(t reflect.Type) []encodeField { typeCacheLock.RLock() fs, ok := encodeFieldsCache[t] typeCacheLock.RUnlock() if ok { return fs } - + fs = makeEncodeFields(t) typeCacheLock.Lock() defer typeCacheLock.Unlock() - fs, ok = encodeFieldsCache[t] - if ok { - return fs - } + encodeFieldsCache[t] = fs + return fs +} - for i, n := 0, t.NumField(); i < n; i++ { +func makeEncodeFields(t reflect.Type) (fs []encodeField) { + for _i, n := 0, t.NumField(); _i < n; _i++ { + i := _i f := t.Field(i) if f.PkgPath != "" { continue } if f.Anonymous { + t := f.Type + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + anonEFs := makeEncodeFields(t) + for aefi := range anonEFs { + anonEF := anonEFs[aefi] + bottomField := anonEF + bottomField.i = func(v reflect.Value) reflect.Value { + v = v.Field(i) + if v.Kind() == reflect.Ptr { + if v.IsNil() { + // This will skip serializing this value. 
+ return reflect.Value{} + } + v = v.Elem() + } + return anonEF.i(v) + } + fs = append(fs, bottomField) + } continue } var ef encodeField - ef.i = i + ef.i = func(v reflect.Value) reflect.Value { + return v.Field(i) + } ef.tag = f.Name tv := getTag(f.Tag) @@ -240,11 +267,10 @@ func encodeFields(t reflect.Type) []encodeField { if tv.Key() != "" { ef.tag = tv.Key() } - ef.omit_empty = tv.OmitEmpty() + ef.omitEmpty = tv.OmitEmpty() fs = append(fs, ef) } fss := encodeFieldsSortType(fs) sort.Sort(fss) - encodeFieldsCache[t] = fs return fs } diff --git a/bencode/tags.go b/bencode/tags.go index 50bdc72b..d4adeb24 100644 --- a/bencode/tags.go +++ b/bencode/tags.go @@ -24,6 +24,9 @@ func (me tag) Key() string { } func (me tag) HasOpt(opt string) bool { + if len(me) < 1 { + return false + } for _, s := range me[1:] { if s == opt { return true diff --git a/client.go b/client.go index cab4c234..2920c263 100644 --- a/client.go +++ b/client.go @@ -2,7 +2,6 @@ package torrent import ( "bufio" - "bytes" "context" "crypto/rand" "encoding/binary" @@ -18,15 +17,14 @@ import ( "github.com/anacrolix/dht/v2" "github.com/anacrolix/dht/v2/krpc" "github.com/anacrolix/log" - "github.com/anacrolix/missinggo/bitmap" "github.com/anacrolix/missinggo/perf" "github.com/anacrolix/missinggo/pubsub" "github.com/anacrolix/missinggo/slices" + "github.com/anacrolix/missinggo/v2" + "github.com/anacrolix/missinggo/v2/bitmap" + "github.com/anacrolix/missinggo/v2/conntrack" "github.com/anacrolix/missinggo/v2/pproffd" "github.com/anacrolix/sync" - "github.com/anacrolix/torrent/internal/limiter" - "github.com/anacrolix/torrent/tracker" - "github.com/anacrolix/torrent/webtorrent" "github.com/davecgh/go-spew/spew" "github.com/dustin/go-humanize" "github.com/google/btree" @@ -34,15 +32,17 @@ import ( "golang.org/x/time/rate" "golang.org/x/xerrors" - "github.com/anacrolix/missinggo/v2" - "github.com/anacrolix/missinggo/v2/conntrack" + "github.com/anacrolix/chansync" "github.com/anacrolix/torrent/bencode" + "github.com/anacrolix/torrent/internal/limiter" "github.com/anacrolix/torrent/iplist" "github.com/anacrolix/torrent/metainfo" "github.com/anacrolix/torrent/mse" pp "github.com/anacrolix/torrent/peer_protocol" "github.com/anacrolix/torrent/storage" + "github.com/anacrolix/torrent/tracker" + "github.com/anacrolix/torrent/webtorrent" ) // Clients contain zero or more Torrents. A Client manages a blocklist, the @@ -81,6 +81,8 @@ type Client struct { websocketTrackers websocketTrackers activeAnnounceLimiter limiter.Instance + + updateRequests chansync.BroadcastCond } type ipStr string @@ -259,7 +261,7 @@ func NewClient(cfg *ClientConfig) (cl *Client, err error) { if err != nil { panic(err) } - cl.dhtServers = append(cl.dhtServers, anacrolixDhtServerWrapper{ds}) + cl.dhtServers = append(cl.dhtServers, AnacrolixDhtServerWrapper{ds}) cl.onClose = append(cl.onClose, func() { ds.Close() }) } } @@ -293,6 +295,8 @@ func NewClient(cfg *ClientConfig) (cl *Client, err error) { }, } + go cl.requester() + return } @@ -311,6 +315,10 @@ func (cl *Client) AddDialer(d Dialer) { } } +func (cl *Client) Listeners() []Listener { + return cl.listeners +} + // Registers a Listener, and starts Accepting on it. You must Close Listeners provided this way // yourself. 
func (cl *Client) AddListener(l Listener) { @@ -955,7 +963,7 @@ func (cl *Client) runHandshookConn(c *PeerConn, t *Torrent) error { return fmt.Errorf("adding connection: %w", err) } defer t.dropConnection(c) - go c.writer(time.Minute) + c.startWriter() cl.sendInitialMessages(c, t) err := c.mainReadLoop() if err != nil { @@ -964,10 +972,15 @@ func (cl *Client) runHandshookConn(c *PeerConn, t *Torrent) error { return nil } +// Maximum pending requests we allow peers to send us. If peer requests are buffered on read, this +// instructs the amount of memory that might be used to cache pending writes. Assuming 512KiB +// (1<<19) cached for sending, for 16KiB (1<<14) chunks. +const localClientReqq = 1 << 5 + // See the order given in Transmission's tr_peerMsgsNew. func (cl *Client) sendInitialMessages(conn *PeerConn, torrent *Torrent) { if conn.PeerExtensionBytes.SupportsExtended() && cl.config.Extensions.SupportsExtended() { - conn.post(pp.Message{ + conn.write(pp.Message{ Type: pp.Extended, ExtendedID: pp.HandshakeExtendedID, ExtendedPayload: func() []byte { @@ -975,11 +988,8 @@ func (cl *Client) sendInitialMessages(conn *PeerConn, torrent *Torrent) { M: map[pp.ExtensionName]pp.ExtensionNumber{ pp.ExtensionNameMetadata: metadataExtendedId, }, - V: cl.config.ExtendedHandshakeClientVersion, - // If peer requests are buffered on read, this instructs the amount of memory - // that might be used to cache pending writes. Assuming 512KiB cached for - // sending, for 16KiB chunks. - Reqq: 1 << 5, + V: cl.config.ExtendedHandshakeClientVersion, + Reqq: localClientReqq, YourIp: pp.CompactIp(conn.remoteIp()), Encryption: cl.config.HeaderObfuscationPolicy.Preferred || !cl.config.HeaderObfuscationPolicy.RequirePreferred, Port: cl.incomingPeerPort(), @@ -999,11 +1009,11 @@ func (cl *Client) sendInitialMessages(conn *PeerConn, torrent *Torrent) { func() { if conn.fastEnabled() { if torrent.haveAllPieces() { - conn.post(pp.Message{Type: pp.HaveAll}) - conn.sentHaves.AddRange(0, bitmap.BitIndex(conn.t.NumPieces())) + conn.write(pp.Message{Type: pp.HaveAll}) + conn.sentHaves.AddRange(0, bitmap.BitRange(conn.t.NumPieces())) return } else if !torrent.haveAnyPieces() { - conn.post(pp.Message{Type: pp.HaveNone}) + conn.write(pp.Message{Type: pp.HaveNone}) conn.sentHaves.Clear() return } @@ -1011,7 +1021,7 @@ func (cl *Client) sendInitialMessages(conn *PeerConn, torrent *Torrent) { conn.postBitfield() }() if conn.PeerExtensionBytes.SupportsDHT() && cl.config.Extensions.SupportsDHT() && cl.haveDhtServer() { - conn.post(pp.Message{ + conn.write(pp.Message{ Type: pp.Port, Port: cl.dhtPort(), }) @@ -1069,12 +1079,12 @@ func (cl *Client) gotMetadataExtensionMsg(payload []byte, t *Torrent, c *PeerCon return err case pp.RequestMetadataExtensionMsgType: if !t.haveMetadataPiece(piece) { - c.post(t.newMetadataExtensionMessage(c, pp.RejectMetadataExtensionMsgType, d["piece"], nil)) + c.write(t.newMetadataExtensionMessage(c, pp.RejectMetadataExtensionMsgType, d["piece"], nil)) return nil } start := (1 << 14) * piece c.logger.WithDefaultLevel(log.Debug).Printf("sending metadata piece %d", piece) - c.post(t.newMetadataExtensionMessage(c, pp.DataMetadataExtensionMsgType, piece, t.metadataBytes[start:start+t.metadataPieceSize(piece)])) + c.write(t.newMetadataExtensionMessage(c, pp.DataMetadataExtensionMsgType, piece, t.metadataBytes[start:start+t.metadataPieceSize(piece)])) return nil case pp.RejectMetadataExtensionMsgType: return nil @@ -1139,7 +1149,6 @@ func (cl *Client) newTorrent(ih metainfo.Hash, specStorage 
storage.ClientImpl) ( webSeeds: make(map[string]*Peer), } t._pendingPieces.NewSet = priorityBitmapStableNewSet - t.requestStrategy = cl.config.DefaultRequestStrategy(t.requestStrategyCallbacks(), &cl._mu) t.logger = cl.logger.WithContextValue(t) t.setChunkSize(defaultChunkSize) return @@ -1403,13 +1412,11 @@ func (cl *Client) newConnection(nc net.Conn, outgoing bool, remoteAddr PeerRemot Network: network, callbacks: &cl.config.Callbacks, }, - connString: connString, - conn: nc, - writeBuffer: new(bytes.Buffer), + connString: connString, + conn: nc, } c.peerImpl = c c.logger = cl.logger.WithDefaultLevel(log.Warning).WithContextValue(c) - c.writerCond.L = cl.locker() c.setRW(connStatsReadWriter{nc, c}) c.r = &rateLimitedReader{ l: cl.config.DownloadRateLimiter, diff --git a/client_test.go b/client_test.go index adcd0c72..84a99f46 100644 --- a/client_test.go +++ b/client_test.go @@ -322,7 +322,7 @@ func TestDhtInheritBlocklist(t *testing.T) { numServers := 0 cl.eachDhtServer(func(s DhtServer) { t.Log(s) - assert.Equal(t, ipl, s.(anacrolixDhtServerWrapper).Server.IPBlocklist()) + assert.Equal(t, ipl, s.(AnacrolixDhtServerWrapper).Server.IPBlocklist()) numServers++ }) assert.EqualValues(t, 2, numServers) @@ -554,6 +554,8 @@ func TestPeerInvalidHave(t *testing.T) { t: tt, }} cn.peerImpl = cn + cl.lock() + defer cl.unlock() assert.NoError(t, cn.peerSentHave(0)) assert.Error(t, cn.peerSentHave(1)) } diff --git a/cmd/torrent/main.go b/cmd/torrent/main.go index 02020e77..374f8b1a 100644 --- a/cmd/torrent/main.go +++ b/cmd/torrent/main.go @@ -169,19 +169,20 @@ var flags struct { type SpewBencodingCmd struct{} type DownloadCmd struct { - Mmap bool `help:"memory-map torrent data"` - TestPeer []string `help:"addresses of some starting peers"` - Seed bool `help:"seed after download is complete"` - Addr string `help:"network listen addr"` - UploadRate *tagflag.Bytes `help:"max piece bytes to send per second"` - DownloadRate *tagflag.Bytes `help:"max bytes per second down from peers"` - PackedBlocklist string - PublicIP net.IP - Progress bool `default:"true"` - PieceStates bool - Quiet bool `help:"discard client logging"` - Stats *bool `help:"print stats at termination"` - Dht bool `default:"true"` + Mmap bool `help:"memory-map torrent data"` + TestPeer []string `help:"addresses of some starting peers"` + Seed bool `help:"seed after download is complete"` + Addr string `help:"network listen addr"` + MaxUnverifiedBytes tagflag.Bytes `help:"maximum number bytes to have pending verification"` + UploadRate *tagflag.Bytes `help:"max piece bytes to send per second"` + DownloadRate *tagflag.Bytes `help:"max bytes per second down from peers"` + PackedBlocklist string + PublicIP net.IP + Progress bool `default:"true"` + PieceStates bool + Quiet bool `help:"discard client logging"` + Stats *bool `help:"print stats at termination"` + Dht bool `default:"true"` TcpPeers bool `default:"true"` UtpPeers bool `default:"true"` @@ -311,6 +312,7 @@ func downloadErr() error { if flags.Quiet { clientConfig.Logger = log.Discard } + clientConfig.MaxUnverifiedBytes = flags.MaxUnverifiedBytes.Int64() var stop missinggo.SynchronizedEvent defer func() { diff --git a/config.go b/config.go index 373ce6fe..37b2b714 100644 --- a/config.go +++ b/config.go @@ -59,6 +59,8 @@ type ClientConfig struct { // (~4096), and the requested chunk size (~16KiB, see // TorrentSpec.ChunkSize). DownloadRateLimiter *rate.Limiter + // Maximum unverified bytes across all torrents. Not used if zero. + MaxUnverifiedBytes int64 // User-provided Client peer ID. 
If not present, one is generated automatically. PeerID string @@ -137,8 +139,6 @@ type ClientConfig struct { // OnQuery hook func DHTOnQuery func(query *krpc.Msg, source net.Addr) (propagate bool) - DefaultRequestStrategy requestStrategyMaker - Extensions PeerExtensionBits DisableWebtorrent bool @@ -185,10 +185,7 @@ func NewDefaultClientConfig() *ClientConfig { CryptoSelector: mse.DefaultCryptoSelector, CryptoProvides: mse.AllSupportedCrypto, ListenPort: 42069, - - DefaultRequestStrategy: RequestStrategyDuplicateRequestTimeout(5 * time.Second), - - Extensions: defaultPeerExtensionBytes(), + Extensions: defaultPeerExtensionBytes(), } //cc.ConnTracker.SetNoMaxEntries() //cc.ConnTracker.Timeout = func(conntrack.Entry) time.Duration { return 0 } diff --git a/conn_stats.go b/conn_stats.go index 3fbc00d8..0c5bfc78 100644 --- a/conn_stats.go +++ b/conn_stats.go @@ -20,9 +20,10 @@ type ConnStats struct { BytesWritten Count BytesWrittenData Count - BytesRead Count - BytesReadData Count - BytesReadUsefulData Count + BytesRead Count + BytesReadData Count + BytesReadUsefulData Count + BytesReadUsefulIntendedData Count ChunksWritten Count @@ -78,14 +79,9 @@ func (cs *ConnStats) wroteMsg(msg *pp.Message) { } } -func (cs *ConnStats) readMsg(msg *pp.Message) { - // We want to also handle extended metadata pieces here, but we wouldn't - // have decoded the extended payload yet. - switch msg.Type { - case pp.Piece: - cs.ChunksRead.Add(1) - cs.BytesReadData.Add(int64(len(msg.Piece))) - } +func (cs *ConnStats) receivedChunk(size int64) { + cs.ChunksRead.Add(1) + cs.BytesReadData.Add(size) } func (cs *ConnStats) incrementPiecesDirtiedGood() { diff --git a/dht.go b/dht.go index 8b0e8471..464090f9 100644 --- a/dht.go +++ b/dht.go @@ -29,11 +29,11 @@ type DhtAnnounce interface { Peers() <-chan dht.PeersValues } -type anacrolixDhtServerWrapper struct { +type AnacrolixDhtServerWrapper struct { *dht.Server } -func (me anacrolixDhtServerWrapper) Stats() interface{} { +func (me AnacrolixDhtServerWrapper) Stats() interface{} { return me.Server.Stats() } @@ -45,13 +45,13 @@ func (me anacrolixDhtAnnounceWrapper) Peers() <-chan dht.PeersValues { return me.Announce.Peers } -func (me anacrolixDhtServerWrapper) Announce(hash [20]byte, port int, impliedPort bool) (DhtAnnounce, error) { +func (me AnacrolixDhtServerWrapper) Announce(hash [20]byte, port int, impliedPort bool) (DhtAnnounce, error) { ann, err := me.Server.Announce(hash, port, impliedPort) return anacrolixDhtAnnounceWrapper{ann}, err } -func (me anacrolixDhtServerWrapper) Ping(addr *net.UDPAddr) { +func (me AnacrolixDhtServerWrapper) Ping(addr *net.UDPAddr) { me.Server.Ping(addr) } -var _ DhtServer = anacrolixDhtServerWrapper{} +var _ DhtServer = AnacrolixDhtServerWrapper{} diff --git a/file.go b/file.go index e2ebcf4d..37c185c1 100644 --- a/file.go +++ b/file.go @@ -66,20 +66,20 @@ func fileBytesLeft( switch numPiecesSpanned { case 0: case 1: - if !torrentCompletedPieces.Get(fileFirstPieceIndex) { + if !torrentCompletedPieces.Get(bitmap.BitIndex(fileFirstPieceIndex)) { left += fileLength } default: - if !torrentCompletedPieces.Get(fileFirstPieceIndex) { + if !torrentCompletedPieces.Get(bitmap.BitIndex(fileFirstPieceIndex)) { left += torrentUsualPieceSize - (fileTorrentOffset % torrentUsualPieceSize) } - if !torrentCompletedPieces.Get(fileEndPieceIndex - 1) { + if !torrentCompletedPieces.Get(bitmap.BitIndex(fileEndPieceIndex - 1)) { left += fileTorrentOffset + fileLength - int64(fileEndPieceIndex-1)*torrentUsualPieceSize } completedMiddlePieces := 
torrentCompletedPieces.Copy() - completedMiddlePieces.RemoveRange(0, fileFirstPieceIndex+1) - completedMiddlePieces.RemoveRange(fileEndPieceIndex-1, bitmap.ToEnd) - left += int64(numPiecesSpanned-2-completedMiddlePieces.Len()) * torrentUsualPieceSize + completedMiddlePieces.RemoveRange(0, bitmap.BitRange(fileFirstPieceIndex+1)) + completedMiddlePieces.RemoveRange(bitmap.BitRange(fileEndPieceIndex-1), bitmap.ToEnd) + left += int64(numPiecesSpanned-2-pieceIndex(completedMiddlePieces.Len())) * torrentUsualPieceSize } return } diff --git a/global.go b/global.go index 1a09b06e..e06d9323 100644 --- a/global.go +++ b/global.go @@ -9,7 +9,6 @@ import ( const ( pieceHash = crypto.SHA1 - maxRequests = 250 // Maximum pending requests we allow peers to send us. defaultChunkSize = 0x4000 // 16KiB ) diff --git a/go.mod b/go.mod index 30b55a09..b5e8dead 100644 --- a/go.mod +++ b/go.mod @@ -5,6 +5,7 @@ require ( crawshaw.io/sqlite v0.3.3-0.20210127221821-98b1f83c5508 github.com/RoaringBitmap/roaring v0.6.0 // indirect github.com/alexflint/go-arg v1.3.0 + github.com/anacrolix/chansync v0.0.0-20210524073341-a336ebc2de92 // indirect github.com/anacrolix/confluence v1.7.1-0.20210311004351-d642adb8546c // indirect github.com/anacrolix/dht/v2 v2.9.1 github.com/anacrolix/envpprof v1.1.1 @@ -12,9 +13,9 @@ require ( github.com/anacrolix/log v0.9.0 github.com/anacrolix/missinggo v1.2.1 github.com/anacrolix/missinggo/perf v1.0.0 - github.com/anacrolix/missinggo/v2 v2.5.0 - github.com/anacrolix/multiless v0.1.0 - github.com/anacrolix/sync v0.2.0 + github.com/anacrolix/missinggo/v2 v2.5.1-0.20210520011502-b3d95d6b1d02 + github.com/anacrolix/multiless v0.1.1-0.20210520040635-10ee7b5f3cff + github.com/anacrolix/sync v0.3.0 github.com/anacrolix/tagflag v1.3.0 github.com/anacrolix/upnp v0.1.2-0.20200416075019-5e9378ed1425 github.com/anacrolix/utp v0.1.0 diff --git a/go.sum b/go.sum index 7cbf393e..f5eb4a2b 100644 --- a/go.sum +++ b/go.sum @@ -8,6 +8,7 @@ cloud.google.com/go v0.37.0 h1:69FNAINiZfsEuwH3fKq8QrAAnHz+2m4XL4kVYi5BX0Q= cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= crawshaw.io/iox v0.0.0-20181124134642-c51c3df30797 h1:yDf7ARQc637HoxDho7xjqdvO5ZA2Yb+xzv/fOnnvZzw= crawshaw.io/iox v0.0.0-20181124134642-c51c3df30797/go.mod h1:sXBiorCo8c46JlQV3oXPKINnZ8mcqnye1EkVkqsectk= +crawshaw.io/sqlite v0.3.2/go.mod h1:igAO5JulrQ1DbdZdtVq48mnZUBAPOeFzer7VhDWNtW4= crawshaw.io/sqlite v0.3.3-0.20210127221821-98b1f83c5508 h1:fILCBBFnjnrQ0whVJlGhfv1E/QiaFDNtGFBObEVRnYg= crawshaw.io/sqlite v0.3.3-0.20210127221821-98b1f83c5508/go.mod h1:igAO5JulrQ1DbdZdtVq48mnZUBAPOeFzer7VhDWNtW4= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3 h1:hJiie5Bf3QucGRa4ymsAUOxyhYwGEz1xrsVk0P8erlw= @@ -55,6 +56,8 @@ github.com/alexflint/go-arg v1.3.0 h1:UfldqSdFWeLtoOuVRosqofU4nmhI1pYEbT4ZFS34Bd github.com/alexflint/go-arg v1.3.0/go.mod h1:9iRbDxne7LcR/GSvEr7ma++GLpdIU1zrghf2y2768kM= github.com/alexflint/go-scalar v1.0.0 h1:NGupf1XV/Xb04wXskDFzS0KWOLH632W/EO4fAFi+A70= github.com/alexflint/go-scalar v1.0.0/go.mod h1:GpHzbCOZXEKMEcygYQ5n/aa4Aq84zbxjy3MxYW0gjYw= +github.com/anacrolix/chansync v0.0.0-20210524073341-a336ebc2de92 h1:WGk37RyXPWcIALJxTkTNrXN3yLQp7hSFa3x5GkrK/Rs= +github.com/anacrolix/chansync v0.0.0-20210524073341-a336ebc2de92/go.mod h1:DZsatdsdXxD0WiwcGl0nJVwyjCKMDv+knl1q2iBjA2k= github.com/anacrolix/confluence v1.7.1-0.20210221224747-9cb14aa2c53a/go.mod h1:T0JHvSaf9UfoiUdCtCOUuRroHm/tauUJTbLc6/vd5YA= github.com/anacrolix/confluence v1.7.1-0.20210221225853-90405640e928/go.mod 
h1:NoLcfoRet+kYttjLXJRmh4qBVrylJsfIItik5GGj21A= github.com/anacrolix/confluence v1.7.1-0.20210311004351-d642adb8546c h1:HfbeiZS/0hwdotwtQhllrd3PagmuLgCN9O8CHJgzPGQ= @@ -108,14 +111,18 @@ github.com/anacrolix/missinggo/v2 v2.3.1/go.mod h1:3XNH0OEmyMUZuvXmYdl+FDfXd0vvS github.com/anacrolix/missinggo/v2 v2.4.1-0.20200227072623-f02f6484f997/go.mod h1:KY+ij+mWvwGuqSuecLjjPv5LFw5ICUc1UvRems3VAZE= github.com/anacrolix/missinggo/v2 v2.5.0 h1:75aciOVrzVV1bTH9rl8tYLbXO9A7HXFtHexTChawe/U= github.com/anacrolix/missinggo/v2 v2.5.0/go.mod h1:HYuCbwvJXY3XbcmcIcTgZXHleoDXawxPWx/YiPzFzV0= +github.com/anacrolix/missinggo/v2 v2.5.1-0.20210520011502-b3d95d6b1d02 h1:wf3HKUunewks4FdGJqkViby+vr3n5/IFpPsyEMokxYE= +github.com/anacrolix/missinggo/v2 v2.5.1-0.20210520011502-b3d95d6b1d02/go.mod h1:WEjqh2rmKECd0t1VhQkLGTdIWXO6f6NLjp5GlMZ+6FA= github.com/anacrolix/mmsg v0.0.0-20180515031531-a4a3ba1fc8bb/go.mod h1:x2/ErsYUmT77kezS63+wzZp8E3byYB0gzirM/WMBLfw= github.com/anacrolix/mmsg v1.0.0 h1:btC7YLjOn29aTUAExJiVUhQOuf/8rhm+/nWCMAnL3Hg= github.com/anacrolix/mmsg v1.0.0/go.mod h1:x8kRaJY/dCrY9Al0PEcj1mb/uFHwP6GCJ9fLl4thEPc= github.com/anacrolix/multiless v0.0.0-20191223025854-070b7994e841/go.mod h1:TrCLEZfIDbMVfLoQt5tOoiBS/uq4y8+ojuEVVvTNPX4= github.com/anacrolix/multiless v0.0.0-20200413040533-acfd16f65d5d/go.mod h1:TrCLEZfIDbMVfLoQt5tOoiBS/uq4y8+ojuEVVvTNPX4= github.com/anacrolix/multiless v0.0.0-20210222022749-ef43011a77ec/go.mod h1:TrCLEZfIDbMVfLoQt5tOoiBS/uq4y8+ojuEVVvTNPX4= -github.com/anacrolix/multiless v0.1.0 h1:gjR3SdJ+E0avnmEoAV/7K7n2kILZhVu/M6aQEtz8H3s= -github.com/anacrolix/multiless v0.1.0/go.mod h1:TrCLEZfIDbMVfLoQt5tOoiBS/uq4y8+ojuEVVvTNPX4= +github.com/anacrolix/multiless v0.1.1-0.20210510014912-3f17cb19bda9 h1:fmNDxh5ysBPinRq249xYYYOLQ/h95DoyGE9e9Gp9xqo= +github.com/anacrolix/multiless v0.1.1-0.20210510014912-3f17cb19bda9/go.mod h1:TrCLEZfIDbMVfLoQt5tOoiBS/uq4y8+ojuEVVvTNPX4= +github.com/anacrolix/multiless v0.1.1-0.20210520040635-10ee7b5f3cff h1:ve99yq2FjiN3OANMjRz2rjCX4f2PSKMf3NeDFnmcs8s= +github.com/anacrolix/multiless v0.1.1-0.20210520040635-10ee7b5f3cff/go.mod h1:TrCLEZfIDbMVfLoQt5tOoiBS/uq4y8+ojuEVVvTNPX4= github.com/anacrolix/stm v0.1.0/go.mod h1:ZKz7e7ERWvP0KgL7WXfRjBXHNRhlVRlbBQecqFtPq+A= github.com/anacrolix/stm v0.1.1-0.20191106051447-e749ba3531cf/go.mod h1:zoVQRvSiGjGoTmbM0vSLIiaKjWtNPeTvXUSdJQA4hsg= github.com/anacrolix/stm v0.2.0/go.mod h1:zoVQRvSiGjGoTmbM0vSLIiaKjWtNPeTvXUSdJQA4hsg= @@ -128,6 +135,10 @@ github.com/anacrolix/sync v0.0.0-20180611022320-3c4cb11f5a01/go.mod h1:+u91KiUuf github.com/anacrolix/sync v0.0.0-20180808010631-44578de4e778/go.mod h1:s735Etp3joe/voe2sdaXLcqDdJSay1O0OPnM0ystjqk= github.com/anacrolix/sync v0.2.0 h1:oRe22/ZB+v7v/5Mbc4d2zE0AXEZy0trKyKLjqYOt6tY= github.com/anacrolix/sync v0.2.0/go.mod h1:BbecHL6jDSExojhNtgTFSBcdGerzNc64tz3DCOj/I0g= +github.com/anacrolix/sync v0.2.1-0.20210520084835-26aa6614542f h1:7KqmZoEOIXa0UbR2WQ/YPF4H+MPV6rhWk4E4tcv5eDg= +github.com/anacrolix/sync v0.2.1-0.20210520084835-26aa6614542f/go.mod h1:BbecHL6jDSExojhNtgTFSBcdGerzNc64tz3DCOj/I0g= +github.com/anacrolix/sync v0.3.0 h1:ZPjTrkqQWEfnYVGTQHh5qNjokWaXnjsyXTJSMsKY0TA= +github.com/anacrolix/sync v0.3.0/go.mod h1:BbecHL6jDSExojhNtgTFSBcdGerzNc64tz3DCOj/I0g= github.com/anacrolix/tagflag v0.0.0-20180109131632-2146c8d41bf0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw= github.com/anacrolix/tagflag v0.0.0-20180605133421-f477c8c2f14c/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw= github.com/anacrolix/tagflag v0.0.0-20180803105420-3a8ff5428f76/go.mod 
h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw= @@ -135,7 +146,6 @@ github.com/anacrolix/tagflag v1.0.0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pm github.com/anacrolix/tagflag v1.0.1/go.mod h1:gb0fiMQ02qU1djCSqaxGmruMvZGrMwSReidMB0zjdxo= github.com/anacrolix/tagflag v1.1.0/go.mod h1:Scxs9CV10NQatSmbyjqmqmeQNwGzlNe0CMUMIxqHIG8= github.com/anacrolix/tagflag v1.1.1-0.20200411025953-9bb5209d56c2/go.mod h1:Scxs9CV10NQatSmbyjqmqmeQNwGzlNe0CMUMIxqHIG8= -github.com/anacrolix/tagflag v1.2.0 h1:WdSv10SpxOI97++f5FUKnKPFkVGMiPlpYm52XPaMkp4= github.com/anacrolix/tagflag v1.2.0/go.mod h1:Scxs9CV10NQatSmbyjqmqmeQNwGzlNe0CMUMIxqHIG8= github.com/anacrolix/tagflag v1.3.0 h1:5NI+9CniDnEH0BWA4UcQbERyFPjKJqZnVkItGVIDy/s= github.com/anacrolix/tagflag v1.3.0/go.mod h1:Scxs9CV10NQatSmbyjqmqmeQNwGzlNe0CMUMIxqHIG8= @@ -268,8 +278,6 @@ github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/getlantern/sqlite v0.3.3-0.20210215090556-4f83cf7731f0 h1:zvFSvII5rTbMZ3idAqSUjUCDgZFbWMKzxQot3/Y7nzA= -github.com/getlantern/sqlite v0.3.3-0.20210215090556-4f83cf7731f0/go.mod h1:igAO5JulrQ1DbdZdtVq48mnZUBAPOeFzer7VhDWNtW4= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1 h1:j3L6gSLQalDETeEg/Jg0mGY0/y/N6zI2xX1978P0Uqw= @@ -424,6 +432,7 @@ github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09 github.com/hashicorp/go.net v0.0.1 h1:sNCoNyDEvN1xa+X0baata4RdcpKwcMS6DH+xwfqPgjw= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= diff --git a/misc.go b/misc.go index 5ad01d85..508f0a63 100644 --- a/misc.go +++ b/misc.go @@ -5,29 +5,27 @@ import ( "net" "github.com/anacrolix/missinggo/v2" + "github.com/anacrolix/torrent/types" "golang.org/x/time/rate" "github.com/anacrolix/torrent/metainfo" pp "github.com/anacrolix/torrent/peer_protocol" ) -type ChunkSpec struct { - Begin, Length pp.Integer -} +type ( + Request = types.Request + ChunkSpec = types.ChunkSpec + piecePriority = types.PiecePriority +) -type Request struct { - Index pp.Integer - ChunkSpec -} - -func (r Request) ToMsg(mt pp.MessageType) pp.Message { - return pp.Message{ - Type: mt, - Index: r.Index, - Begin: r.Begin, - Length: r.Length, - } -} +const ( + PiecePriorityNormal = types.PiecePriorityNormal + PiecePriorityNone = types.PiecePriorityNone + PiecePriorityNow = types.PiecePriorityNow + PiecePriorityReadahead = types.PiecePriorityReadahead + PiecePriorityNext = types.PiecePriorityNext + PiecePriorityHigh = types.PiecePriorityHigh +) func newRequest(index, begin, length pp.Integer) Request { return Request{index, ChunkSpec{begin, length}} @@ -151,6 +149,16 @@ func min(as ...int64) int64 { return ret } +func minInt(as 
...int) int { + ret := as[0] + for _, a := range as[1:] { + if a < ret { + ret = a + } + } + return ret +} + var unlimited = rate.NewLimiter(rate.Inf, 0) type ( diff --git a/misc_test.go b/misc_test.go index f3a37683..5190e4f4 100644 --- a/misc_test.go +++ b/misc_test.go @@ -35,7 +35,7 @@ func BenchmarkIterBitmapsDistinct(t *testing.B) { output := iter.ToSlice(iterBitmapsDistinct(&skipCopy, first, second)) t.StopTimer() assert.Equal(t, []interface{}{0, 3, 2}, output) - assert.Equal(t, []int{1}, skip.ToSortedSlice()) + assert.Equal(t, []bitmap.BitIndex{1}, skip.ToSortedSlice()) } } diff --git a/peer-conn-msg-writer.go b/peer-conn-msg-writer.go new file mode 100644 index 00000000..40fc27ef --- /dev/null +++ b/peer-conn-msg-writer.go @@ -0,0 +1,133 @@ +package torrent + +import ( + "bytes" + "io" + "time" + + "github.com/anacrolix/chansync" + "github.com/anacrolix/log" + "github.com/anacrolix/sync" + + pp "github.com/anacrolix/torrent/peer_protocol" +) + +func (pc *PeerConn) startWriter() { + w := &pc.messageWriter + *w = peerConnMsgWriter{ + fillWriteBuffer: func() { + pc.locker().Lock() + defer pc.locker().Unlock() + if pc.closed.IsSet() { + return + } + pc.fillWriteBuffer() + }, + closed: &pc.closed, + logger: pc.logger, + w: pc.w, + keepAlive: func() bool { + pc.locker().Lock() + defer pc.locker().Unlock() + return pc.useful() + }, + writeBuffer: new(bytes.Buffer), + } + go func() { + defer pc.locker().Unlock() + defer pc.close() + defer pc.locker().Lock() + pc.messageWriter.run(time.Minute) + }() +} + +type peerConnMsgWriter struct { + // Must not be called with the local mutex held, as it will call back into the write method. + fillWriteBuffer func() + closed *chansync.SetOnce + logger log.Logger + w io.Writer + keepAlive func() bool + + mu sync.Mutex + writeCond chansync.BroadcastCond + // Pointer so we can swap with the "front buffer". + writeBuffer *bytes.Buffer +} + +// Routine that writes to the peer. Some of what to write is buffered by +// activity elsewhere in the Client, and some is determined locally when the +// connection is writable. +func (cn *peerConnMsgWriter) run(keepAliveTimeout time.Duration) { + var ( + lastWrite time.Time = time.Now() + keepAliveTimer *time.Timer + ) + keepAliveTimer = time.AfterFunc(keepAliveTimeout, func() { + cn.mu.Lock() + defer cn.mu.Unlock() + if time.Since(lastWrite) >= keepAliveTimeout { + cn.writeCond.Broadcast() + } + keepAliveTimer.Reset(keepAliveTimeout) + }) + cn.mu.Lock() + defer cn.mu.Unlock() + defer keepAliveTimer.Stop() + frontBuf := new(bytes.Buffer) + for { + if cn.closed.IsSet() { + return + } + if cn.writeBuffer.Len() == 0 { + func() { + cn.mu.Unlock() + defer cn.mu.Lock() + cn.fillWriteBuffer() + }() + } + if cn.writeBuffer.Len() == 0 && time.Since(lastWrite) >= keepAliveTimeout && cn.keepAlive() { + cn.writeBuffer.Write(pp.Message{Keepalive: true}.MustMarshalBinary()) + torrent.Add("written keepalives", 1) + } + if cn.writeBuffer.Len() == 0 { + writeCond := cn.writeCond.Signaled() + cn.mu.Unlock() + select { + case <-cn.closed.Done(): + case <-writeCond: + } + cn.mu.Lock() + continue + } + // Flip the buffers. 
+ frontBuf, cn.writeBuffer = cn.writeBuffer, frontBuf + cn.mu.Unlock() + n, err := cn.w.Write(frontBuf.Bytes()) + cn.mu.Lock() + if n != 0 { + lastWrite = time.Now() + keepAliveTimer.Reset(keepAliveTimeout) + } + if err != nil { + cn.logger.WithDefaultLevel(log.Debug).Printf("error writing: %v", err) + return + } + if n != frontBuf.Len() { + panic("short write") + } + frontBuf.Reset() + } +} + +func (cn *peerConnMsgWriter) write(msg pp.Message) bool { + cn.mu.Lock() + defer cn.mu.Unlock() + cn.writeBuffer.Write(msg.MustMarshalBinary()) + cn.writeCond.Broadcast() + return !cn.writeBufferFull() +} + +func (cn *peerConnMsgWriter) writeBufferFull() bool { + return cn.writeBuffer.Len() >= writeBufferHighWaterLen +} diff --git a/peer-impl.go b/peer-impl.go index a04a160b..23c0fbb9 100644 --- a/peer-impl.go +++ b/peer-impl.go @@ -8,16 +8,20 @@ import ( // BitTorrent protocol connections. Some methods are underlined so as to avoid collisions with // legacy PeerConn methods. type peerImpl interface { + onNextRequestStateChanged() updateRequests() writeInterested(interested bool) bool - cancel(Request) bool - // Return true if there's room for more activity. - request(Request) bool + + // Neither of these return buffer room anymore, because they're currently both posted. There's + // also PeerConn.writeBufferFull for when/where it matters. + _cancel(Request) bool + _request(Request) bool + connectionFlags() string onClose() - _postCancel(Request) onGotInfo(*metainfo.Info) drop() String() string connStatusString() string + writeBufferFull() bool } diff --git a/peerconn.go b/peerconn.go index b26f4398..b35564ac 100644 --- a/peerconn.go +++ b/peerconn.go @@ -8,22 +8,23 @@ import ( "io" "math/rand" "net" + "sort" "strconv" "strings" - "sync" "time" "github.com/anacrolix/log" - "github.com/anacrolix/missinggo" "github.com/anacrolix/missinggo/iter" "github.com/anacrolix/missinggo/v2/bitmap" "github.com/anacrolix/missinggo/v2/prioritybitmap" "github.com/anacrolix/multiless" - "github.com/anacrolix/torrent/metainfo" + "github.com/anacrolix/chansync" "github.com/anacrolix/torrent/bencode" + "github.com/anacrolix/torrent/metainfo" "github.com/anacrolix/torrent/mse" pp "github.com/anacrolix/torrent/peer_protocol" + request_strategy "github.com/anacrolix/torrent/request-strategy" ) type PeerSource string @@ -46,6 +47,13 @@ type PeerRemoteAddr interface { String() string } +// Since we have to store all the requests in memory, we can't reasonably exceed what would be +// indexable with the memory space available. +type ( + maxRequests = int + requestState = request_strategy.PeerNextRequestState +) + type Peer struct { // First to ensure 64-bit alignment for atomics. See #262. _stats ConnStats @@ -63,7 +71,7 @@ type Peer struct { cryptoMethod mse.CryptoMethod Discovery PeerSource trusted bool - closed missinggo.Event + closed chansync.SetOnce // Set true after we've added our ConnStats generated during handshake to // other ConnStat instances as determined when the *Torrent became known. reconciledHandshakeStats bool @@ -74,7 +82,8 @@ type Peer struct { lastChunkSent time.Time // Stuff controlled by the local peer. 
- interested bool + nextRequestState requestState + actualRequestState requestState lastBecameInterested time.Time priorInterest time.Duration @@ -82,9 +91,9 @@ type Peer struct { cumulativeExpectedToReceiveChunks time.Duration _chunksReceivedWhileExpecting int64 - choking bool - requests map[Request]struct{} - requestsLowWater int + choking bool + piecesReceivedSinceLastRequestUpdate maxRequests + maxPiecesReceivedBetweenRequestUpdates maxRequests // Chunks that we might reasonably expect to receive from the peer. Due to // latency, buffering, and implementation differences, we may receive // chunks that are no longer in the set of requests actually want. @@ -113,7 +122,7 @@ type Peer struct { peerTouchedPieces map[pieceIndex]struct{} peerAllowedFast bitmap.Bitmap - PeerMaxRequests int // Maximum pending requests the peer allows. + PeerMaxRequests maxRequests // Maximum pending requests the peer allows. PeerExtensionIDs map[pp.ExtensionName]pp.ExtensionNumber PeerClientName string @@ -142,11 +151,10 @@ type PeerConn struct { w io.Writer r io.Reader - writeBuffer *bytes.Buffer - uploadTimer *time.Timer - writerCond sync.Cond + messageWriter peerConnMsgWriter - pex pexConnState + uploadTimer *time.Timer + pex pexConnState } func (cn *PeerConn) connStatusString() string { @@ -167,7 +175,22 @@ func (cn *Peer) updateExpectingChunks() { } func (cn *Peer) expectingChunks() bool { - return len(cn.requests) != 0 && !cn.peerChoking + if len(cn.actualRequestState.Requests) == 0 { + return false + } + if !cn.actualRequestState.Interested { + return false + } + for r := range cn.actualRequestState.Requests { + if !cn.remoteChokingPiece(r.Index.Int()) { + return true + } + } + return false +} + +func (cn *Peer) remoteChokingPiece(piece pieceIndex) bool { + return cn.peerChoking && !cn.peerAllowedFast.Contains(bitmap.BitIndex(piece)) } // Returns true if the connection is over IPv6. @@ -198,7 +221,7 @@ func (l *PeerConn) hasPreferredNetworkOver(r *PeerConn) (left, ok bool) { func (cn *Peer) cumInterest() time.Duration { ret := cn.priorInterest - if cn.interested { + if cn.actualRequestState.Interested { ret += time.Since(cn.lastBecameInterested) } return ret @@ -211,7 +234,7 @@ func (cn *Peer) peerHasAllPieces() (all bool, known bool) { if !cn.t.haveInfo() { return false, false } - return bitmap.Flip(cn._peerPieces, 0, bitmap.BitIndex(cn.t.numPieces())).IsEmpty(), true + return bitmap.Flip(cn._peerPieces, 0, bitmap.BitRange(cn.t.numPieces())).IsEmpty(), true } func (cn *PeerConn) locker() *lockWithDeferreds { @@ -246,7 +269,7 @@ func (cn *PeerConn) onGotInfo(info *metainfo.Info) { // Correct the PeerPieces slice length. Return false if the existing slice is invalid, such as by // receiving badly sized BITFIELD, or invalid HAVE messages. 
func (cn *PeerConn) setNumPieces(num pieceIndex) { - cn._peerPieces.RemoveRange(bitmap.BitIndex(num), bitmap.ToEnd) + cn._peerPieces.RemoveRange(bitmap.BitRange(num), bitmap.ToEnd) cn.peerPiecesChanged() } @@ -282,7 +305,7 @@ func (cn *Peer) statusFlags() (ret string) { c := func(b byte) { ret += string([]byte{b}) } - if cn.interested { + if cn.actualRequestState.Interested { c('i') } if cn.choking { @@ -300,14 +323,20 @@ func (cn *Peer) statusFlags() (ret string) { return } -// func (cn *connection) String() string { -// var buf bytes.Buffer -// cn.writeStatus(&buf, nil) -// return buf.String() -// } - func (cn *Peer) downloadRate() float64 { - return float64(cn._stats.BytesReadUsefulData.Int64()) / cn.cumInterest().Seconds() + num := cn._stats.BytesReadUsefulData.Int64() + if num == 0 { + return 0 + } + return float64(num) / cn.totalExpectingTime().Seconds() +} + +func (cn *Peer) numRequestsByPiece() (ret map[pieceIndex]int) { + ret = make(map[pieceIndex]int) + for r := range cn.actualRequestState.Requests { + ret[pieceIndex(r.Index)]++ + } + return } func (cn *Peer) writeStatus(w io.Writer, t *Torrent) { @@ -316,6 +345,12 @@ func (cn *Peer) writeStatus(w io.Writer, t *Torrent) { fmt.Fprint(w, "CLOSED: ") } fmt.Fprintln(w, cn.connStatusString()) + prio, err := cn.peerPriority() + prioStr := fmt.Sprintf("%08x", prio) + if err != nil { + prioStr += ": " + err.Error() + } + fmt.Fprintf(w, " bep40-prio: %v\n", prioStr) fmt.Fprintf(w, " last msg: %s, connected: %s, last helpful: %s, itime: %s, etime: %s\n", eventAgeString(cn.lastMessageReceived), eventAgeString(cn.completedHandshake), @@ -324,40 +359,50 @@ func (cn *Peer) writeStatus(w io.Writer, t *Torrent) { cn.totalExpectingTime(), ) fmt.Fprintf(w, - " %s completed, %d pieces touched, good chunks: %v/%v-%v reqq: (%d,%d,%d]-%d, flags: %s, dr: %.1f KiB/s\n", + " %s completed, %d pieces touched, good chunks: %v/%v-%v reqq: %d/(%d/%d)-%d/%d, flags: %s, dr: %.1f KiB/s\n", cn.completedString(), len(cn.peerTouchedPieces), &cn._stats.ChunksReadUseful, &cn._stats.ChunksRead, &cn._stats.ChunksWritten, - cn.requestsLowWater, cn.numLocalRequests(), cn.nominalMaxRequests(), + cn.PeerMaxRequests, len(cn.peerRequests), + localClientReqq, cn.statusFlags(), cn.downloadRate()/(1<<10), ) - fmt.Fprintf(w, " next pieces: %v%s\n", - iter.ToSlice(iter.Head(10, cn.iterPendingPiecesUntyped)), - func() string { - if cn == t.fastestPeer { - return " (fastest)" - } else { - return "" - } - }(), - ) + fmt.Fprintf(w, " requested pieces:") + type pieceNumRequestsType struct { + piece pieceIndex + numRequests int + } + var pieceNumRequests []pieceNumRequestsType + for piece, count := range cn.numRequestsByPiece() { + pieceNumRequests = append(pieceNumRequests, pieceNumRequestsType{piece, count}) + } + sort.Slice(pieceNumRequests, func(i, j int) bool { + return pieceNumRequests[i].piece < pieceNumRequests[j].piece + }) + for _, elem := range pieceNumRequests { + fmt.Fprintf(w, " %v(%v)", elem.piece, elem.numRequests) + } + fmt.Fprintf(w, "\n") } -func (cn *Peer) close() { - if !cn.closed.Set() { +func (p *Peer) close() { + if !p.closed.Set() { return } - cn.discardPieceInclination() - cn._pieceRequestOrder.Clear() - cn.peerImpl.onClose() - for _, f := range cn.callbacks.PeerClosed { - f(cn) + p.discardPieceInclination() + p._pieceRequestOrder.Clear() + p.peerImpl.onClose() + if p.t != nil { + p.t.decPeerPieceAvailability(p) + } + for _, f := range p.callbacks.PeerClosed { + f(p) } } @@ -382,27 +427,18 @@ func (cn *Peer) peerHasPiece(piece pieceIndex) bool { // 
https://github.com/pion/datachannel/issues/59 is fixed. const writeBufferHighWaterLen = 1 << 15 -// Writes a message into the write buffer. Returns whether it's okay to keep writing. Posting is -// done asynchronously, so it may be that we're not able to honour backpressure from this method. It -// might be possible to merge this with PeerConn.write down the track? They seem to be very similar. -func (cn *PeerConn) post(msg pp.Message) bool { - torrent.Add(fmt.Sprintf("messages posted of type %s", msg.Type.String()), 1) - // We don't need to track bytes here because a connection.w Writer wrapper takes care of that - // (although there's some delay between us recording the message, and the connection writer +// Writes a message into the write buffer. Returns whether it's okay to keep writing. Writing is +// done asynchronously, so it may be that we're not able to honour backpressure from this method. +func (cn *PeerConn) write(msg pp.Message) bool { + torrent.Add(fmt.Sprintf("messages written of type %s", msg.Type.String()), 1) + // We don't need to track bytes here because the connection's Writer has that behaviour injected + // (although there's some delay between us buffering the message, and the connection writer // flushing it out.). - cn.writeBuffer.Write(msg.MustMarshalBinary()) - // Last I checked only Piece messages affect stats, and we don't post those. + notFull := cn.messageWriter.write(msg) + // Last I checked only Piece messages affect stats, and we don't write those. cn.wroteMsg(&msg) cn.tickleWriter() - return cn.writeBuffer.Len() < writeBufferHighWaterLen -} - -// Returns true if there's room to write more. -func (cn *PeerConn) write(msg pp.Message) bool { - cn.wroteMsg(&msg) - cn.writeBuffer.Write(msg.MustMarshalBinary()) - torrent.Add(fmt.Sprintf("messages filled of type %s", msg.Type.String()), 1) - return cn.writeBuffer.Len() < writeBufferHighWaterLen + return notFull } func (cn *PeerConn) requestMetadataPiece(index int) { @@ -414,7 +450,7 @@ func (cn *PeerConn) requestMetadataPiece(index int) { return } cn.logger.WithDefaultLevel(log.Debug).Printf("requesting metadata piece %d", index) - cn.post(pp.Message{ + cn.write(pp.Message{ Type: pp.Extended, ExtendedID: eID, ExtendedPayload: func() []byte { @@ -439,12 +475,8 @@ func (cn *PeerConn) requestedMetadataPiece(index int) bool { } // The actual value to use as the maximum outbound requests. -func (cn *Peer) nominalMaxRequests() (ret int) { - return int(clamp( - 1, - int64(cn.PeerMaxRequests), - int64(cn.t.requestStrategy.nominalMaxRequests(cn.requestStrategyConnection())), - )) +func (cn *Peer) nominalMaxRequests() (ret maxRequests) { + return int(clamp(1, 2*int64(cn.maxPiecesReceivedBetweenRequestUpdates), int64(cn.PeerMaxRequests))) } func (cn *Peer) totalExpectingTime() (ret time.Duration) { @@ -498,10 +530,10 @@ func (cn *PeerConn) unchoke(msg func(pp.Message) bool) bool { } func (cn *Peer) setInterested(interested bool) bool { - if cn.interested == interested { + if cn.actualRequestState.Interested == interested { return true } - cn.interested = interested + cn.actualRequestState.Interested = interested if interested { cn.lastBecameInterested = time.Now() } else if !cn.lastBecameInterested.IsZero() { @@ -528,12 +560,9 @@ func (pc *PeerConn) writeInterested(interested bool) bool { // are okay. 
type messageWriter func(pp.Message) bool -func (cn *Peer) request(r Request) bool { - if _, ok := cn.requests[r]; ok { - panic("chunk already requested") - } +func (cn *Peer) shouldRequest(r Request) error { if !cn.peerHasPiece(pieceIndex(r.Index)) { - panic("requesting piece peer doesn't have") + return errors.New("requesting piece peer doesn't have") } if !cn.t.peerIsActive(cn) { panic("requesting but not in active conns") @@ -541,37 +570,42 @@ func (cn *Peer) request(r Request) bool { if cn.closed.IsSet() { panic("requesting when connection is closed") } - if cn.peerChoking { - if cn.peerAllowedFast.Get(int(r.Index)) { - torrent.Add("allowed fast requests sent", 1) - } else { - panic("requesting while choking and not allowed fast") - } - } if cn.t.hashingPiece(pieceIndex(r.Index)) { panic("piece is being hashed") } if cn.t.pieceQueuedForHash(pieceIndex(r.Index)) { panic("piece is queued for hash") } - if cn.requests == nil { - cn.requests = make(map[Request]struct{}) + return nil +} + +func (cn *Peer) request(r Request) (more bool, err error) { + if err := cn.shouldRequest(r); err != nil { + panic(err) } - cn.requests[r] = struct{}{} + if _, ok := cn.actualRequestState.Requests[r]; ok { + return true, nil + } + if cn.numLocalRequests() >= cn.nominalMaxRequests() { + return true, errors.New("too many outstanding requests") + } + if cn.actualRequestState.Requests == nil { + cn.actualRequestState.Requests = make(map[Request]struct{}) + } + cn.actualRequestState.Requests[r] = struct{}{} if cn.validReceiveChunks == nil { cn.validReceiveChunks = make(map[Request]int) } cn.validReceiveChunks[r]++ cn.t.pendingRequests[r]++ - cn.t.requestStrategy.hooks().sentRequest(r) cn.updateExpectingChunks() for _, f := range cn.callbacks.SentRequest { f(PeerRequestEvent{cn, r}) } - return cn.peerImpl.request(r) + return cn.peerImpl._request(r), nil } -func (me *PeerConn) request(r Request) bool { +func (me *PeerConn) _request(r Request) bool { return me.write(pp.Message{ Type: pp.Request, Index: r.Index, @@ -580,66 +614,19 @@ func (me *PeerConn) request(r Request) bool { }) } -func (me *PeerConn) cancel(r Request) bool { - return me.write(makeCancelMessage(r)) -} - -func (cn *Peer) doRequestState() bool { - if !cn.t.networkingEnabled || cn.t.dataDownloadDisallowed { - if !cn.setInterested(false) { - return false - } - if len(cn.requests) != 0 { - for r := range cn.requests { - cn.deleteRequest(r) - // log.Printf("%p: cancelling request: %v", cn, r) - if !cn.peerImpl.cancel(r) { - return false - } - } - } - } else if len(cn.requests) <= cn.requestsLowWater { - filledBuffer := false - cn.iterPendingPieces(func(pieceIndex pieceIndex) bool { - cn.iterPendingRequests(pieceIndex, func(r Request) bool { - if !cn.setInterested(true) { - filledBuffer = true - return false - } - if len(cn.requests) >= cn.nominalMaxRequests() { - return false - } - // Choking is looked at here because our interest is dependent - // on whether we'd make requests in its absence. - if cn.peerChoking { - if !cn.peerAllowedFast.Get(bitmap.BitIndex(r.Index)) { - return false - } - } - if _, ok := cn.requests[r]; ok { - return true - } - filledBuffer = !cn.request(r) - return !filledBuffer - }) - return !filledBuffer - }) - if filledBuffer { - // If we didn't completely top up the requests, we shouldn't mark - // the low water, since we'll want to top up the requests as soon - // as we have more write buffer space. 
- return false - } - cn.requestsLowWater = len(cn.requests) / 2 - if len(cn.requests) == 0 { - return cn.setInterested(false) - } +func (me *Peer) cancel(r Request) bool { + if me.deleteRequest(r) { + return me.peerImpl._cancel(r) } return true } +func (me *PeerConn) _cancel(r Request) bool { + return me.write(makeCancelMessage(r)) +} + func (cn *PeerConn) fillWriteBuffer() { - if !cn.doRequestState() { + if !cn.applyNextRequestState() { return } if cn.pex.IsEnabled() { @@ -650,68 +637,11 @@ func (cn *PeerConn) fillWriteBuffer() { cn.upload(cn.write) } -// Routine that writes to the peer. Some of what to write is buffered by -// activity elsewhere in the Client, and some is determined locally when the -// connection is writable. -func (cn *PeerConn) writer(keepAliveTimeout time.Duration) { - var ( - lastWrite time.Time = time.Now() - keepAliveTimer *time.Timer - ) - keepAliveTimer = time.AfterFunc(keepAliveTimeout, func() { - cn.locker().Lock() - defer cn.locker().Unlock() - if time.Since(lastWrite) >= keepAliveTimeout { - cn.tickleWriter() - } - keepAliveTimer.Reset(keepAliveTimeout) - }) - cn.locker().Lock() - defer cn.locker().Unlock() - defer cn.close() - defer keepAliveTimer.Stop() - frontBuf := new(bytes.Buffer) - for { - if cn.closed.IsSet() { - return - } - if cn.writeBuffer.Len() == 0 { - cn.fillWriteBuffer() - } - if cn.writeBuffer.Len() == 0 && time.Since(lastWrite) >= keepAliveTimeout && cn.useful() { - cn.writeBuffer.Write(pp.Message{Keepalive: true}.MustMarshalBinary()) - torrent.Add("written keepalives", 1) - } - if cn.writeBuffer.Len() == 0 { - // TODO: Minimize wakeups.... - cn.writerCond.Wait() - continue - } - // Flip the buffers. - frontBuf, cn.writeBuffer = cn.writeBuffer, frontBuf - cn.locker().Unlock() - n, err := cn.w.Write(frontBuf.Bytes()) - cn.locker().Lock() - if n != 0 { - lastWrite = time.Now() - keepAliveTimer.Reset(keepAliveTimeout) - } - if err != nil { - cn.logger.WithDefaultLevel(log.Debug).Printf("error writing: %v", err) - return - } - if n != frontBuf.Len() { - panic("short write") - } - frontBuf.Reset() - } -} - func (cn *PeerConn) have(piece pieceIndex) { if cn.sentHaves.Get(bitmap.BitIndex(piece)) { return } - cn.post(pp.Message{ + cn.write(pp.Message{ Type: pp.Have, Index: pp.Integer(piece), }) @@ -725,7 +655,7 @@ func (cn *PeerConn) postBitfield() { if !cn.t.haveAnyPieces() { return } - cn.post(pp.Message{ + cn.write(pp.Message{ Type: pp.Bitfield, Bitfield: cn.t.bitfield(), }) @@ -733,8 +663,7 @@ func (cn *PeerConn) postBitfield() { } func (cn *PeerConn) updateRequests() { - // log.Print("update requests") - cn.tickleWriter() + cn.t.cl.tickleRequester() } // Emits the indices in the Bitmaps bms in order, never repeating any index. 
@@ -743,10 +672,13 @@ func (cn *PeerConn) updateRequests() { func iterBitmapsDistinct(skip *bitmap.Bitmap, bms ...bitmap.Bitmap) iter.Func { return func(cb iter.Callback) { for _, bm := range bms { - bm.Sub(*skip) if !iter.All( - func(i interface{}) bool { - skip.Add(i.(int)) + func(_i interface{}) bool { + i := _i.(int) + if skip.Contains(bitmap.BitIndex(i)) { + return true + } + skip.Add(bitmap.BitIndex(i)) return cb(i) }, bm.Iter, @@ -757,65 +689,9 @@ func iterBitmapsDistinct(skip *bitmap.Bitmap, bms ...bitmap.Bitmap) iter.Func { } } -func iterUnbiasedPieceRequestOrder(cn requestStrategyConnection, f func(piece pieceIndex) bool) bool { - now, readahead := cn.torrent().readerPiecePriorities() - skip := bitmap.Flip(cn.peerPieces(), 0, cn.torrent().numPieces()) - skip.Union(cn.torrent().ignorePieces()) - // Return an iterator over the different priority classes, minus the skip pieces. - return iter.All( - func(_piece interface{}) bool { - return f(pieceIndex(_piece.(bitmap.BitIndex))) - }, - iterBitmapsDistinct(&skip, now, readahead), - // We have to iterate _pendingPieces separately because it isn't a Bitmap. - func(cb iter.Callback) { - cn.torrent().pendingPieces().IterTyped(func(piece int) bool { - if skip.Contains(piece) { - return true - } - more := cb(piece) - skip.Add(piece) - return more - }) - }, - ) -} - -// The connection should download highest priority pieces first, without any inclination toward -// avoiding wastage. Generally we might do this if there's a single connection, or this is the -// fastest connection, and we have active readers that signal an ordering preference. It's -// conceivable that the best connection should do this, since it's least likely to waste our time if -// assigned to the highest priority pieces, and assigning more than one this role would cause -// significant wasted bandwidth. 
-func (cn *Peer) shouldRequestWithoutBias() bool { - return cn.t.requestStrategy.shouldRequestWithoutBias(cn.requestStrategyConnection()) -} - -func (cn *Peer) iterPendingPieces(f func(pieceIndex) bool) { - if !cn.t.haveInfo() { - return - } - if cn.closed.IsSet() { - return - } - cn.t.requestStrategy.iterPendingPieces(cn, f) -} -func (cn *Peer) iterPendingPiecesUntyped(f iter.Callback) { - cn.iterPendingPieces(func(i pieceIndex) bool { return f(i) }) -} - -func (cn *Peer) iterPendingRequests(piece pieceIndex, f func(Request) bool) bool { - return cn.t.requestStrategy.iterUndirtiedChunks( - cn.t.piece(piece).requestStrategyPiece(), - func(cs ChunkSpec) bool { - return f(Request{pp.Integer(piece), cs}) - }, - ) -} - // check callers updaterequests func (cn *Peer) stopRequestingPiece(piece pieceIndex) bool { - return cn._pieceRequestOrder.Remove(bitmap.BitIndex(piece)) + return cn._pieceRequestOrder.Remove(piece) } // This is distinct from Torrent piece priority, which is the user's @@ -831,8 +707,7 @@ func (cn *Peer) updatePiecePriority(piece pieceIndex) bool { return cn.stopRequestingPiece(piece) } prio := cn.getPieceInclination()[piece] - prio = cn.t.requestStrategy.piecePriority(cn, piece, tpp, prio) - return cn._pieceRequestOrder.Set(bitmap.BitIndex(piece), prio) || cn.shouldRequestWithoutBias() + return cn._pieceRequestOrder.Set(piece, prio) } func (cn *Peer) getPieceInclination() []int { @@ -850,7 +725,7 @@ func (cn *Peer) discardPieceInclination() { cn.pieceInclination = nil } -func (cn *PeerConn) peerPiecesChanged() { +func (cn *Peer) peerPiecesChanged() { if cn.t.haveInfo() { prioritiesChanged := false for i := pieceIndex(0); i < cn.t.numPieces(); i++ { @@ -862,7 +737,7 @@ func (cn *PeerConn) peerPiecesChanged() { cn.updateRequests() } } - cn.t.maybeDropMutuallyCompletePeer(&cn.Peer) + cn.t.maybeDropMutuallyCompletePeer(cn) } func (cn *PeerConn) raisePeerMinPieces(newMin pieceIndex) { @@ -879,6 +754,9 @@ func (cn *PeerConn) peerSentHave(piece pieceIndex) error { return nil } cn.raisePeerMinPieces(piece + 1) + if !cn.peerHasPiece(piece) { + cn.t.incPieceAvailability(piece) + } cn._peerPieces.Set(bitmap.BitIndex(piece), true) cn.t.maybeDropMutuallyCompletePeer(&cn.Peer) if cn.updatePiecePriority(piece) { @@ -888,35 +766,56 @@ func (cn *PeerConn) peerSentHave(piece pieceIndex) error { } func (cn *PeerConn) peerSentBitfield(bf []bool) error { - cn.peerSentHaveAll = false if len(bf)%8 != 0 { panic("expected bitfield length divisible by 8") } - // We know that the last byte means that at most the last 7 bits are - // wasted. + // We know that the last byte means that at most the last 7 bits are wasted. cn.raisePeerMinPieces(pieceIndex(len(bf) - 7)) if cn.t.haveInfo() && len(bf) > int(cn.t.numPieces()) { // Ignore known excess pieces. 
bf = bf[:cn.t.numPieces()] } + pp := cn.newPeerPieces() + cn.peerSentHaveAll = false for i, have := range bf { if have { cn.raisePeerMinPieces(pieceIndex(i) + 1) + if !pp.Contains(bitmap.BitIndex(i)) { + cn.t.incPieceAvailability(i) + } + } else { + if pp.Contains(bitmap.BitIndex(i)) { + cn.t.decPieceAvailability(i) + } } - cn._peerPieces.Set(i, have) + cn._peerPieces.Set(bitmap.BitIndex(i), have) } cn.peerPiecesChanged() return nil } -func (cn *PeerConn) onPeerSentHaveAll() error { +func (cn *Peer) onPeerHasAllPieces() { + t := cn.t + if t.haveInfo() { + pp := cn.newPeerPieces() + for i := range iter.N(t.numPieces()) { + if !pp.Contains(bitmap.BitIndex(i)) { + t.incPieceAvailability(i) + } + } + } cn.peerSentHaveAll = true cn._peerPieces.Clear() cn.peerPiecesChanged() +} + +func (cn *PeerConn) onPeerSentHaveAll() error { + cn.onPeerHasAllPieces() return nil } func (cn *PeerConn) peerSentHaveNone() error { + cn.t.decPeerPieceAvailability(&cn.Peer) cn._peerPieces.Clear() cn.peerSentHaveAll = false cn.peerPiecesChanged() @@ -957,10 +856,6 @@ func (cn *PeerConn) wroteMsg(msg *pp.Message) { cn.allStats(func(cs *ConnStats) { cs.wroteMsg(msg) }) } -func (cn *PeerConn) readMsg(msg *pp.Message) { - cn.allStats(func(cs *ConnStats) { cs.readMsg(msg) }) -} - // After handshake, we know what Torrent and Client stats to include for a // connection. func (cn *Peer) postHandshakeStats(f func(*ConnStats)) { @@ -1022,7 +917,7 @@ func (c *PeerConn) reject(r Request) { if !c.fastEnabled() { panic("fast not enabled") } - c.post(r.ToMsg(pp.Reject)) + c.write(r.ToMsg(pp.Reject)) delete(c.peerRequests, r) } @@ -1040,7 +935,8 @@ func (c *PeerConn) onReadRequest(r Request) error { } return nil } - if len(c.peerRequests) >= maxRequests { + // TODO: What if they've already requested this? 
+ if len(c.peerRequests) >= localClientReqq { torrent.Add("requests received while queue full", 1) if c.fastEnabled() { c.reject(r) @@ -1061,7 +957,7 @@ func (c *PeerConn) onReadRequest(r Request) error { return errors.New("bad Request") } if c.peerRequests == nil { - c.peerRequests = make(map[Request]*peerRequestState, maxRequests) + c.peerRequests = make(map[Request]*peerRequestState, localClientReqq) } value := &peerRequestState{} c.peerRequests[r] = value @@ -1104,7 +1000,7 @@ func (c *PeerConn) peerRequestDataReadFailed(err error, r Request) { if c.choking { c.logger.WithDefaultLevel(log.Warning).Printf("already choking peer, requests might not be rejected correctly") } - c.choke(c.post) + c.choke(c.write) } func readPeerRequestData(r Request, c *PeerConn) ([]byte, error) { @@ -1165,7 +1061,6 @@ func (c *PeerConn) mainReadLoop() (err error) { if err != nil { return err } - c.readMsg(&msg) c.lastMessageReceived = time.Now() if msg.Keepalive { receivedKeepalives.Add(1) @@ -1205,6 +1100,7 @@ func (c *PeerConn) mainReadLoop() (err error) { r := newRequestFromMessage(&msg) err = c.onReadRequest(r) case pp.Piece: + c.doChunkReadStats(int64(len(msg.Piece))) err = c.receiveChunk(&msg) if len(msg.Piece) == int(t.chunkSize) { t.chunkPool.Put(&msg.Piece) @@ -1243,7 +1139,7 @@ func (c *PeerConn) mainReadLoop() (err error) { case pp.AllowedFast: torrent.Add("allowed fasts received", 1) log.Fmsg("peer allowed fast: %d", msg.Index).AddValues(c).SetLevel(log.Debug).Log(c.t.logger) - c.peerAllowedFast.Add(int(msg.Index)) + c.peerAllowedFast.Add(bitmap.BitIndex(msg.Index)) c.updateRequests() case pp.Extended: err = c.onReadExtendedMsg(msg.ExtendedID, msg.ExtendedPayload) @@ -1353,10 +1249,12 @@ func (cn *PeerConn) rw() io.ReadWriter { }{cn.r, cn.w} } +func (c *Peer) doChunkReadStats(size int64) { + c.allStats(func(cs *ConnStats) { cs.receivedChunk(size) }) +} + // Handle a received chunk from a peer. func (c *Peer) receiveChunk(msg *pp.Message) error { - t := c.t - cl := t.cl chunksReceived.Add("total", 1) req := newRequestFromMessage(msg) @@ -1371,21 +1269,24 @@ func (c *Peer) receiveChunk(msg *pp.Message) error { } c.decExpectedChunkReceive(req) - if c.peerChoking && c.peerAllowedFast.Get(int(req.Index)) { + if c.peerChoking && c.peerAllowedFast.Get(bitmap.BitIndex(req.Index)) { chunksReceived.Add("due to allowed fast", 1) } - // TODO: This needs to happen immediately, to prevent cancels occurring asynchronously when have - // actually already received the piece, while we have the Client unlocked to write the data out. + // The request needs to be deleted immediately to prevent cancels occurring asynchronously when + // have actually already received the piece, while we have the Client unlocked to write the data + // out. + deletedRequest := false { - if _, ok := c.requests[req]; ok { + if _, ok := c.actualRequestState.Requests[req]; ok { for _, f := range c.callbacks.ReceivedRequested { f(PeerMessageEvent{c, msg}) } } // Request has been satisfied. if c.deleteRequest(req) { - if c.expectingChunks() { + deletedRequest = true + if !c.peerChoking { c._chunksReceivedWhileExpecting++ } } else { @@ -1393,6 +1294,9 @@ func (c *Peer) receiveChunk(msg *pp.Message) error { } } + t := c.t + cl := t.cl + // Do we actually want this chunk? 
if t.haveChunk(req) { chunksReceived.Add("wasted", 1) @@ -1404,14 +1308,14 @@ func (c *Peer) receiveChunk(msg *pp.Message) error { c.allStats(add(1, func(cs *ConnStats) *Count { return &cs.ChunksReadUseful })) c.allStats(add(int64(len(msg.Piece)), func(cs *ConnStats) *Count { return &cs.BytesReadUsefulData })) + if deletedRequest { + c.piecesReceivedSinceLastRequestUpdate++ + c.allStats(add(int64(len(msg.Piece)), func(cs *ConnStats) *Count { return &cs.BytesReadUsefulIntendedData })) + } for _, f := range c.t.cl.config.Callbacks.ReceivedUsefulData { f(ReceivedUsefulDataEvent{c, msg}) } c.lastUsefulChunkReceived = time.Now() - // if t.fastestPeer != c { - // log.Printf("setting fastest connection %p", c) - // } - t.fastestPeer = c // Need to record that it hasn't been written yet, before we attempt to do // anything with it. @@ -1425,7 +1329,7 @@ func (c *Peer) receiveChunk(msg *pp.Message) error { if p == c { return } - p.postCancel(req) + p.cancel(req) }) err := func() error { @@ -1503,7 +1407,7 @@ func (c *PeerConn) uploadAllowed() bool { func (c *PeerConn) setRetryUploadTimer(delay time.Duration) { if c.uploadTimer == nil { - c.uploadTimer = time.AfterFunc(delay, c.writerCond.Broadcast) + c.uploadTimer = time.AfterFunc(delay, c.tickleWriter) } else { c.uploadTimer.Reset(delay) } @@ -1559,19 +1463,19 @@ func (c *Peer) peerHasWantedPieces() bool { } func (c *Peer) numLocalRequests() int { - return len(c.requests) + return len(c.actualRequestState.Requests) } func (c *Peer) deleteRequest(r Request) bool { - if _, ok := c.requests[r]; !ok { + delete(c.nextRequestState.Requests, r) + if _, ok := c.actualRequestState.Requests[r]; !ok { return false } - delete(c.requests, r) + delete(c.actualRequestState.Requests, r) for _, f := range c.callbacks.DeletedRequest { f(PeerRequestEvent{c, r}) } c.updateExpectingChunks() - c.t.requestStrategy.hooks().deletedRequest(r) pr := c.t.pendingRequests pr[r]-- n := pr[r] @@ -1581,36 +1485,17 @@ func (c *Peer) deleteRequest(r Request) bool { if n < 0 { panic(n) } - // If a request fails, updating the requests for the current peer first may miss the opportunity - // to try other peers for that request instead, depending on the request strategy. This might - // only affect webseed peers though, since they synchronously issue new requests: PeerConns do - // it in the writer routine. - const updateCurrentConnRequestsFirst = false - if updateCurrentConnRequestsFirst { - c.updateRequests() - } - // Give other conns a chance to pick up the request. - c.t.iterPeers(func(_c *Peer) { - // We previously checked that the peer wasn't interested to to only wake connections that - // were unable to issue requests due to starvation by the request strategy. There could be - // performance ramifications. - if _c != c && c.peerHasPiece(pieceIndex(r.Index)) { - _c.updateRequests() - } - }) - if !updateCurrentConnRequestsFirst { - c.updateRequests() - } return true } func (c *Peer) deleteAllRequests() { - for r := range c.requests { + for r := range c.actualRequestState.Requests { c.deleteRequest(r) } - if len(c.requests) != 0 { - panic(len(c.requests)) + if l := len(c.actualRequestState.Requests); l != 0 { + panic(l) } + c.nextRequestState.Requests = nil // for c := range c.t.conns { // c.tickleWriter() // } @@ -1619,19 +1504,7 @@ func (c *Peer) deleteAllRequests() { // This is called when something has changed that should wake the writer, such as putting stuff into // the writeBuffer, or changing some state that the writer can act on. 
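Illustrative sketch, not part of the patch: with the per-connection re-request logic removed above, deleteRequest is left maintaining Torrent.pendingRequests as a plain reference count of how many peers still have each Request outstanding. A hypothetical helper (the name is invented) showing that bookkeeping in isolation, mirroring the decrement-and-panic checks in deleteRequest:

func decPendingRequest(pending map[Request]int, r Request) {
	pending[r]--
	n := pending[r]
	if n == 0 {
		delete(pending, r)
	}
	if n < 0 {
		// The request was deleted more times than it was issued; this is the same
		// condition deleteRequest panics on.
		panic(n)
	}
}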
func (c *PeerConn) tickleWriter() { - c.writerCond.Broadcast() -} - -func (c *Peer) postCancel(r Request) bool { - if !c.deleteRequest(r) { - return false - } - c.peerImpl._postCancel(r) - return true -} - -func (c *PeerConn) _postCancel(r Request) { - c.post(makeCancelMessage(r)) + c.messageWriter.writeCond.Broadcast() } func (c *PeerConn) sendChunk(r Request, msg func(pp.Message) bool, state *peerRequestState) (more bool) { @@ -1722,33 +1595,27 @@ func (l connectionTrust) Less(r connectionTrust) bool { return multiless.New().Bool(l.Implicit, r.Implicit).Int64(l.NetGoodPiecesDirted, r.NetGoodPiecesDirted).Less() } -func (cn *Peer) requestStrategyConnection() requestStrategyConnection { - return cn -} - -func (cn *Peer) chunksReceivedWhileExpecting() int64 { - return cn._chunksReceivedWhileExpecting -} - -func (cn *Peer) fastest() bool { - return cn == cn.t.fastestPeer -} - func (cn *Peer) peerMaxRequests() int { return cn.PeerMaxRequests } -// Returns the pieces the peer has claimed to have. +// Returns the pieces the peer could have based on their claims. If we don't know how many pieces +// are in the torrent, it could be a very large range the peer has sent HaveAll. func (cn *PeerConn) PeerPieces() bitmap.Bitmap { cn.locker().RLock() defer cn.locker().RUnlock() - return cn.peerPieces() + return cn.newPeerPieces() } -func (cn *Peer) peerPieces() bitmap.Bitmap { +// Returns a new Bitmap that includes bits for all pieces the peer could have based on their claims. +func (cn *Peer) newPeerPieces() bitmap.Bitmap { ret := cn._peerPieces.Copy() if cn.peerSentHaveAll { - ret.AddRange(0, cn.t.numPieces()) + if cn.t.haveInfo() { + ret.AddRange(0, bitmap.BitRange(cn.t.numPieces())) + } else { + ret.AddRange(0, bitmap.ToEnd) + } } return ret } @@ -1761,11 +1628,11 @@ func (cn *Peer) stats() *ConnStats { return &cn._stats } -func (cn *Peer) torrent() requestStrategyTorrent { - return cn.t.requestStrategyTorrent() -} - func (p *Peer) TryAsPeerConn() (*PeerConn, bool) { pc, ok := p.peerImpl.(*PeerConn) return pc, ok } + +func (p *PeerConn) onNextRequestStateChanged() { + p.tickleWriter() +} diff --git a/peerconn_test.go b/peerconn_test.go index 7057e77d..c28bc632 100644 --- a/peerconn_test.go +++ b/peerconn_test.go @@ -5,7 +5,6 @@ import ( "net" "sync" "testing" - "time" "github.com/anacrolix/missinggo/pubsub" "github.com/bradfitz/iter" @@ -32,7 +31,7 @@ func TestSendBitfieldThenHave(t *testing.T) { r, w := io.Pipe() //c.r = r c.w = w - go c.writer(time.Minute) + c.startWriter() c.locker().Lock() c.t._completedPieces.Add(1) c.postBitfield( /*[]bool{false, true, false}*/ ) @@ -98,7 +97,7 @@ func BenchmarkConnectionMainReadLoop(b *testing.B) { ts := &torrentStorage{} t := &Torrent{ cl: cl, - storage: &storage.Torrent{TorrentImpl: ts}, + storage: &storage.Torrent{TorrentImpl: storage.TorrentImpl{Piece: ts.Piece, Close: ts.Close}}, pieceStateChanges: pubsub.NewPubSub(), } require.NoError(b, t.setInfo(&metainfo.Info{ diff --git a/pexconn_test.go b/pexconn_test.go index a3ff1d02..7bb61ecd 100644 --- a/pexconn_test.go +++ b/pexconn_test.go @@ -21,7 +21,7 @@ func TestPexConnState(t *testing.T) { c := cl.newConnection(nil, false, addr, addr.Network(), "") c.PeerExtensionIDs = make(map[pp.ExtensionName]pp.ExtensionNumber) c.PeerExtensionIDs[pp.ExtensionNamePex] = pexExtendedId - c.writerCond.L.Lock() + c.messageWriter.mu.Lock() c.setTorrent(torrent) torrent.addPeerConn(c) @@ -36,7 +36,7 @@ func TestPexConnState(t *testing.T) { out = m return true } - c.writerCond.Wait() + <-c.messageWriter.writeCond.Signaled() 
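Illustrative sketch, not part of the patch: the test change above swaps the writer's sync.Cond wait for receiving from a broadcast channel, where tickleWriter calls writeCond.Broadcast() and a waiter grabs writeCond.Signaled() and blocks on the channel it returns. A rough sketch of that wait pattern against a hypothetical condition interface; only the two methods visible in the diff are assumed, and the real type on messageWriter is outside this hunk:

type broadcastCond interface {
	Signaled() <-chan struct{} // channel released by the next Broadcast
}

// waitForTickle assumes the caller holds mu; ready is re-checked under the lock
// after every broadcast.
func waitForTickle(mu sync.Locker, cond broadcastCond, ready func() bool) {
	for !ready() {
		signaled := cond.Signaled() // take the channel while still holding the lock
		mu.Unlock()
		<-signaled
		mu.Lock()
	}
}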
c.pex.Share(testWriter) require.True(t, writerCalled) require.EqualValues(t, pp.Extended, out.Type) diff --git a/piece.go b/piece.go index fb9c8056..45b37c0a 100644 --- a/piece.go +++ b/piece.go @@ -11,33 +11,6 @@ import ( "github.com/anacrolix/torrent/storage" ) -// Describes the importance of obtaining a particular piece. -type piecePriority byte - -func (pp *piecePriority) Raise(maybe piecePriority) bool { - if maybe > *pp { - *pp = maybe - return true - } - return false -} - -// Priority for use in PriorityBitmap -func (me piecePriority) BitmapPriority() int { - return -int(me) -} - -const ( - PiecePriorityNone piecePriority = iota // Not wanted. Must be the zero value. - PiecePriorityNormal // Wanted. - PiecePriorityHigh // Wanted a lot. - PiecePriorityReadahead // May be required soon. - // Succeeds a piece where a read occurred. Currently the same as Now, - // apparently due to issues with caching. - PiecePriorityNext - PiecePriorityNow // A Reader is reading in this piece. Highest urgency. -) - type Piece struct { // The completed piece SHA1 hash, from the metainfo "pieces" field. hash *metainfo.Hash @@ -55,6 +28,7 @@ type Piece struct { publicPieceState PieceState priority piecePriority + availability int64 // This can be locked when the Client lock is taken, but probably not vice versa. pendingWritesMutex sync.Mutex @@ -79,7 +53,7 @@ func (p *Piece) Storage() storage.Piece { } func (p *Piece) pendingChunkIndex(chunkIndex int) bool { - return !p._dirtyChunks.Contains(chunkIndex) + return !p._dirtyChunks.Contains(bitmap.BitIndex(chunkIndex)) } func (p *Piece) pendingChunk(cs ChunkSpec, chunkSize pp.Integer) bool { @@ -95,12 +69,12 @@ func (p *Piece) numDirtyChunks() pp.Integer { } func (p *Piece) unpendChunkIndex(i int) { - p._dirtyChunks.Add(i) + p._dirtyChunks.Add(bitmap.BitIndex(i)) p.t.tickleReaders() } func (p *Piece) pendChunkIndex(i int) { - p._dirtyChunks.Remove(i) + p._dirtyChunks.Remove(bitmap.BitIndex(i)) } func (p *Piece) numChunks() pp.Integer { @@ -144,7 +118,7 @@ func (p *Piece) chunkIndexSpec(chunk pp.Integer) ChunkSpec { func (p *Piece) chunkIndexRequest(chunkIndex pp.Integer) Request { return Request{ pp.Integer(p.index), - chunkIndexSpec(chunkIndex, p.length(), p.chunkSize()), + p.chunkIndexSpec(chunkIndex), } } @@ -221,14 +195,11 @@ func (p *Piece) SetPriority(prio piecePriority) { p.t.updatePiecePriority(p.index) } -func (p *Piece) uncachedPriority() (ret piecePriority) { - if p.t.pieceComplete(p.index) || p.t.pieceQueuedForHash(p.index) || p.t.hashingPiece(p.index) { - return PiecePriorityNone - } +func (p *Piece) purePriority() (ret piecePriority) { for _, f := range p.files { ret.Raise(f.prio) } - if p.t.readerNowPieces().Contains(int(p.index)) { + if p.t.readerNowPieces().Contains(bitmap.BitIndex(p.index)) { ret.Raise(PiecePriorityNow) } // if t._readerNowPieces.Contains(piece - 1) { @@ -241,6 +212,13 @@ func (p *Piece) uncachedPriority() (ret piecePriority) { return } +func (p *Piece) uncachedPriority() (ret piecePriority) { + if p.t.pieceComplete(p.index) || p.t.pieceQueuedForHash(p.index) || p.t.hashingPiece(p.index) { + return PiecePriorityNone + } + return p.purePriority() +} + // Tells the Client to refetch the completion status from storage, updating priority etc. if // necessary. Might be useful if you know the state of the piece data has changed externally. 
func (p *Piece) UpdateCompletion() { @@ -256,11 +234,7 @@ func (p *Piece) completion() (ret storage.Completion) { } func (p *Piece) allChunksDirty() bool { - return p._dirtyChunks.Len() == int(p.numChunks()) -} - -func (p *Piece) requestStrategyPiece() requestStrategyPiece { - return p + return p._dirtyChunks.Len() == bitmap.BitRange(p.numChunks()) } func (p *Piece) dirtyChunks() bitmap.Bitmap { @@ -270,3 +244,15 @@ func (p *Piece) dirtyChunks() bitmap.Bitmap { func (p *Piece) State() PieceState { return p.t.PieceState(p.index) } + +func (p *Piece) iterUndirtiedChunks(f func(cs ChunkSpec) bool) bool { + for i := pp.Integer(0); i < p.numChunks(); i++ { + if p.chunkIndexDirty(i) { + continue + } + if !f(p.chunkIndexSpec(i)) { + return false + } + } + return true +} diff --git a/request-strategy-defaults.go b/request-strategy-defaults.go deleted file mode 100644 index 1ece5fc6..00000000 --- a/request-strategy-defaults.go +++ /dev/null @@ -1,45 +0,0 @@ -package torrent - -import ( - "github.com/anacrolix/missinggo/iter" - "github.com/anacrolix/missinggo/v2/bitmap" - pp "github.com/anacrolix/torrent/peer_protocol" -) - -// Provides default implementations for requestStrategy methods. Could be embedded, or delegated to. -type requestStrategyDefaults struct{} - -func (requestStrategyDefaults) hooks() requestStrategyHooks { - return requestStrategyHooks{ - sentRequest: func(Request) {}, - deletedRequest: func(Request) {}, - } -} - -func (requestStrategyDefaults) iterUndirtiedChunks(p requestStrategyPiece, f func(ChunkSpec) bool) bool { - chunkIndices := p.dirtyChunks().Copy() - chunkIndices.FlipRange(0, bitmap.BitIndex(p.numChunks())) - return iter.ForPerm(chunkIndices.Len(), func(i int) bool { - ci, err := chunkIndices.RB.Select(uint32(i)) - if err != nil { - panic(err) - } - return f(p.chunkIndexRequest(pp.Integer(ci)).ChunkSpec) - }) -} - -func (requestStrategyDefaults) nominalMaxRequests(cn requestStrategyConnection) int { - return int( - max( - 64, - cn.stats().ChunksReadUseful.Int64()- - (cn.stats().ChunksRead.Int64()-cn.stats().ChunksReadUseful.Int64()))) -} - -func (requestStrategyDefaults) piecePriority(cn requestStrategyConnection, piece pieceIndex, tpp piecePriority, prio int) int { - return prio -} - -func (requestStrategyDefaults) shouldRequestWithoutBias(cn requestStrategyConnection) bool { - return false -} diff --git a/request-strategy.go b/request-strategy.go deleted file mode 100644 index 75cff963..00000000 --- a/request-strategy.go +++ /dev/null @@ -1,223 +0,0 @@ -package torrent - -import ( - "math" - "sync" - "time" - - "github.com/anacrolix/missinggo/v2/bitmap" - "github.com/anacrolix/missinggo/v2/prioritybitmap" - - pp "github.com/anacrolix/torrent/peer_protocol" -) - -type requestStrategyPiece interface { - numChunks() pp.Integer - dirtyChunks() bitmap.Bitmap - chunkIndexRequest(i pp.Integer) Request -} - -type requestStrategyTorrent interface { - numConns() int - numReaders() int - numPieces() int - readerPiecePriorities() (now, readahead bitmap.Bitmap) - ignorePieces() bitmap.Bitmap - pendingPieces() *prioritybitmap.PriorityBitmap -} - -type requestStrategyConnection interface { - torrent() requestStrategyTorrent - peerPieces() bitmap.Bitmap - pieceRequestOrder() *prioritybitmap.PriorityBitmap - fastest() bool - stats() *ConnStats - totalExpectingTime() time.Duration - peerMaxRequests() int - chunksReceivedWhileExpecting() int64 -} - -type requestStrategy interface { - iterPendingPieces(requestStrategyConnection, func(pieceIndex) bool) bool - 
iterUndirtiedChunks(requestStrategyPiece, func(ChunkSpec) bool) bool - nominalMaxRequests(requestStrategyConnection) int - shouldRequestWithoutBias(requestStrategyConnection) bool - piecePriority(requestStrategyConnection, pieceIndex, piecePriority, int) int - hooks() requestStrategyHooks -} - -type requestStrategyHooks struct { - sentRequest func(Request) - deletedRequest func(Request) -} - -type requestStrategyCallbacks interface { - requestTimedOut(Request) -} - -type requestStrategyFuzzing struct { - requestStrategyDefaults -} - -type requestStrategyFastest struct { - requestStrategyDefaults -} - -func newRequestStrategyMaker(rs requestStrategy) requestStrategyMaker { - return func(requestStrategyCallbacks, sync.Locker) requestStrategy { - return rs - } -} - -// The fastest connection downloads strictly in order of priority, while all others adhere to their -// piece inclinations. -func RequestStrategyFastest() requestStrategyMaker { - return newRequestStrategyMaker(requestStrategyFastest{}) -} - -// Favour higher priority pieces with some fuzzing to reduce overlaps and wastage across -// connections. -func RequestStrategyFuzzing() requestStrategyMaker { - return newRequestStrategyMaker(requestStrategyFuzzing{}) -} - -func (requestStrategyFastest) shouldRequestWithoutBias(cn requestStrategyConnection) bool { - if cn.torrent().numReaders() == 0 { - return false - } - if cn.torrent().numConns() == 1 { - return true - } - if cn.fastest() { - return true - } - return false -} - -type requestStrategyDuplicateRequestTimeout struct { - requestStrategyDefaults - // How long to avoid duplicating a pending request. - duplicateRequestTimeout time.Duration - - callbacks requestStrategyCallbacks - - // The last time we requested a chunk. Deleting the request from any connection will clear this - // value. - lastRequested map[Request]*time.Timer - // The lock to take when running a request timeout handler. - timeoutLocker sync.Locker -} - -// Generates a request strategy instance for a given torrent. callbacks are probably specific to the torrent. -type requestStrategyMaker func(callbacks requestStrategyCallbacks, clientLocker sync.Locker) requestStrategy - -// Requests are strictly by piece priority, and not duplicated until duplicateRequestTimeout is -// reached. 
-func RequestStrategyDuplicateRequestTimeout(duplicateRequestTimeout time.Duration) requestStrategyMaker { - return func(callbacks requestStrategyCallbacks, clientLocker sync.Locker) requestStrategy { - return requestStrategyDuplicateRequestTimeout{ - duplicateRequestTimeout: duplicateRequestTimeout, - callbacks: callbacks, - lastRequested: make(map[Request]*time.Timer), - timeoutLocker: clientLocker, - } - } -} - -func (rs requestStrategyDuplicateRequestTimeout) hooks() requestStrategyHooks { - return requestStrategyHooks{ - deletedRequest: func(r Request) { - if t, ok := rs.lastRequested[r]; ok { - t.Stop() - delete(rs.lastRequested, r) - } - }, - sentRequest: rs.onSentRequest, - } -} - -func (rs requestStrategyDuplicateRequestTimeout) iterUndirtiedChunks(p requestStrategyPiece, f func(ChunkSpec) bool) bool { - for i := pp.Integer(0); i < pp.Integer(p.numChunks()); i++ { - if p.dirtyChunks().Get(bitmap.BitIndex(i)) { - continue - } - r := p.chunkIndexRequest(i) - if rs.wouldDuplicateRecent(r) { - continue - } - if !f(r.ChunkSpec) { - return false - } - } - return true -} - -func (requestStrategyFuzzing) piecePriority(cn requestStrategyConnection, piece pieceIndex, tpp piecePriority, prio int) int { - switch tpp { - case PiecePriorityNormal: - case PiecePriorityReadahead: - prio -= int(cn.torrent().numPieces()) - case PiecePriorityNext, PiecePriorityNow: - prio -= 2 * int(cn.torrent().numPieces()) - default: - panic(tpp) - } - prio += int(piece / 3) - return prio -} - -func (requestStrategyDuplicateRequestTimeout) iterPendingPieces(cn requestStrategyConnection, f func(pieceIndex) bool) bool { - return iterUnbiasedPieceRequestOrder(cn, f) -} -func defaultIterPendingPieces(rs requestStrategy, cn requestStrategyConnection, f func(pieceIndex) bool) bool { - if rs.shouldRequestWithoutBias(cn) { - return iterUnbiasedPieceRequestOrder(cn, f) - } else { - return cn.pieceRequestOrder().IterTyped(func(i int) bool { - return f(pieceIndex(i)) - }) - } -} -func (rs requestStrategyFuzzing) iterPendingPieces(cn requestStrategyConnection, cb func(pieceIndex) bool) bool { - return defaultIterPendingPieces(rs, cn, cb) -} -func (rs requestStrategyFastest) iterPendingPieces(cn requestStrategyConnection, cb func(pieceIndex) bool) bool { - return defaultIterPendingPieces(rs, cn, cb) -} - -func (rs requestStrategyDuplicateRequestTimeout) onSentRequest(r Request) { - rs.lastRequested[r] = time.AfterFunc(rs.duplicateRequestTimeout, func() { - rs.timeoutLocker.Lock() - delete(rs.lastRequested, r) - rs.timeoutLocker.Unlock() - rs.callbacks.requestTimedOut(r) - }) -} - -// The actual value to use as the maximum outbound requests. -func (rs requestStrategyDuplicateRequestTimeout) nominalMaxRequests(cn requestStrategyConnection) (ret int) { - expectingTime := int64(cn.totalExpectingTime()) - if expectingTime == 0 { - expectingTime = math.MaxInt64 - } else { - expectingTime *= 2 - } - return int(clamp( - 1, - int64(cn.peerMaxRequests()), - max( - // It makes sense to always pipeline at least one connection, since latency must be - // non-zero. - 2, - // Request only as many as we expect to receive in the duplicateRequestTimeout - // window. We are trying to avoid having to duplicate requests. - cn.chunksReceivedWhileExpecting()*int64(rs.duplicateRequestTimeout)/expectingTime, - ), - )) -} -func (rs requestStrategyDuplicateRequestTimeout) wouldDuplicateRecent(r Request) bool { - // This piece has been requested on another connection, and the duplicate request timer is still - // running. 
- _, ok := rs.lastRequested[r] - return ok -} diff --git a/request-strategy/order.go b/request-strategy/order.go new file mode 100644 index 00000000..2a364eb8 --- /dev/null +++ b/request-strategy/order.go @@ -0,0 +1,349 @@ +package request_strategy + +import ( + "fmt" + "sort" + + "github.com/anacrolix/multiless" + + pp "github.com/anacrolix/torrent/peer_protocol" + "github.com/anacrolix/torrent/types" +) + +type ( + Request = types.Request + pieceIndex = types.PieceIndex + piecePriority = types.PiecePriority + // This can be made into a type-param later, will be great for testing. + ChunkSpec = types.ChunkSpec +) + +type ClientPieceOrder struct{} + +type filterTorrent struct { + Torrent + unverifiedBytes int64 + // Potentially shared with other torrents. + storageLeft *int64 +} + +func sortFilterPieces(pieces []filterPiece) { + sort.Slice(pieces, func(_i, _j int) bool { + i := pieces[_i] + j := pieces[_j] + return multiless.New().Int( + int(j.Priority), int(i.Priority), + ).Bool( + j.Partial, i.Partial, + ).Int64( + i.Availability, j.Availability, + ).Int( + i.index, j.index, + ).Uintptr( + i.t.StableId, j.t.StableId, + ).MustLess() + }) +} + +type requestsPeer struct { + Peer + nextState PeerNextRequestState + requestablePiecesRemaining int +} + +func (rp *requestsPeer) canFitRequest() bool { + return len(rp.nextState.Requests) < rp.MaxRequests +} + +func (rp *requestsPeer) addNextRequest(r Request) { + _, ok := rp.nextState.Requests[r] + if ok { + panic("should only add once") + } + rp.nextState.Requests[r] = struct{}{} +} + +type peersForPieceRequests struct { + requestsInPiece int + *requestsPeer +} + +func (me *peersForPieceRequests) addNextRequest(r Request) { + me.requestsPeer.addNextRequest(r) + me.requestsInPiece++ +} + +type requestablePiece struct { + index pieceIndex + t Torrent + alwaysReallocate bool + NumPendingChunks int + IterPendingChunks ChunksIter +} + +type filterPiece struct { + t *filterTorrent + index pieceIndex + Piece +} + +func getRequestablePieces(input Input) (ret []requestablePiece) { + // Storage capacity left for this run, keyed by the storage capacity pointer on the storage + // TorrentImpl. + storageLeft := make(map[*func() *int64]*int64) + var pieces []filterPiece + for _, _t := range input.Torrents { + // TODO: We could do metainfo requests here. + t := &filterTorrent{ + Torrent: _t, + unverifiedBytes: 0, + } + key := t.Capacity + if key != nil { + if _, ok := storageLeft[key]; !ok { + storageLeft[key] = (*key)() + } + t.storageLeft = storageLeft[key] + } + for i, tp := range t.Pieces { + pieces = append(pieces, filterPiece{ + t: t, + index: i, + Piece: tp, + }) + } + } + sortFilterPieces(pieces) + var allTorrentsUnverifiedBytes int64 + for _, piece := range pieces { + if left := piece.t.storageLeft; left != nil { + if *left < int64(piece.Length) { + continue + } + *left -= int64(piece.Length) + } + if !piece.Request || piece.NumPendingChunks == 0 { + // TODO: Clarify exactly what is verified. Stuff that's being hashed should be + // considered unverified and hold up further requests. 
+ continue + } + if piece.t.MaxUnverifiedBytes != 0 && piece.t.unverifiedBytes+piece.Length > piece.t.MaxUnverifiedBytes { + continue + } + if input.MaxUnverifiedBytes != 0 && allTorrentsUnverifiedBytes+piece.Length > input.MaxUnverifiedBytes { + continue + } + piece.t.unverifiedBytes += piece.Length + allTorrentsUnverifiedBytes += piece.Length + ret = append(ret, requestablePiece{ + index: piece.index, + t: piece.t.Torrent, + NumPendingChunks: piece.NumPendingChunks, + IterPendingChunks: piece.iterPendingChunksWrapper, + alwaysReallocate: piece.Priority >= types.PiecePriorityNext, + }) + } + return +} + +type Input struct { + Torrents []Torrent + MaxUnverifiedBytes int64 +} + +// TODO: We could do metainfo requests here. +func Run(input Input) map[PeerId]PeerNextRequestState { + requestPieces := getRequestablePieces(input) + torrents := input.Torrents + allPeers := make(map[uintptr][]*requestsPeer, len(torrents)) + for _, t := range torrents { + peers := make([]*requestsPeer, 0, len(t.Peers)) + for _, p := range t.Peers { + peers = append(peers, &requestsPeer{ + Peer: p, + nextState: PeerNextRequestState{ + Requests: make(map[Request]struct{}, p.MaxRequests), + }, + }) + } + allPeers[t.StableId] = peers + } + for _, piece := range requestPieces { + for _, peer := range allPeers[piece.t.StableId] { + if peer.canRequestPiece(piece.index) { + peer.requestablePiecesRemaining++ + } + } + } + for _, piece := range requestPieces { + allocatePendingChunks(piece, allPeers[piece.t.StableId]) + } + ret := make(map[PeerId]PeerNextRequestState) + for _, peers := range allPeers { + for _, rp := range peers { + if rp.requestablePiecesRemaining != 0 { + panic(rp.requestablePiecesRemaining) + } + if _, ok := ret[rp.Id]; ok { + panic(fmt.Sprintf("duplicate peer id: %v", rp.Id)) + } + ret[rp.Id] = rp.nextState + } + } + return ret +} + +// Checks that a sorted peersForPiece slice makes sense. +func ensureValidSortedPeersForPieceRequests(peers []*peersForPieceRequests, sortLess func(_, _ int) bool) { + if !sort.SliceIsSorted(peers, sortLess) { + panic("not sorted") + } + peerMap := make(map[*peersForPieceRequests]struct{}, len(peers)) + for _, p := range peers { + if _, ok := peerMap[p]; ok { + panic(p) + } + peerMap[p] = struct{}{} + } +} + +func allocatePendingChunks(p requestablePiece, peers []*requestsPeer) { + peersForPiece := make([]*peersForPieceRequests, 0, len(peers)) + for _, peer := range peers { + peersForPiece = append(peersForPiece, &peersForPieceRequests{ + requestsInPiece: 0, + requestsPeer: peer, + }) + } + defer func() { + for _, peer := range peersForPiece { + if peer.canRequestPiece(p.index) { + peer.requestablePiecesRemaining-- + } + } + }() + sortPeersForPiece := func(req *Request) { + less := func(i, j int) bool { + byHasRequest := func() multiless.Computation { + ml := multiless.New() + if req != nil { + _, iHas := peersForPiece[i].nextState.Requests[*req] + _, jHas := peersForPiece[j].nextState.Requests[*req] + ml = ml.Bool(jHas, iHas) + } + return ml + }() + ml := multiless.New() + // We always "reallocate", that is force even striping amongst peers that are either on + // the last piece they can contribute too, or for pieces marked for this behaviour. + // Striping prevents starving peers of requests, and will always re-balance to the + // fastest known peers. 
+ if !p.alwaysReallocate { + ml = ml.Bool( + peersForPiece[j].requestablePiecesRemaining == 1, + peersForPiece[i].requestablePiecesRemaining == 1) + } + if p.alwaysReallocate || peersForPiece[j].requestablePiecesRemaining == 1 { + ml = ml.Int( + peersForPiece[i].requestsInPiece, + peersForPiece[j].requestsInPiece) + } else { + ml = ml.AndThen(byHasRequest) + } + ml = ml.Int( + peersForPiece[i].requestablePiecesRemaining, + peersForPiece[j].requestablePiecesRemaining, + ).Float64( + peersForPiece[j].DownloadRate, + peersForPiece[i].DownloadRate, + ) + ml = ml.AndThen(byHasRequest) + return ml.Int64( + int64(peersForPiece[j].Age), int64(peersForPiece[i].Age), + // TODO: Probably peer priority can come next + ).Uintptr( + peersForPiece[i].Id.Uintptr(), + peersForPiece[j].Id.Uintptr(), + ).MustLess() + } + sort.Slice(peersForPiece, less) + ensureValidSortedPeersForPieceRequests(peersForPiece, less) + } + // Chunks can be preassigned several times, if peers haven't been able to update their "actual" + // with "next" request state before another request strategy run occurs. + preallocated := make(map[ChunkSpec][]*peersForPieceRequests, p.NumPendingChunks) + p.IterPendingChunks(func(spec ChunkSpec) { + req := Request{pp.Integer(p.index), spec} + for _, peer := range peersForPiece { + if h := peer.HasExistingRequest; h == nil || !h(req) { + continue + } + if !peer.canFitRequest() { + continue + } + if !peer.canRequestPiece(p.index) { + continue + } + preallocated[spec] = append(preallocated[spec], peer) + peer.addNextRequest(req) + } + }) + pendingChunksRemaining := int(p.NumPendingChunks) + p.IterPendingChunks(func(chunk types.ChunkSpec) { + if _, ok := preallocated[chunk]; ok { + return + } + req := Request{pp.Integer(p.index), chunk} + defer func() { pendingChunksRemaining-- }() + sortPeersForPiece(nil) + for _, peer := range peersForPiece { + if !peer.canFitRequest() { + continue + } + if !peer.HasPiece(p.index) { + continue + } + if !peer.pieceAllowedFastOrDefault(p.index) { + // TODO: Verify that's okay to stay uninterested if we request allowed fast pieces. + peer.nextState.Interested = true + if peer.Choking { + continue + } + } + peer.addNextRequest(req) + break + } + }) +chunk: + for chunk, prePeers := range preallocated { + pendingChunksRemaining-- + req := Request{pp.Integer(p.index), chunk} + for _, pp := range prePeers { + pp.requestsInPiece-- + } + sortPeersForPiece(&req) + for _, pp := range prePeers { + delete(pp.nextState.Requests, req) + } + for _, peer := range peersForPiece { + if !peer.canFitRequest() { + continue + } + if !peer.HasPiece(p.index) { + continue + } + if !peer.pieceAllowedFastOrDefault(p.index) { + // TODO: Verify that's okay to stay uninterested if we request allowed fast pieces. 
+ peer.nextState.Interested = true + if peer.Choking { + continue + } + } + peer.addNextRequest(req) + continue chunk + } + } + if pendingChunksRemaining != 0 { + panic(pendingChunksRemaining) + } +} diff --git a/request-strategy/order_test.go b/request-strategy/order_test.go new file mode 100644 index 00000000..d37b0423 --- /dev/null +++ b/request-strategy/order_test.go @@ -0,0 +1,297 @@ +package request_strategy + +import ( + "math" + "testing" + + "github.com/bradfitz/iter" + qt "github.com/frankban/quicktest" + + pp "github.com/anacrolix/torrent/peer_protocol" +) + +func r(i pieceIndex, begin int) Request { + return Request{pp.Integer(i), ChunkSpec{pp.Integer(begin), 1}} +} + +func chunkIterRange(end int) func(func(ChunkSpec)) { + return func(f func(ChunkSpec)) { + for offset := range iter.N(end) { + f(ChunkSpec{pp.Integer(offset), 1}) + } + } +} + +func chunkIter(offsets ...int) func(func(ChunkSpec)) { + return func(f func(ChunkSpec)) { + for _, offset := range offsets { + f(ChunkSpec{pp.Integer(offset), 1}) + } + } +} + +func requestSetFromSlice(rs ...Request) (ret map[Request]struct{}) { + ret = make(map[Request]struct{}, len(rs)) + for _, r := range rs { + ret[r] = struct{}{} + } + return +} + +type intPeerId int + +func (i intPeerId) Uintptr() uintptr { + return uintptr(i) +} + +func TestStealingFromSlowerPeer(t *testing.T) { + c := qt.New(t) + basePeer := Peer{ + HasPiece: func(i pieceIndex) bool { + return true + }, + MaxRequests: math.MaxInt16, + DownloadRate: 2, + } + // Slower than the stealers, but has all requests already. + stealee := basePeer + stealee.DownloadRate = 1 + stealee.HasExistingRequest = func(r Request) bool { + return true + } + stealee.Id = intPeerId(1) + firstStealer := basePeer + firstStealer.Id = intPeerId(2) + secondStealer := basePeer + secondStealer.Id = intPeerId(3) + results := Run(Input{Torrents: []Torrent{{ + Pieces: []Piece{{ + Request: true, + NumPendingChunks: 5, + IterPendingChunks: chunkIterRange(5), + }}, + Peers: []Peer{ + stealee, + firstStealer, + secondStealer, + }, + }}}) + + c.Assert(results, qt.HasLen, 3) + check := func(p PeerId, l int) { + c.Check(results[p].Requests, qt.HasLen, l) + c.Check(results[p].Interested, qt.Equals, l > 0) + } + check(stealee.Id, 1) + check(firstStealer.Id, 2) + check(secondStealer.Id, 2) +} + +func checkNumRequestsAndInterest(c *qt.C, next PeerNextRequestState, num int, interest bool) { + c.Check(next.Requests, qt.HasLen, num) + c.Check(next.Interested, qt.Equals, interest) +} + +func TestStealingFromSlowerPeersBasic(t *testing.T) { + c := qt.New(t) + basePeer := Peer{ + HasPiece: func(i pieceIndex) bool { + return true + }, + MaxRequests: math.MaxInt16, + DownloadRate: 2, + } + stealee := basePeer + stealee.DownloadRate = 1 + stealee.HasExistingRequest = func(r Request) bool { + return true + } + stealee.Id = intPeerId(1) + firstStealer := basePeer + firstStealer.Id = intPeerId(2) + secondStealer := basePeer + secondStealer.Id = intPeerId(3) + results := Run(Input{Torrents: []Torrent{{ + Pieces: []Piece{{ + Request: true, + NumPendingChunks: 2, + IterPendingChunks: chunkIter(0, 1), + }}, + Peers: []Peer{ + stealee, + firstStealer, + secondStealer, + }, + }}}) + + checkNumRequestsAndInterest(c, results[firstStealer.Id], 1, true) + checkNumRequestsAndInterest(c, results[secondStealer.Id], 1, true) + checkNumRequestsAndInterest(c, results[stealee.Id], 0, false) +} + +func TestPeerKeepsExistingIfReasonable(t *testing.T) { + c := qt.New(t) + basePeer := Peer{ + HasPiece: func(i pieceIndex) bool { + return true 
+ }, + MaxRequests: math.MaxInt16, + DownloadRate: 2, + } + // Slower than the stealers, but has all requests already. + stealee := basePeer + stealee.DownloadRate = 1 + keepReq := r(0, 0) + stealee.HasExistingRequest = func(r Request) bool { + return r == keepReq + } + stealee.Id = intPeerId(1) + firstStealer := basePeer + firstStealer.Id = intPeerId(2) + secondStealer := basePeer + secondStealer.Id = intPeerId(3) + results := Run(Input{Torrents: []Torrent{{ + Pieces: []Piece{{ + Request: true, + NumPendingChunks: 4, + IterPendingChunks: chunkIter(0, 1, 3, 4), + }}, + Peers: []Peer{ + stealee, + firstStealer, + secondStealer, + }, + }}}) + + c.Assert(results, qt.HasLen, 3) + check := func(p PeerId, l int) { + c.Check(results[p].Requests, qt.HasLen, l) + c.Check(results[p].Interested, qt.Equals, l > 0) + } + check(firstStealer.Id, 2) + check(secondStealer.Id, 1) + c.Check(results[stealee.Id], qt.ContentEquals, PeerNextRequestState{ + Interested: true, + Requests: requestSetFromSlice(keepReq), + }) +} + +func TestDontStealUnnecessarily(t *testing.T) { + c := qt.New(t) + basePeer := Peer{ + HasPiece: func(i pieceIndex) bool { + return true + }, + MaxRequests: math.MaxInt16, + DownloadRate: 2, + } + // Slower than the stealers, but has all requests already. + stealee := basePeer + stealee.DownloadRate = 1 + keepReqs := requestSetFromSlice( + r(3, 2), r(3, 4), r(3, 6), r(3, 8), + r(4, 0), r(4, 1), r(4, 7), r(4, 8)) + stealee.HasExistingRequest = func(r Request) bool { + _, ok := keepReqs[r] + return ok + } + stealee.Id = intPeerId(1) + firstStealer := basePeer + firstStealer.Id = intPeerId(2) + secondStealer := basePeer + secondStealer.Id = intPeerId(3) + secondStealer.HasPiece = func(i pieceIndex) bool { + switch i { + case 1, 3: + return true + default: + return false + } + } + results := Run(Input{Torrents: []Torrent{{ + Pieces: []Piece{ + { + Request: true, + NumPendingChunks: 0, + IterPendingChunks: chunkIterRange(9), + }, + { + Request: true, + NumPendingChunks: 7, + IterPendingChunks: chunkIterRange(7), + }, + { + Request: true, + NumPendingChunks: 0, + IterPendingChunks: chunkIterRange(0), + }, + { + Request: true, + NumPendingChunks: 9, + IterPendingChunks: chunkIterRange(9), + }, + { + Request: true, + NumPendingChunks: 9, + IterPendingChunks: chunkIterRange(9), + }}, + Peers: []Peer{ + firstStealer, + stealee, + secondStealer, + }, + }}}) + + c.Assert(results, qt.HasLen, 3) + check := func(p PeerId, l int) { + c.Check(results[p].Requests, qt.HasLen, l) + c.Check(results[p].Interested, qt.Equals, l > 0) + } + check(firstStealer.Id, 5) + check(secondStealer.Id, 7+9) + c.Check(results[stealee.Id], qt.ContentEquals, PeerNextRequestState{ + Interested: true, + Requests: requestSetFromSlice(r(4, 0), r(4, 1), r(4, 7), r(4, 8)), + }) +} + +// This tests a situation where multiple peers had the same existing request, due to "actual" and +// "next" request states being out of sync. This reasonable occurs when a peer hasn't fully updated +// its actual request state since the last request strategy run. 
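Illustrative example, not part of the patch: the tests above exercise stealing and preallocation; for orientation, here is a minimal use of the new exported request_strategy.Run in the same style, reusing the chunkIterRange and intPeerId helpers defined earlier in this test file (the test name is invented). A single unchoked peer that can fit everything should come out interested with both pending chunks assigned:

func TestSinglePeerGetsAllPendingChunks(t *testing.T) {
	c := qt.New(t)
	peer := Peer{
		HasPiece:    func(i pieceIndex) bool { return true },
		MaxRequests: 16,
		Id:          intPeerId(1),
	}
	results := Run(Input{Torrents: []Torrent{{
		Pieces: []Piece{{
			Request:           true,
			NumPendingChunks:  2,
			IterPendingChunks: chunkIterRange(2),
		}},
		Peers: []Peer{peer},
	}}})
	// The lone peer is marked interested and assigned both chunks.
	c.Check(results[peer.Id].Interested, qt.IsTrue)
	c.Check(results[peer.Id].Requests, qt.HasLen, 2)
}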
+func TestDuplicatePreallocations(t *testing.T) { + peer := func(id int, downloadRate float64) Peer { + return Peer{ + HasExistingRequest: func(r Request) bool { + return true + }, + MaxRequests: 2, + HasPiece: func(i pieceIndex) bool { + return true + }, + Id: intPeerId(id), + DownloadRate: downloadRate, + } + } + results := Run(Input{ + Torrents: []Torrent{{ + Pieces: []Piece{{ + Request: true, + NumPendingChunks: 1, + IterPendingChunks: chunkIterRange(1), + }, { + Request: true, + NumPendingChunks: 1, + IterPendingChunks: chunkIterRange(1), + }}, + Peers: []Peer{ + // The second peer was be marked as the preallocation, clobbering the first. The + // first peer is preferred, and the piece isn't striped, so it gets preallocated a + // request, and then gets reallocated from the peer the same request. + peer(1, 2), + peer(2, 1), + }, + }}, + }) + c := qt.New(t) + c.Assert(2, qt.Equals, len(results[intPeerId(1)].Requests)+len(results[intPeerId(2)].Requests)) +} diff --git a/request-strategy/peer.go b/request-strategy/peer.go new file mode 100644 index 00000000..21ef0d2e --- /dev/null +++ b/request-strategy/peer.go @@ -0,0 +1,38 @@ +package request_strategy + +import ( + "time" +) + +type PeerNextRequestState struct { + Interested bool + Requests map[Request]struct{} +} + +type PeerId interface { + Uintptr() uintptr +} + +type Peer struct { + HasPiece func(i pieceIndex) bool + MaxRequests int + HasExistingRequest func(r Request) bool + Choking bool + PieceAllowedFast func(pieceIndex) bool + DownloadRate float64 + Age time.Duration + // This is passed back out at the end, so must support equality. Could be a type-param later. + Id PeerId +} + +func (p *Peer) pieceAllowedFastOrDefault(i pieceIndex) bool { + if f := p.PieceAllowedFast; f != nil { + return f(i) + } + return false +} + +// TODO: This might be used in more places I think. +func (p *Peer) canRequestPiece(i pieceIndex) bool { + return p.HasPiece(i) && (!p.Choking || (p.PieceAllowedFast != nil && p.PieceAllowedFast(i))) +} diff --git a/request-strategy/piece.go b/request-strategy/piece.go new file mode 100644 index 00000000..bc59c052 --- /dev/null +++ b/request-strategy/piece.go @@ -0,0 +1,24 @@ +package request_strategy + +import ( + "github.com/anacrolix/torrent/types" +) + +type ChunksIter func(func(types.ChunkSpec)) + +type Piece struct { + Request bool + Priority piecePriority + Partial bool + Availability int64 + Length int64 + NumPendingChunks int + IterPendingChunks ChunksIter +} + +func (p Piece) iterPendingChunksWrapper(f func(ChunkSpec)) { + i := p.IterPendingChunks + if i != nil { + i(f) + } +} diff --git a/request-strategy/torrent.go b/request-strategy/torrent.go new file mode 100644 index 00000000..2090c7a8 --- /dev/null +++ b/request-strategy/torrent.go @@ -0,0 +1,11 @@ +package request_strategy + +type Torrent struct { + Pieces []Piece + Capacity *func() *int64 + Peers []Peer // not closed. + // Some value that's unique and stable between runs. Could even use the infohash? 
+ StableId uintptr + + MaxUnverifiedBytes int64 +} diff --git a/requesting.go b/requesting.go new file mode 100644 index 00000000..f622b043 --- /dev/null +++ b/requesting.go @@ -0,0 +1,136 @@ +package torrent + +import ( + "time" + "unsafe" + + "github.com/anacrolix/missinggo/v2/bitmap" + + "github.com/anacrolix/chansync" + request_strategy "github.com/anacrolix/torrent/request-strategy" + "github.com/anacrolix/torrent/types" +) + +func (cl *Client) requester() { + for { + update := func() chansync.Signaled { + cl.lock() + defer cl.unlock() + cl.doRequests() + return cl.updateRequests.Signaled() + }() + // We can probably tune how often to heed this signal. TODO: Currently disabled to retain + // existing behaviour, while the signalling is worked out. + update = nil + select { + case <-cl.closed.LockedChan(cl.locker()): + return + case <-update: + case <-time.After(100 * time.Millisecond): + } + } +} + +func (cl *Client) tickleRequester() { + cl.updateRequests.Broadcast() +} + +func (cl *Client) doRequests() { + ts := make([]request_strategy.Torrent, 0, len(cl.torrents)) + for _, t := range cl.torrents { + rst := request_strategy.Torrent{ + StableId: uintptr(unsafe.Pointer(t)), + } + if t.storage != nil { + rst.Capacity = t.storage.Capacity + } + for i := range t.pieces { + p := &t.pieces[i] + rst.Pieces = append(rst.Pieces, request_strategy.Piece{ + Request: !t.ignorePieceForRequests(i), + Priority: p.purePriority(), + Partial: t.piecePartiallyDownloaded(i), + Availability: p.availability, + Length: int64(p.length()), + NumPendingChunks: int(t.pieceNumPendingChunks(i)), + IterPendingChunks: func(f func(types.ChunkSpec)) { + p.iterUndirtiedChunks(func(cs ChunkSpec) bool { + f(cs) + return true + }) + }, + }) + } + t.iterPeers(func(p *Peer) { + if p.closed.IsSet() { + return + } + if p.piecesReceivedSinceLastRequestUpdate > p.maxPiecesReceivedBetweenRequestUpdates { + p.maxPiecesReceivedBetweenRequestUpdates = p.piecesReceivedSinceLastRequestUpdate + } + p.piecesReceivedSinceLastRequestUpdate = 0 + rst.Peers = append(rst.Peers, request_strategy.Peer{ + HasPiece: p.peerHasPiece, + MaxRequests: p.nominalMaxRequests(), + HasExistingRequest: func(r request_strategy.Request) bool { + _, ok := p.actualRequestState.Requests[r] + return ok + }, + Choking: p.peerChoking, + PieceAllowedFast: func(i pieceIndex) bool { + return p.peerAllowedFast.Contains(bitmap.BitIndex(i)) + }, + DownloadRate: p.downloadRate(), + Age: time.Since(p.completedHandshake), + Id: (*peerId)(p), + }) + }) + ts = append(ts, rst) + } + nextPeerStates := request_strategy.Run(request_strategy.Input{ + Torrents: ts, + MaxUnverifiedBytes: cl.config.MaxUnverifiedBytes, + }) + for p, state := range nextPeerStates { + setPeerNextRequestState(p, state) + } +} + +type peerId Peer + +func (p *peerId) Uintptr() uintptr { + return uintptr(unsafe.Pointer(p)) +} + +func setPeerNextRequestState(_p request_strategy.PeerId, rp request_strategy.PeerNextRequestState) { + p := (*Peer)(_p.(*peerId)) + p.nextRequestState = rp + p.onNextRequestStateChanged() +} + +func (p *Peer) applyNextRequestState() bool { + next := p.nextRequestState + current := p.actualRequestState + if !p.setInterested(next.Interested) { + return false + } + for req := range current.Requests { + if _, ok := next.Requests[req]; !ok { + if !p.cancel(req) { + return false + } + } + } + for req := range next.Requests { + more, err := p.request(req) + if err != nil { + panic(err) + } else { + //log.Print(req) + } + if !more { + return false + } + } + return true +} diff --git 
a/storage/bolt.go b/storage/bolt.go index 5104c682..025c5665 100644 --- a/storage/bolt.go +++ b/storage/bolt.go @@ -43,7 +43,11 @@ func (me *boltClient) Close() error { } func (me *boltClient) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (TorrentImpl, error) { - return &boltTorrent{me, infoHash}, nil + t := &boltTorrent{me, infoHash} + return TorrentImpl{ + Piece: t.Piece, + Close: t.Close, + }, nil } func (me *boltTorrent) Piece(p metainfo.Piece) PieceImpl { diff --git a/storage/disabled/disabled.go b/storage/disabled/disabled.go new file mode 100644 index 00000000..22d51434 --- /dev/null +++ b/storage/disabled/disabled.go @@ -0,0 +1,58 @@ +package disabled + +import ( + "errors" + + "github.com/anacrolix/torrent/metainfo" + "github.com/anacrolix/torrent/storage" +) + +type Client struct{} + +var capacity int64 + +func (c Client) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (storage.TorrentImpl, error) { + capFunc := func() *int64 { + return &capacity + } + return storage.TorrentImpl{ + Piece: func(piece metainfo.Piece) storage.PieceImpl { + return Piece{} + }, + Close: func() error { + return nil + }, + Capacity: &capFunc, + }, nil +} + +func (c Client) capacity() *int64 { + return &capacity +} + +type Piece struct{} + +func (Piece) ReadAt(p []byte, off int64) (n int, err error) { + err = errors.New("disabled") + return +} + +func (Piece) WriteAt(p []byte, off int64) (n int, err error) { + err = errors.New("disabled") + return +} + +func (Piece) MarkComplete() error { + return errors.New("disabled") +} + +func (Piece) MarkNotComplete() error { + return errors.New("disabled") +} + +func (Piece) Completion() storage.Completion { + return storage.Completion{ + Complete: false, + Ok: true, + } +} diff --git a/storage/file.go b/storage/file.go index 1a273a34..d51a7f36 100644 --- a/storage/file.go +++ b/storage/file.go @@ -67,14 +67,16 @@ func (me *fileClientImpl) Close() error { return me.pc.Close() } -func (fs *fileClientImpl) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (TorrentImpl, error) { +func (fs *fileClientImpl) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (_ TorrentImpl, err error) { dir := fs.pathMaker(fs.baseDir, info, infoHash) upvertedFiles := info.UpvertedFiles() files := make([]file, 0, len(upvertedFiles)) for i, fileInfo := range upvertedFiles { - s, err := ToSafeFilePath(append([]string{info.Name}, fileInfo.Path...)...) + var s string + s, err = ToSafeFilePath(append([]string{info.Name}, fileInfo.Path...)...) if err != nil { - return nil, fmt.Errorf("file %v has unsafe path %q: %w", i, fileInfo.Path, err) + err = fmt.Errorf("file %v has unsafe path %q: %w", i, fileInfo.Path, err) + return } f := file{ path: filepath.Join(dir, s), @@ -83,16 +85,21 @@ func (fs *fileClientImpl) OpenTorrent(info *metainfo.Info, infoHash metainfo.Has if f.length == 0 { err = CreateNativeZeroLengthFile(f.path) if err != nil { - return nil, fmt.Errorf("creating zero length file: %w", err) + err = fmt.Errorf("creating zero length file: %w", err) + return } } files = append(files, f) } - return &fileTorrentImpl{ + t := &fileTorrentImpl{ files, segments.NewIndex(common.LengthIterFromUpvertedFiles(upvertedFiles)), infoHash, fs.pc, + } + return TorrentImpl{ + Piece: t.Piece, + Close: t.Close, }, nil } diff --git a/storage/interface.go b/storage/interface.go index 584338cb..a52ee6dc 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -17,9 +17,11 @@ type ClientImpl interface { } // Data storage bound to a torrent. 
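Illustrative sketch, not part of the patch: TorrentImpl becomes a struct of function fields just below, so backends hand back the behaviours they support instead of implementing an interface. A condensed example of a client adapting to the new shape, including the optional Capacity hook that the sqlite and disabled backends expose through a single shared function pointer (memClient, memTorrent and newMemClient are invented names for illustration only):

type memTorrent struct{}

func (memTorrent) Piece(metainfo.Piece) storage.PieceImpl { return nil } // stub for illustration
func (memTorrent) Close() error                           { return nil }

type memClient struct {
	left     int64
	capacity func() *int64
}

func newMemClient(capacity int64) *memClient {
	c := &memClient{left: capacity}
	c.capacity = func() *int64 { return &c.left }
	return c
}

func (c *memClient) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (storage.TorrentImpl, error) {
	t := memTorrent{}
	return storage.TorrentImpl{
		Piece: t.Piece,
		Close: t.Close,
		// The same function pointer is returned for every torrent, so the request
		// strategy can recognise that they draw on shared capacity.
		Capacity: &c.capacity,
	}, nil
}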
-type TorrentImpl interface {
-	Piece(metainfo.Piece) PieceImpl
-	Close() error
+type TorrentImpl struct {
+	Piece func(p metainfo.Piece) PieceImpl
+	Close func() error
+	// Storages that share the same value will provide a pointer to the same function.
+	Capacity *func() *int64
 }

 // Interacts with torrent piece data. Optional interfaces to implement include:
diff --git a/storage/mmap.go b/storage/mmap.go
index c4e5b09e..3d996d9e 100644
--- a/storage/mmap.go
+++ b/storage/mmap.go
@@ -30,14 +30,14 @@ func NewMMapWithCompletion(baseDir string, completion PieceCompletion) *mmapClie
 	}
 }

-func (s *mmapClientImpl) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (t TorrentImpl, err error) {
+func (s *mmapClientImpl) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (_ TorrentImpl, err error) {
 	span, err := mMapTorrent(info, s.baseDir)
-	t = &mmapTorrentStorage{
+	t := &mmapTorrentStorage{
 		infoHash: infoHash,
 		span:     span,
 		pc:       s.pc,
 	}
-	return
+	return TorrentImpl{Piece: t.Piece, Close: t.Close}, err
 }

 func (s *mmapClientImpl) Close() error {
diff --git a/storage/piece-resource.go b/storage/piece-resource.go
index d56280bc..ec3848df 100644
--- a/storage/piece-resource.go
+++ b/storage/piece-resource.go
@@ -26,6 +26,7 @@ type ResourcePiecesOpts struct {
 	// Sized puts require being able to stream from a statement executed on another connection.
 	// Without them, we buffer the entire read and then put that.
 	NoSizedPuts bool
+	Capacity    *int64
 }

 func NewResourcePieces(p PieceProvider) ClientImpl {
@@ -49,10 +50,11 @@ func (piecePerResourceTorrentImpl) Close() error {
 }

 func (s piecePerResource) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (TorrentImpl, error) {
-	return piecePerResourceTorrentImpl{
+	t := piecePerResourceTorrentImpl{
 		s,
 		make([]sync.RWMutex, info.NumPieces()),
-	}, nil
+	}
+	return TorrentImpl{Piece: t.Piece, Close: t.Close}, nil
 }

 func (s piecePerResourceTorrentImpl) Piece(p metainfo.Piece) PieceImpl {
diff --git a/storage/sqlite/direct.go b/storage/sqlite/direct.go
index d51e1321..7748f921 100644
--- a/storage/sqlite/direct.go
+++ b/storage/sqlite/direct.go
@@ -61,9 +61,24 @@ func NewDirectStorage(opts NewDirectStorageOpts) (_ storage.ClientImplCloser, er
 	if opts.BlobFlushInterval != 0 {
 		cl.blobFlusher = time.AfterFunc(opts.BlobFlushInterval, cl.blobFlusherFunc)
 	}
+	cl.capacity = cl.getCapacity
 	return cl, nil
 }

+func (cl *client) getCapacity() (ret *int64) {
+	cl.l.Lock()
+	defer cl.l.Unlock()
+	err := sqlitex.Exec(cl.conn, "select value from setting where name='capacity'", func(stmt *sqlite.Stmt) error {
+		ret = new(int64)
+		*ret = stmt.ColumnInt64(0)
+		return nil
+	})
+	if err != nil {
+		panic(err)
+	}
+	return
+}
+
 type client struct {
 	l           sync.Mutex
 	conn        conn
@@ -71,6 +86,7 @@ type client struct {
 	blobFlusher *time.Timer
 	opts        NewDirectStorageOpts
 	closed      bool
+	capacity    func() *int64
 }

 func (c *client) blobFlusherFunc() {
@@ -91,7 +107,8 @@ func (c *client) flushBlobs() {
 }

 func (c *client) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (storage.TorrentImpl, error) {
-	return torrent{c}, nil
+	t := torrent{c}
+	return storage.TorrentImpl{Piece: t.Piece, Close: t.Close, Capacity: &c.capacity}, nil
 }

 func (c *client) Close() error {
diff --git a/test/issue377_test.go b/test/issue377_test.go
index bd8c4357..7456e9c4 100644
--- a/test/issue377_test.go
+++ b/test/issue377_test.go
@@ -124,7 +124,7 @@ func (me *diskFullStorage) Close() error {
 }

 func (d *diskFullStorage) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (storage.TorrentImpl, error) {
-	return d, nil
+	return storage.TorrentImpl{Piece: d.Piece, Close: d.Close}, nil
 }

 type pieceImpl struct {
diff --git a/test/transfer_test.go b/test/transfer_test.go
index e58bb53b..b3187c0b 100644
--- a/test/transfer_test.go
+++ b/test/transfer_test.go
@@ -12,6 +12,7 @@ import (
 	"testing/iotest"
 	"time"

+	"github.com/anacrolix/missinggo/v2/bitmap"
 	"github.com/anacrolix/missinggo/v2/filecache"
 	"github.com/anacrolix/torrent"
 	"github.com/anacrolix/torrent/internal/testutil"
@@ -168,7 +169,7 @@ func testClientTransfer(t *testing.T, ps testClientTransferParams) {
 		for _, pc := range leecherPeerConns {
 			completed := pc.PeerPieces().Len()
 			t.Logf("peer conn %v has %v completed pieces", pc, completed)
-			if completed == leecherTorrent.Info().NumPieces() {
+			if completed == bitmap.BitRange(leecherTorrent.Info().NumPieces()) {
 				foundSeeder = true
 			}
 		}
@@ -192,7 +193,6 @@ func testClientTransfer(t *testing.T, ps testClientTransferParams) {
 type fileCacheClientStorageFactoryParams struct {
 	Capacity    int64
 	SetCapacity bool
-	Wrapper     func(*filecache.Cache) storage.ClientImplCloser
 }

 func newFileCacheClientStorageFactory(ps fileCacheClientStorageFactoryParams) storageFactory {
@@ -201,10 +201,22 @@ func newFileCacheClientStorageFactory(ps fileCacheClientStorageFactoryParams) st
 		if err != nil {
 			panic(err)
 		}
+		var sharedCapacity *int64
 		if ps.SetCapacity {
+			sharedCapacity = &ps.Capacity
 			fc.SetCapacity(ps.Capacity)
 		}
-		return ps.Wrapper(fc)
+		return struct {
+			storage.ClientImpl
+			io.Closer
+		}{
+			storage.NewResourcePiecesOpts(
+				fc.AsResourceProvider(),
+				storage.ResourcePiecesOpts{
+					Capacity: sharedCapacity,
+				}),
+			ioutil.NopCloser(nil),
+		}
 	}
 }

@@ -212,17 +224,13 @@ type storageFactory func(string) storage.ClientImplCloser

 func TestClientTransferDefault(t *testing.T) {
 	testClientTransfer(t, testClientTransferParams{
-		LeecherStorage: newFileCacheClientStorageFactory(fileCacheClientStorageFactoryParams{
-			Wrapper: fileCachePieceResourceStorage,
-		}),
+		LeecherStorage: newFileCacheClientStorageFactory(fileCacheClientStorageFactoryParams{}),
 	})
 }

 func TestClientTransferDefaultNoMetadata(t *testing.T) {
 	testClientTransfer(t, testClientTransferParams{
-		LeecherStorage: newFileCacheClientStorageFactory(fileCacheClientStorageFactoryParams{
-			Wrapper: fileCachePieceResourceStorage,
-		}),
+		LeecherStorage:               newFileCacheClientStorageFactory(fileCacheClientStorageFactoryParams{}),
 		LeecherStartsWithoutMetadata: true,
 	})
 }
@@ -244,16 +252,6 @@ func TestClientTransferRateLimitedDownload(t *testing.T) {
 	})
 }

-func fileCachePieceResourceStorage(fc *filecache.Cache) storage.ClientImplCloser {
-	return struct {
-		storage.ClientImpl
-		io.Closer
-	}{
-		storage.NewResourcePieces(fc.AsResourceProvider()),
-		ioutil.NopCloser(nil),
-	}
-}
-
 func testClientTransferSmallCache(t *testing.T, setReadahead bool, readahead int64) {
 	testClientTransfer(t, testClientTransferParams{
 		LeecherStorage: newFileCacheClientStorageFactory(fileCacheClientStorageFactoryParams{
@@ -261,7 +259,6 @@ func testClientTransferSmallCache(t *testing.T, setReadahead bool, readahead int
 			// Going below the piece length means it can't complete a piece so
 			// that it can be hashed.
 			Capacity: 5,
-			Wrapper:  fileCachePieceResourceStorage,
 		}),
 		SetReadahead: setReadahead,
 		// Can't readahead too far or the cache will thrash and drop data we
@@ -324,9 +321,7 @@ func sqliteLeecherStorageTestCase(numConns int) leecherStorageTestCase {
 func TestClientTransferVarious(t *testing.T) {
 	// Leecher storage
 	for _, ls := range []leecherStorageTestCase{
-		{"Filecache", newFileCacheClientStorageFactory(fileCacheClientStorageFactoryParams{
-			Wrapper: fileCachePieceResourceStorage,
-		}), 0},
+		{"Filecache", newFileCacheClientStorageFactory(fileCacheClientStorageFactoryParams{}), 0},
 		{"Boltdb", storage.NewBoltDB, 0},
 		{"SqliteDirect", func(s string) storage.ClientImplCloser {
 			path := filepath.Join(s, "sqlite3.db")
diff --git a/torrent.go b/torrent.go
index 82b70c13..b4dda85d 100644
--- a/torrent.go
+++ b/torrent.go
@@ -12,17 +12,12 @@ import (
 	"net/http"
 	"net/url"
 	"sort"
+	"strings"
 	"sync"
 	"text/tabwriter"
 	"time"
 	"unsafe"

-	"github.com/anacrolix/torrent/common"
-	"github.com/anacrolix/torrent/segments"
-	"github.com/anacrolix/torrent/webseed"
-	"github.com/davecgh/go-spew/spew"
-	"github.com/pion/datachannel"
-
 	"github.com/anacrolix/dht/v2"
 	"github.com/anacrolix/log"
 	"github.com/anacrolix/missinggo"
@@ -32,12 +27,18 @@ import (
 	"github.com/anacrolix/missinggo/slices"
 	"github.com/anacrolix/missinggo/v2/bitmap"
 	"github.com/anacrolix/missinggo/v2/prioritybitmap"
+	"github.com/anacrolix/multiless"
+	"github.com/davecgh/go-spew/spew"
+	"github.com/pion/datachannel"

 	"github.com/anacrolix/torrent/bencode"
+	"github.com/anacrolix/torrent/common"
 	"github.com/anacrolix/torrent/metainfo"
 	pp "github.com/anacrolix/torrent/peer_protocol"
+	"github.com/anacrolix/torrent/segments"
 	"github.com/anacrolix/torrent/storage"
 	"github.com/anacrolix/torrent/tracker"
+	"github.com/anacrolix/torrent/webseed"
 	"github.com/anacrolix/torrent/webtorrent"
 )

@@ -55,9 +56,6 @@ type Torrent struct {
 	dataUploadDisallowed bool
 	userOnWriteChunkErr  func(error)

-	// Determines what chunks to request from peers.
-	requestStrategy requestStrategy
-
 	closed   missinggo.Event
 	infoHash metainfo.Hash
 	pieces   []Piece
@@ -94,8 +92,7 @@ type Torrent struct {
 	maxEstablishedConns int
 	// Set of addrs to which we're attempting to connect. Connections are
 	// half-open until all handshakes are completed.
-	halfOpen    map[string]PeerInfo
-	fastestPeer *Peer
+	halfOpen map[string]PeerInfo

 	// Reserve of peers to connect to. A peer can be both here and in the
 	// active connections if were told about the peer after connecting with
@@ -150,6 +147,34 @@ type Torrent struct {
 	pex pexState
 }

+func (t *Torrent) pieceAvailabilityFromPeers(i pieceIndex) (count int) {
+	t.iterPeers(func(peer *Peer) {
+		if peer.peerHasPiece(i) {
+			count++
+		}
+	})
+	return
+}
+
+func (t *Torrent) decPieceAvailability(i pieceIndex) {
+	if !t.haveInfo() {
+		return
+	}
+	p := t.piece(i)
+	if p.availability <= 0 {
+		panic(p.availability)
+	}
+	p.availability--
+}
+
+func (t *Torrent) incPieceAvailability(i pieceIndex) {
+	// If we don't have the info, this should be reconciled when we do.
+	if t.haveInfo() {
+		p := t.piece(i)
+		p.availability++
+	}
+}
+
 func (t *Torrent) numConns() int {
 	return len(t.conns)
 }
@@ -166,15 +191,8 @@ func (t *Torrent) readerReadaheadPieces() bitmap.Bitmap {
 	return t._readerReadaheadPieces
 }

-func (t *Torrent) ignorePieces() bitmap.Bitmap {
-	ret := t._completedPieces.Copy()
-	ret.Union(t.piecesQueuedForHash)
-	for i := 0; i < t.numPieces(); i++ {
-		if t.piece(i).hashing {
-			ret.Set(i, true)
-		}
-	}
-	return ret
+func (t *Torrent) ignorePieceForRequests(i pieceIndex) bool {
+	return !t.wantPieceIndex(i)
 }

 func (t *Torrent) pendingPieces() *prioritybitmap.PriorityBitmap {
@@ -417,8 +435,14 @@ func (t *Torrent) onSetInfo() {
 		p.onGotInfo(t.info)
 	})
 	for i := range t.pieces {
-		t.updatePieceCompletion(pieceIndex(i))
 		p := &t.pieces[i]
+		// Need to add availability before updating piece completion, as that may result in conns
+		// being dropped.
+		if p.availability != 0 {
+			panic(p.availability)
+		}
+		p.availability = int64(t.pieceAvailabilityFromPeers(i))
+		t.updatePieceCompletion(pieceIndex(i))
 		if !p.storageCompletionOk {
 			// t.logger.Printf("piece %s completion unknown, queueing check", p)
 			t.queuePieceCheck(pieceIndex(i))
@@ -536,6 +560,26 @@ func (t *Torrent) newMetadataExtensionMessage(c *PeerConn, msgType int, piece in
 	}
 }

+type pieceAvailabilityRun struct {
+	count        pieceIndex
+	availability int64
+}
+
+func (me pieceAvailabilityRun) String() string {
+	return fmt.Sprintf("%v(%v)", me.count, me.availability)
+}
+
+func (t *Torrent) pieceAvailabilityRuns() (ret []pieceAvailabilityRun) {
+	rle := missinggo.NewRunLengthEncoder(func(el interface{}, count uint64) {
+		ret = append(ret, pieceAvailabilityRun{availability: el.(int64), count: int(count)})
+	})
+	for i := range t.pieces {
+		rle.Append(t.pieces[i].availability, 1)
+	}
+	rle.Flush()
+	return
+}
+
 func (t *Torrent) pieceStateRuns() (ret PieceStateRuns) {
 	rle := missinggo.NewRunLengthEncoder(func(el interface{}, count uint64) {
 		ret = append(ret, PieceStateRun{
@@ -606,17 +650,26 @@ func (t *Torrent) writeStatus(w io.Writer) {
 		}
 		fmt.Fprintln(w)
 	}
-	fmt.Fprintf(w, "Piece length: %s\n", func() string {
-		if t.haveInfo() {
-			return fmt.Sprint(t.usualPieceSize())
-		} else {
-			return "?"
-		}
-	}())
+	fmt.Fprintf(w, "Piece length: %s\n",
+		func() string {
+			if t.haveInfo() {
+				return fmt.Sprintf("%v (%v chunks)",
+					t.usualPieceSize(),
+					float64(t.usualPieceSize())/float64(t.chunkSize))
+			} else {
+				return "no info"
+			}
+		}(),
+	)
 	if t.info != nil {
 		fmt.Fprintf(w, "Num Pieces: %d (%d completed)\n", t.numPieces(), t.numPiecesCompleted())
-		fmt.Fprintf(w, "Piece States: %s", t.pieceStateRuns())
-		fmt.Fprintln(w)
+		fmt.Fprintf(w, "Piece States: %s\n", t.pieceStateRuns())
+		fmt.Fprintf(w, "Piece availability: %v\n", strings.Join(func() (ret []string) {
+			for _, run := range t.pieceAvailabilityRuns() {
+				ret = append(ret, run.String())
+			}
+			return
+		}(), " "))
 	}
 	fmt.Fprintf(w, "Reader Pieces:")
 	t.forReaderOffsetPieces(func(begin, end pieceIndex) (again bool) {
@@ -651,8 +704,15 @@ func (t *Torrent) writeStatus(w io.Writer) {

 	spew.Fdump(w, t.statsLocked())
 	peers := t.peersAsSlice()
-	sort.Slice(peers, func(i, j int) bool {
-		return worseConn(peers[i], peers[j])
+	sort.Slice(peers, func(_i, _j int) bool {
+		i := peers[_i]
+		j := peers[_j]
+		if less, ok := multiless.New().EagerSameLess(
+			i.downloadRate() == j.downloadRate(), i.downloadRate() < j.downloadRate(),
+		).LessOk(); ok {
+			return less
+		}
+		return worseConn(i, j)
 	})
 	for i, c := range peers {
 		fmt.Fprintf(w, "%2d. ", i+1)
@@ -700,7 +760,7 @@ func (t *Torrent) bytesMissingLocked() int64 {
 }

 func (t *Torrent) bytesLeft() (left int64) {
-	bitmap.Flip(t._completedPieces, 0, bitmap.BitIndex(t.numPieces())).IterTyped(func(piece int) bool {
+	bitmap.Flip(t._completedPieces, 0, bitmap.BitRange(t.numPieces())).IterTyped(func(piece int) bool {
 		p := &t.pieces[piece]
 		left += int64(p.length() - p.numDirtyBytes())
 		return true
@@ -735,8 +795,8 @@ func (t *Torrent) numPieces() pieceIndex {
 	return pieceIndex(t.info.NumPieces())
 }

-func (t *Torrent) numPiecesCompleted() (num int) {
-	return t._completedPieces.Len()
+func (t *Torrent) numPiecesCompleted() (num pieceIndex) {
+	return pieceIndex(t._completedPieces.Len())
 }

 func (t *Torrent) close() (err error) {
@@ -746,7 +806,9 @@
 		func() {
 			t.storageLock.Lock()
 			defer t.storageLock.Unlock()
-			t.storage.Close()
+			if f := t.storage.Close; f != nil {
+				f()
+			}
 		}()
 	}
 	t.iterPeers(func(p *Peer) {
@@ -844,7 +906,7 @@ func (t *Torrent) haveAllPieces() bool {
 	if !t.haveInfo() {
 		return false
 	}
-	return t._completedPieces.Len() == bitmap.BitIndex(t.numPieces())
+	return t._completedPieces.Len() == bitmap.BitRange(t.numPieces())
 }

 func (t *Torrent) havePiece(index pieceIndex) bool {
@@ -890,11 +952,14 @@ func chunkIndex(cs ChunkSpec, chunkSize pp.Integer) int {
 }

 func (t *Torrent) wantPieceIndex(index pieceIndex) bool {
-	if !t.haveInfo() {
-		return false
-	}
-	if index < 0 || index >= t.numPieces() {
-		return false
+	// TODO: Are these overly conservative, should we be guarding this here?
+	{
+		if !t.haveInfo() {
+			return false
+		}
+		if index < 0 || index >= t.numPieces() {
+			return false
+		}
 	}
 	p := &t.pieces[index]
 	if p.queuedForHash() {
@@ -906,7 +971,7 @@ func (t *Torrent) wantPieceIndex(index pieceIndex) bool {
 	if t.pieceComplete(index) {
 		return false
 	}
-	if t._pendingPieces.Contains(bitmap.BitIndex(index)) {
+	if t._pendingPieces.Contains(int(index)) {
 		return true
 	}
 	// t.logger.Printf("piece %d not pending", index)
@@ -965,7 +1030,7 @@ func (t *Torrent) pieceNumPendingChunks(piece pieceIndex) pp.Integer {
 }

 func (t *Torrent) pieceAllDirty(piece pieceIndex) bool {
-	return t.pieces[piece]._dirtyChunks.Len() == int(t.pieceNumChunks(piece))
+	return t.pieces[piece]._dirtyChunks.Len() == bitmap.BitRange(t.pieceNumChunks(piece))
 }

 func (t *Torrent) readersChanged() {
@@ -1024,11 +1089,11 @@ func (t *Torrent) updatePiecePriority(piece pieceIndex) {
 	newPrio := p.uncachedPriority()
 	// t.logger.Printf("torrent %p: piece %d: uncached priority: %v", t, piece, newPrio)
 	if newPrio == PiecePriorityNone {
-		if !t._pendingPieces.Remove(bitmap.BitIndex(piece)) {
+		if !t._pendingPieces.Remove(int(piece)) {
 			return
 		}
 	} else {
-		if !t._pendingPieces.Set(bitmap.BitIndex(piece), newPrio.BitmapPriority()) {
+		if !t._pendingPieces.Set(int(piece), newPrio.BitmapPriority()) {
 			return
 		}
 	}
@@ -1084,7 +1149,7 @@ func (t *Torrent) forReaderOffsetPieces(f func(begin, end pieceIndex) (more bool
 }

 func (t *Torrent) piecePriority(piece pieceIndex) piecePriority {
-	prio, ok := t._pendingPieces.GetPriority(bitmap.BitIndex(piece))
+	prio, ok := t._pendingPieces.GetPriority(piece)
 	if !ok {
 		return PiecePriorityNone
 	}
@@ -1232,7 +1297,7 @@ func (t *Torrent) readerPiecePriorities() (now, readahead bitmap.Bitmap) {
 	t.forReaderOffsetPieces(func(begin, end pieceIndex) bool {
 		if end > begin {
 			now.Add(bitmap.BitIndex(begin))
-			readahead.AddRange(bitmap.BitIndex(begin)+1, bitmap.BitIndex(end))
+			readahead.AddRange(bitmap.BitRange(begin)+1, bitmap.BitRange(end))
 		}
 		return true
 	})
@@ -1319,6 +1384,16 @@ func (t *Torrent) deletePeerConn(c *PeerConn) (ret bool) {
 	return
 }

+func (t *Torrent) decPeerPieceAvailability(p *Peer) {
+	if !t.haveInfo() {
+		return
+	}
+	p.newPeerPieces().IterTyped(func(i int) bool {
+		p.t.decPieceAvailability(i)
+		return true
+	})
+}
+
 func (t *Torrent) numActivePeers() (num int) {
 	t.iterPeers(func(*Peer) {
 		num++
@@ -1905,7 +1980,7 @@ func (t *Torrent) tryCreatePieceHasher() bool {
 		return false
 	}
 	p := t.piece(pi)
-	t.piecesQueuedForHash.Remove(pi)
+	t.piecesQueuedForHash.Remove(bitmap.BitIndex(pi))
 	p.hashing = true
 	t.publishPieceChange(pi)
 	t.updatePiecePriority(pi)
@@ -2036,30 +2111,6 @@ func (t *Torrent) piece(i int) *Piece {
 	return &t.pieces[i]
 }

-func (t *Torrent) requestStrategyTorrent() requestStrategyTorrent {
-	return t
-}
-
-type torrentRequestStrategyCallbacks struct {
-	t *Torrent
-}
-
-func (cb torrentRequestStrategyCallbacks) requestTimedOut(r Request) {
-	torrent.Add("Request timeouts", 1)
-	cb.t.cl.lock()
-	defer cb.t.cl.unlock()
-	cb.t.iterPeers(func(cn *Peer) {
-		if cn.peerHasPiece(pieceIndex(r.Index)) {
-			cn.updateRequests()
-		}
-	})
-
-}
-
-func (t *Torrent) requestStrategyCallbacks() requestStrategyCallbacks {
-	return torrentRequestStrategyCallbacks{t}
-}
-
 func (t *Torrent) onWriteChunkErr(err error) {
 	if t.userOnWriteChunkErr != nil {
 		go t.userOnWriteChunkErr(err)
@@ -2121,7 +2172,7 @@ func (t *Torrent) SetOnWriteChunkError(f func(error)) {
 	t.userOnWriteChunkErr = f
 }

-func (t *Torrent) iterPeers(f func(*Peer)) {
+func (t *Torrent) iterPeers(f func(p *Peer)) {
 	for pc := range t.conns {
 		f(&pc.Peer)
 	}
@@ -2154,7 +2205,6 @@ func (t *Torrent) addWebSeed(url string) {
 			outgoing:                 true,
 			Network:                  "http",
 			reconciledHandshakeStats: true,
-			peerSentHaveAll:          true,
 			// TODO: Raise this limit, and instead limit concurrent fetches.
 			PeerMaxRequests: 32,
 			RemoteAddr:      remoteAddrFromUrl(url),
@@ -2180,6 +2230,7 @@ func (t *Torrent) addWebSeed(url string) {
 		ws.onGotInfo(t.info)
 	}
 	t.webSeeds[url] = &ws.peer
+	ws.peer.onPeerHasAllPieces()
 }

 func (t *Torrent) peerIsActive(p *Peer) (active bool) {
diff --git a/torrent_test.go b/torrent_test.go
index d53aa693..ceff1fee 100644
--- a/torrent_test.go
+++ b/torrent_test.go
@@ -9,6 +9,7 @@ import (
 	"testing"

 	"github.com/anacrolix/missinggo"
+	"github.com/anacrolix/missinggo/v2/bitmap"
 	"github.com/bradfitz/iter"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -96,8 +97,8 @@ func BenchmarkUpdatePiecePriorities(b *testing.B) {
 		r.Seek(3500000, io.SeekStart)
 	}
 	assert.Len(b, t.readers, 7)
-	for i := 0; i < int(t.numPieces()); i += 3 {
-		t._completedPieces.Set(i, true)
+	for i := 0; i < t.numPieces(); i += 3 {
+		t._completedPieces.Set(bitmap.BitIndex(i), true)
 	}
 	t.DownloadPieces(0, t.numPieces())
 	for range iter.N(b.N) {
diff --git a/types/types.go b/types/types.go
new file mode 100644
index 00000000..a06f7e6a
--- /dev/null
+++ b/types/types.go
@@ -0,0 +1,52 @@
+package types
+
+import (
+	pp "github.com/anacrolix/torrent/peer_protocol"
+)
+
+type PieceIndex = int
+
+type ChunkSpec struct {
+	Begin, Length pp.Integer
+}
+
+type Request struct {
+	Index pp.Integer
+	ChunkSpec
+}
+
+func (r Request) ToMsg(mt pp.MessageType) pp.Message {
+	return pp.Message{
+		Type:   mt,
+		Index:  r.Index,
+		Begin:  r.Begin,
+		Length: r.Length,
+	}
+}
+
+// Describes the importance of obtaining a particular piece.
+type PiecePriority byte
+
+func (pp *PiecePriority) Raise(maybe PiecePriority) bool {
+	if maybe > *pp {
+		*pp = maybe
+		return true
+	}
+	return false
+}
+
+// Priority for use in PriorityBitmap
+func (me PiecePriority) BitmapPriority() int {
+	return -int(me)
+}
+
+const (
+	PiecePriorityNone      PiecePriority = iota // Not wanted. Must be the zero value.
+	PiecePriorityNormal                         // Wanted.
+	PiecePriorityHigh                           // Wanted a lot.
+	PiecePriorityReadahead                      // May be required soon.
+	// Succeeds a piece where a read occurred. Currently the same as Now,
+	// apparently due to issues with caching.
+	PiecePriorityNext
+	PiecePriorityNow // A Reader is reading in this piece. Highest urgency.
+)
diff --git a/webseed-peer.go b/webseed-peer.go
index e2df582e..f2ccbff5 100644
--- a/webseed-peer.go
+++ b/webseed-peer.go
@@ -24,6 +24,10 @@ type webseedPeer struct {

 var _ peerImpl = (*webseedPeer)(nil)

+func (me *webseedPeer) writeBufferFull() bool {
+	return false
+}
+
 func (me *webseedPeer) connStatusString() string {
 	return me.client.Url
 }
@@ -37,20 +41,15 @@ func (ws *webseedPeer) onGotInfo(info *metainfo.Info) {
 	ws.client.Info = info
 }

-func (ws *webseedPeer) _postCancel(r Request) {
-	ws.cancel(r)
-}
-
 func (ws *webseedPeer) writeInterested(interested bool) bool {
 	return true
 }

-func (ws *webseedPeer) cancel(r Request) bool {
+func (ws *webseedPeer) _cancel(r Request) bool {
 	active, ok := ws.activeRequests[r]
-	if !ok {
-		return false
+	if ok {
+		active.Cancel()
 	}
-	active.Cancel()
 	return true
 }

@@ -58,7 +57,7 @@ func (ws *webseedPeer) intoSpec(r Request) webseed.RequestSpec {
 	return webseed.RequestSpec{ws.peer.t.requestOffset(r), int64(r.Length)}
 }

-func (ws *webseedPeer) request(r Request) bool {
+func (ws *webseedPeer) _request(r Request) bool {
 	ws.requesterCond.Signal()
 	return true
 }
@@ -79,7 +78,7 @@ func (ws *webseedPeer) requester() {
 	defer ws.requesterCond.L.Unlock()
 start:
 	for !ws.peer.closed.IsSet() {
-		for r := range ws.peer.requests {
+		for r := range ws.peer.actualRequestState.Requests {
 			if _, ok := ws.activeRequests[r]; ok {
 				continue
 			}
@@ -99,7 +98,6 @@ func (ws *webseedPeer) connectionFlags() string {
 func (ws *webseedPeer) drop() {}

 func (ws *webseedPeer) updateRequests() {
-	ws.peer.doRequestState()
 }

 func (ws *webseedPeer) onClose() {
@@ -112,6 +110,10 @@ func (ws *webseedPeer) onClose() {

 func (ws *webseedPeer) requestResultHandler(r Request, webseedRequest webseed.Request) {
 	result := <-webseedRequest.Result
+	// We do this here rather than inside receiveChunk, since we want to count errors too. I'm not
+	// sure if we can divine which errors indicate cancellation on our end without hitting the
+	// network though.
+	ws.peer.doChunkReadStats(int64(len(result.Bytes)))
 	ws.peer.t.cl.lock()
 	defer ws.peer.t.cl.unlock()
 	if result.Err != nil {
@@ -146,3 +148,7 @@ func (ws *webseedPeer) requestResultHandler(r Request, webseedRequest webseed.Re
 		}
 	}
 }
+
+func (me *webseedPeer) onNextRequestStateChanged() {
+	me.peer.applyNextRequestState() 
+}
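
Editor's note (illustrative, not part of the patch): with TorrentImpl now a struct of function values rather than an interface, a storage backend returns closures from OpenTorrent instead of implementing Piece/Close methods on a single type. The sketch below shows one way a third-party backend might adapt. The in-memory ramStorage/ramPiece types are hypothetical, and the PieceImpl method set and Completion field names are assumed to match the storage package at this revision.

package ramstorage

import (
	"io"
	"sync"

	"github.com/anacrolix/torrent/metainfo"
	"github.com/anacrolix/torrent/storage"
)

// ramStorage keeps piece data in memory, keyed by infohash and piece index.
type ramStorage struct {
	mu       sync.Mutex
	pieces   map[metainfo.Hash]map[int][]byte
	capacity int64
	// One function value per client: torrents opened from the same client share
	// it, which is what the Capacity *func() *int64 field is meant to convey.
	capFunc func() *int64
}

func NewRamStorage(capacity int64) storage.ClientImpl {
	rs := &ramStorage{pieces: map[metainfo.Hash]map[int][]byte{}, capacity: capacity}
	rs.capFunc = func() *int64 { c := rs.capacity; return &c }
	return rs
}

func (rs *ramStorage) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (storage.TorrentImpl, error) {
	rs.mu.Lock()
	if rs.pieces[infoHash] == nil {
		rs.pieces[infoHash] = map[int][]byte{}
	}
	rs.mu.Unlock()
	return storage.TorrentImpl{
		Piece:    func(p metainfo.Piece) storage.PieceImpl { return &ramPiece{rs: rs, ih: infoHash, p: p} },
		Close:    func() error { return nil },
		Capacity: &rs.capFunc,
	}, nil
}

type ramPiece struct {
	rs       *ramStorage
	ih       metainfo.Hash
	p        metainfo.Piece
	complete bool
}

// buf lazily allocates the full-length backing slice for this piece.
func (rp *ramPiece) buf() []byte {
	rp.rs.mu.Lock()
	defer rp.rs.mu.Unlock()
	b := rp.rs.pieces[rp.ih][rp.p.Index()]
	if b == nil {
		b = make([]byte, rp.p.Length())
		rp.rs.pieces[rp.ih][rp.p.Index()] = b
	}
	return b
}

func (rp *ramPiece) ReadAt(b []byte, off int64) (n int, err error) {
	n = copy(b, rp.buf()[off:])
	if n < len(b) {
		err = io.EOF
	}
	return
}

func (rp *ramPiece) WriteAt(b []byte, off int64) (n int, err error) {
	n = copy(rp.buf()[off:], b)
	if n < len(b) {
		err = io.ErrShortWrite
	}
	return
}

func (rp *ramPiece) MarkComplete() error    { rp.complete = true; return nil }
func (rp *ramPiece) MarkNotComplete() error { rp.complete = false; return nil }

func (rp *ramPiece) Completion() storage.Completion {
	return storage.Completion{Complete: rp.complete, Ok: true}
}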
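
Editor's note (illustrative, not part of the patch): the transfer test no longer wraps the filecache through a Wrapper func; instead the same *int64 is handed both to the filecache (which evicts to stay under it) and to ResourcePiecesOpts.Capacity, so the resource-backed storage can report the limit. A minimal sketch of that wiring, assuming filecache.NewCache(dir) as used in the test; the helper name and the 10 MiB figure are arbitrary.

package main

import (
	"github.com/anacrolix/missinggo/v2/filecache"
	"github.com/anacrolix/torrent/storage"
)

// newCappedFilecacheStorage shares one capacity value between the cache's
// eviction logic and the torrent storage layer.
func newCappedFilecacheStorage(dir string) (storage.ClientImpl, error) {
	fc, err := filecache.NewCache(dir)
	if err != nil {
		return nil, err
	}
	capacity := int64(10 << 20) // 10 MiB, arbitrary for the sketch.
	fc.SetCapacity(capacity)
	return storage.NewResourcePiecesOpts(
		fc.AsResourceProvider(),
		storage.ResourcePiecesOpts{Capacity: &capacity},
	), nil
}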
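
Editor's note (illustrative, not part of the patch): pieceAvailabilityRuns run-length encodes the per-piece availability counters, and each run prints as count(availability). So the new "Piece availability:" status line for a torrent whose first 10 pieces are seen on 2 peers and whose remaining 990 pieces are unseen reads "10(2) 990(0)". A standalone sketch of the same formatting, using a plain slice instead of the missinggo run-length encoder:

package main

import (
	"fmt"
	"strings"
)

// availabilityRuns groups equal adjacent availability values and renders each
// group as count(availability), mirroring pieceAvailabilityRun.String.
func availabilityRuns(avail []int64) string {
	var parts []string
	for i := 0; i < len(avail); {
		j := i
		for j < len(avail) && avail[j] == avail[i] {
			j++
		}
		parts = append(parts, fmt.Sprintf("%v(%v)", j-i, avail[i]))
		i = j
	}
	return strings.Join(parts, " ")
}

func main() {
	avail := make([]int64, 1000)
	for i := 0; i < 10; i++ {
		avail[i] = 2
	}
	fmt.Println(availabilityRuns(avail)) // prints: 10(2) 990(0)
}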