gofumpt

parent f86af21cd2
commit c6ee03f449
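The hunks below are all mechanical formatting rewrites. For orientation, the sketch that follows is a hand-written example (the package name and identifiers are invented, not taken from this repository) of the patterns that recur throughout the commit: single-declaration var groups unwrapped, in-function `var x = ...` turned into short declarations, octal literals written with the `0o` prefix, commented-out code given a space after `//`, and stray blank lines at block boundaries dropped.

// Package gofumptdemo is a hand-written sketch, not part of this commit,
// illustrating the rewrite patterns that appear in the hunks below.
package gofumptdemo

import "os"

// A parenthesized group holding a single declaration,
//
//	var (
//		dataDir = "data"
//	)
//
// is collapsed to:
var dataDir = "data"

func setup() error {
	// `var mode = 0750` becomes a short declaration, and the octal
	// literal gets an explicit 0o prefix:
	mode := os.FileMode(0o750)
	// Commented-out code gains a space after the comment marker:
	// log.Printf("mode: %v", mode)
	return os.MkdirAll(dataDir, mode)
}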
@@ -224,7 +224,7 @@ func getDictField(dict reflect.Type, key string) (_ dictField, err error) {
 mapValue.Set(reflect.MakeMap(dict))
 }
 // Assigns the value into the map.
-//log.Printf("map type: %v", mapValue.Type())
+// log.Printf("map type: %v", mapValue.Type())
 mapValue.SetMapIndex(reflect.ValueOf(key).Convert(dict.Key()), value)
 }
 },
@@ -345,7 +345,7 @@ func (d *Decoder) parseDict(v reflect.Value) error {
 continue
 }
 setValue := reflect.New(df.Type).Elem()
-//log.Printf("parsing into %v", setValue.Type())
+// log.Printf("parsing into %v", setValue.Type())
 ok, err = d.parseValue(setValue)
 if err != nil {
 var target *UnmarshalTypeError
@@ -468,7 +468,6 @@ func (d *Decoder) readOneValue() bool {
 }
 
 return true
-
 }
 
 func (d *Decoder) parseUnmarshaler(v reflect.Value) bool {
@@ -23,8 +23,10 @@ var random_decode_tests = []random_decode_test{
 {"5:hello", "hello"},
 {"29:unicode test проверка", "unicode test проверка"},
 {"d1:ai5e1:b5:helloe", map[string]interface{}{"a": int64(5), "b": "hello"}},
-{"li5ei10ei15ei20e7:bencodee",
-[]interface{}{int64(5), int64(10), int64(15), int64(20), "bencode"}},
+{
+"li5ei10ei15ei20e7:bencodee",
+[]interface{}{int64(5), int64(10), int64(15), int64(20), "bencode"},
+},
 {"ldedee", []interface{}{map[string]interface{}{}, map[string]interface{}{}}},
 {"le", []interface{}{}},
 {"i604919719469385652980544193299329427705624352086e", func() *big.Int {
@@ -135,7 +137,6 @@ func TestUnmarshalerBencode(t *testing.T) {
 assert_equal(t, ss[0].x, "5:hello")
 assert_equal(t, ss[1].x, "5:fruit")
 assert_equal(t, ss[2].x, "3:way")
-
 }
 
 func TestIgnoreUnmarshalTypeError(t *testing.T) {
@@ -101,7 +101,6 @@ func (e *Encoder) reflectMarshaler(v reflect.Value) bool {
 var bigIntType = reflect.TypeOf((*big.Int)(nil)).Elem()
 
 func (e *Encoder) reflectValue(v reflect.Value) {
-
 if e.reflectMarshaler(v) {
 return
 }
@@ -477,7 +477,6 @@ func (cl *Client) rejectAccepted(conn net.Conn) error {
 }
 if cl.config.DisableIPv4 && len(rip) == net.IPv4len {
 return errors.New("ipv4 disabled")
-
 }
 if cl.config.DisableIPv6 && len(rip) == net.IPv6len && rip.To4() == nil {
 return errors.New("ipv6 disabled")
@@ -735,7 +734,7 @@ func (cl *Client) establishOutgoingConn(t *Torrent, addr PeerRemoteAddr) (c *Pee
 torrent.Add("initiated conn with preferred header obfuscation", 1)
 return
 }
-//cl.logger.Printf("error establishing connection to %s (obfuscatedHeader=%t): %v", addr, obfuscatedHeaderFirst, err)
+// cl.logger.Printf("error establishing connection to %s (obfuscatedHeader=%t): %v", addr, obfuscatedHeaderFirst, err)
 if cl.config.HeaderObfuscationPolicy.RequirePreferred {
 // We should have just tried with the preferred header obfuscation. If it was required,
 // there's nothing else to try.
@@ -746,7 +745,7 @@ func (cl *Client) establishOutgoingConn(t *Torrent, addr PeerRemoteAddr) (c *Pee
 if err == nil {
 torrent.Add("initiated conn with fallback header obfuscation", 1)
 }
-//cl.logger.Printf("error establishing fallback connection to %v: %v", addr, err)
+// cl.logger.Printf("error establishing fallback connection to %v: %v", addr, err)
 return
 }
 
@@ -648,7 +648,7 @@ func TestSetMaxEstablishedConn(t *testing.T) {
 // Creates a file containing its own name as data. Make a metainfo from that, adds it to the given
 // client, and returns a magnet link.
 func makeMagnet(t *testing.T, cl *Client, dir string, name string) string {
-os.MkdirAll(dir, 0770)
+os.MkdirAll(dir, 0o770)
 file, err := os.Create(filepath.Join(dir, name))
 require.NoError(t, err)
 file.Write([]byte(name))
@@ -688,7 +688,7 @@ func testSeederLeecherPair(t *testing.T, seeder func(*ClientConfig), leecher fun
 cfg := TestingConfig(t)
 cfg.Seed = true
 cfg.DataDir = filepath.Join(cfg.DataDir, "server")
-os.Mkdir(cfg.DataDir, 0755)
+os.Mkdir(cfg.DataDir, 0o755)
 seeder(cfg)
 server, err := NewClient(cfg)
 require.NoError(t, err)
@@ -10,13 +10,11 @@ import (
 "github.com/anacrolix/torrent/metainfo"
 )
 
-var (
-builtinAnnounceList = [][]string{
-{"http://p4p.arenabg.com:1337/announce"},
-{"udp://tracker.opentrackr.org:1337/announce"},
-{"udp://tracker.openbittorrent.com:6969/announce"},
-}
-)
+var builtinAnnounceList = [][]string{
+{"http://p4p.arenabg.com:1337/announce"},
+{"udp://tracker.opentrackr.org:1337/announce"},
+{"udp://tracker.openbittorrent.com:6969/announce"},
+}
 
 func main() {
 log.SetFlags(log.Flags() | log.Lshortfile)
@@ -78,7 +78,7 @@ func dstFileName(picked string) string {
 
 func main() {
 log.SetFlags(log.LstdFlags | log.Lshortfile)
-var rootGroup = struct {
+rootGroup := struct {
 Client *torrent.ClientConfig `group:"Client Options"`
 TestPeers []string `long:"test-peer" description:"address of peer to inject to every torrent"`
 Pick string `long:"pick" description:"filename to pick"`
@@ -64,7 +64,7 @@ func verifyTorrent(info *metainfo.Info, root string) error {
 
 func main() {
 log.SetFlags(log.Flags() | log.Lshortfile)
-var flags = struct {
+flags := struct {
 DataDir string
 tagflag.StartPos
 TorrentFile string
@@ -256,7 +256,7 @@ func downloadErr(flags downloadFlags) error {
 if err != nil {
 return fmt.Errorf("creating client: %w", err)
 }
-var clientClose sync.Once //In certain situations, close was being called more than once.
+var clientClose sync.Once // In certain situations, close was being called more than once.
 defer clientClose.Do(func() { client.Close() })
 go exitSignalHandlers(&stop)
 go func() {
@@ -24,28 +24,26 @@ import (
 "github.com/anacrolix/torrent/util/dirwatch"
 )
 
-var (
-args = struct {
+var args = struct {
 MetainfoDir string `help:"torrent files in this location describe the contents of the mounted filesystem"`
 DownloadDir string `help:"location to save torrent data"`
 MountDir string `help:"location the torrent contents are made available"`
 
 DisableTrackers bool
 TestPeer *net.TCPAddr
 ReadaheadBytes tagflag.Bytes
 ListenAddr *net.TCPAddr
 }{
 MetainfoDir: func() string {
 _user, err := user.Current()
 if err != nil {
 panic(err)
 }
 return filepath.Join(_user.HomeDir, ".config/transmission/torrents")
 }(),
 ReadaheadBytes: 10 << 20,
 ListenAddr: &net.TCPAddr{},
 }
-)
 
 func exitSignalHandlers(fs *torrentfs.TorrentFS) {
 c := make(chan os.Signal, 1)
@@ -202,8 +202,8 @@ func NewDefaultClientConfig() *ClientConfig {
 Extensions: defaultPeerExtensionBytes(),
 AcceptPeerConnections: true,
 }
-//cc.ConnTracker.SetNoMaxEntries()
-//cc.ConnTracker.Timeout = func(conntrack.Entry) time.Duration { return 0 }
+// cc.ConnTracker.SetNoMaxEntries()
+// cc.ConnTracker.Timeout = func(conntrack.Entry) time.Duration { return 0 }
 return cc
 }
 
@@ -40,7 +40,6 @@ func (me testFileBytesLeft) Run(t *testing.T) {
 }
 
 func TestFileBytesLeft(t *testing.T) {
-
 testFileBytesLeft{
 usualPieceSize: 3,
 firstPieceIndex: 1,
@@ -45,7 +45,7 @@ func (me fileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse
 me.fn.FS.mu.Unlock()
 var n int
 r := missinggo.ContextedReader{r, ctx}
-//log.Printf("reading %v bytes at %v", len(resp.Data), req.Offset)
+// log.Printf("reading %v bytes at %v", len(resp.Data), req.Offset)
 if true {
 // A user reported on that on freebsd 12.2, the system requires that reads are
 // completely filled. Their system only asks for 64KiB at a time. I've seen systems that
@@ -14,9 +14,7 @@ type fileNode struct {
 f *torrent.File
 }
 
-var (
-_ fusefs.NodeOpener = fileNode{}
-)
+var _ fusefs.NodeOpener = fileNode{}
 
 func (fn fileNode) Attr(ctx context.Context, attr *fuse.Attr) error {
 attr.Size = uint64(fn.f.Length())
@@ -15,12 +15,10 @@ import (
 )
 
 const (
-defaultMode = 0555
+defaultMode = 0o555
 )
 
-var (
-torrentfsReadRequests = expvar.NewInt("torrentfsReadRequests")
-)
+var torrentfsReadRequests = expvar.NewInt("torrentfsReadRequests")
 
 type TorrentFS struct {
 Client *torrent.Client
@@ -55,9 +53,7 @@ type dirNode struct {
 node
 }
 
-var (
-_ fusefs.HandleReadDirAller = dirNode{}
-)
+var _ fusefs.HandleReadDirAller = dirNode{}
 
 func isSubPath(parent, child string) bool {
 if parent == "" {
@@ -68,9 +68,9 @@ func newGreetingLayout() (tl testLayout, err error) {
 return
 }
 tl.Completed = filepath.Join(tl.BaseDir, "completed")
-os.Mkdir(tl.Completed, 0777)
+os.Mkdir(tl.Completed, 0o777)
 tl.MountDir = filepath.Join(tl.BaseDir, "mnt")
-os.Mkdir(tl.MountDir, 0777)
+os.Mkdir(tl.MountDir, 0o777)
 testutil.CreateDummyTorrentData(tl.Completed)
 tl.Metainfo = testutil.GreetingMetaInfo()
 return
@@ -12,7 +12,7 @@ import (
 )
 
 func main() {
-var flags = struct {
+flags := struct {
 tagflag.StartPos
 Ips []net.IP
 }{}
@@ -59,6 +59,7 @@ var (
 func (h *Hash) UnmarshalText(b []byte) error {
 return h.FromHexString(string(b))
 }
+
 func (h Hash) MarshalText() (text []byte, err error) {
 return []byte(h.HexString()), nil
 }
@@ -67,7 +67,6 @@ func TestParseMagnetURI(t *testing.T) {
 if err == nil {
 t.Errorf("Failed to detect broken Magnet URI: %v", uri)
 }
-
 }
 
 func TestMagnetize(t *testing.T) {
@@ -10,9 +10,7 @@ import (
 
 type Node string
 
-var (
-_ bencode.Unmarshaler = (*Node)(nil)
-)
+var _ bencode.Unmarshaler = (*Node)(nil)
 
 func (n *Node) UnmarshalBencode(b []byte) (err error) {
 var iface interface{}
@@ -6,9 +6,7 @@ import (
 
 type UrlList []string
 
-var (
-_ bencode.Unmarshaler = (*UrlList)(nil)
-)
+var _ bencode.Unmarshaler = (*UrlList)(nil)
 
 func (me *UrlList) UnmarshalBencode(b []byte) error {
 if len(b) == 0 {
@@ -44,11 +44,11 @@ func (me *MMapSpan) InitIndex() {
 i++
 return l, true
 })
-//log.Printf("made mmapspan index: %v", me.segmentLocater)
+// log.Printf("made mmapspan index: %v", me.segmentLocater)
 }
 
 func (ms *MMapSpan) ReadAt(p []byte, off int64) (n int, err error) {
-//log.Printf("reading %v bytes at %v", len(p), off)
+// log.Printf("reading %v bytes at %v", len(p), off)
 ms.mu.RLock()
 defer ms.mu.RUnlock()
 n = ms.locateCopy(func(a, b []byte) (_, _ []byte) { return a, b }, p, off)
@@ -65,7 +65,7 @@ func copyBytes(dst, src []byte) int {
 func (ms *MMapSpan) locateCopy(copyArgs func(remainingArgument, mmapped []byte) (dst, src []byte), p []byte, off int64) (n int) {
 ms.segmentLocater.Locate(segments.Extent{off, int64(len(p))}, func(i int, e segments.Extent) bool {
 mMapBytes := ms.mMaps[i][e.Start:]
-//log.Printf("got segment %v: %v, copying %v, %v", i, e, len(p), len(mMapBytes))
+// log.Printf("got segment %v: %v, copying %v, %v", i, e, len(p), len(mMapBytes))
 _n := copyBytes(copyArgs(p, mMapBytes))
 p = p[_n:]
 n += _n
@@ -20,7 +20,7 @@ func main() {
 }
 
 func mainErr() error {
-var args = struct {
+args := struct {
 CryptoMethod mse.CryptoMethod
 Dial *struct {
 Network string `arg:"positional"`
@@ -260,7 +260,7 @@ func BenchmarkSkeysReceive(b *testing.B) {
 }
 fillRand(b, skeys...)
 initSkey := skeys[len(skeys)/2]
-//c := qt.New(b)
+// c := qt.New(b)
 b.ReportAllocs()
 b.ResetTimer()
 for i := 0; i < b.N; i += 1 {
@@ -481,7 +481,6 @@ func (cn *Peer) totalExpectingTime() (ret time.Duration) {
 ret += time.Since(cn.lastStartedExpectingToReceiveChunks)
 }
 return
-
 }
 
 func (cn *PeerConn) onPeerSentCancel(r Request) {
@@ -961,7 +960,7 @@ func (c *PeerConn) onReadRequest(r Request) error {
 value := &peerRequestState{}
 c.peerRequests[r] = value
 go c.peerRequestDataReader(r, value)
-//c.tickleWriter()
+// c.tickleWriter()
 return nil
 }
 
@@ -1235,7 +1234,7 @@ func (c *PeerConn) onReadExtendedMsg(id pp.ExtensionNumber, payload []byte) (err
 if cb := c.callbacks.ReadExtendedHandshake; cb != nil {
 cb(c, &d)
 }
-//c.logger.WithDefaultLevel(log.Debug).Printf("received extended handshake message:\n%s", spew.Sdump(d))
+// c.logger.WithDefaultLevel(log.Debug).Printf("received extended handshake message:\n%s", spew.Sdump(d))
 if d.Reqq != 0 {
 c.PeerMaxRequests = d.Reqq
 }
@@ -1346,7 +1345,7 @@ func (c *Peer) receiveChunk(msg *pp.Message) error {
 
 // Do we actually want this chunk?
 if t.haveChunk(ppReq) {
-//panic(fmt.Sprintf("%+v", ppReq))
+// panic(fmt.Sprintf("%+v", ppReq))
 chunksReceived.Add("wasted", 1)
 c.allStats(add(1, func(cs *ConnStats) *Count { return &cs.ChunksReadWasted }))
 return nil
@@ -28,7 +28,7 @@ func TestSendBitfieldThenHave(t *testing.T) {
 t.Log(err)
 }
 r, w := io.Pipe()
-//c.r = r
+// c.r = r
 c.w = w
 c.startWriter()
 c.locker().Lock()
@@ -155,7 +155,7 @@ func TestConnPexPeerFlags(t *testing.T) {
 tcpAddr = &net.TCPAddr{IP: net.IPv6loopback, Port: 4848}
 udpAddr = &net.UDPAddr{IP: net.IPv6loopback, Port: 4848}
 )
-var testcases = []struct {
+testcases := []struct {
 conn *PeerConn
 f pp.PexPeerFlags
 }{
@@ -181,7 +181,7 @@ func TestConnPexEvent(t *testing.T) {
 dialTcpAddr = &net.TCPAddr{IP: net.IPv6loopback, Port: 4747}
 dialUdpAddr = &net.UDPAddr{IP: net.IPv6loopback, Port: 4747}
 )
-var testcases = []struct {
+testcases := []struct {
 t pexEventType
 c *PeerConn
 e pexEvent
piece.go
@@ -164,10 +164,10 @@ func (p *Piece) VerifyData() {
 if p.hashing {
 target++
 }
-//log.Printf("target: %d", target)
+// log.Printf("target: %d", target)
 p.t.queuePieceCheck(p.index)
 for {
-//log.Printf("got %d verifies", p.numVerifies)
+// log.Printf("got %d verifies", p.numVerifies)
 if p.numVerifies >= target {
 break
 }
@@ -330,7 +330,7 @@ func allocatePendingChunks(p requestablePiece, peers []*requestsPeer) {
 sortPeersForPiece := func(req *RequestIndex) {
 peersForPieceSorter.req = req
 sort.Sort(&peersForPieceSorter)
-//ensureValidSortedPeersForPieceRequests(&peersForPieceSorter)
+// ensureValidSortedPeersForPieceRequests(&peersForPieceSorter)
 }
 // Chunks can be preassigned several times, if peers haven't been able to update their "actual"
 // with "next" request state before another request strategy run occurs.
@@ -247,7 +247,8 @@ func TestDontStealUnnecessarily(t *testing.T) {
 Request: true,
 NumPendingChunks: 9,
 IterPendingChunks: chunkIterRange(9),
-}},
+},
+},
 Peers: []Peer{
 firstStealer,
 stealee,
@@ -116,8 +116,10 @@ func (p *peerId) GobDecode(b []byte) error {
 return nil
 }
 
-type RequestIndex = request_strategy.RequestIndex
-type chunkIndexType = request_strategy.ChunkIndex
+type (
+RequestIndex = request_strategy.RequestIndex
+chunkIndexType = request_strategy.ChunkIndex
+)
 
 type peerRequests struct {
 requestIndexes []RequestIndex
@@ -28,7 +28,6 @@ func TestLogExampleRequestMapOrdering(t *testing.T) {
 for k := range makeTypicalRequests() {
 t.Log(k)
 }
-
 }
 
 func TestRequestMapOrderingPersistent(t *testing.T) {
@@ -67,7 +67,8 @@ func testLocater(t *testing.T, newLocater newLocater) {
 {0, 1554},
 {0, 1618},
 {0, 1546},
-{0, 8500}})
+{0, 8500},
+})
 assertLocate(t, newLocater,
 []Length{1652, 1514, 1554, 1618, 1546, 129241752, 1537, 1536, 1551}, // 128737588
 Extent{129236992, 16384},
@@ -76,7 +77,8 @@ func testLocater(t *testing.T, newLocater newLocater) {
 {129229108, 12644},
 {0, 1537},
 {0, 1536},
-{0, 667}})
+{0, 667},
+})
 }
 
 func TestScan(t *testing.T) {
@@ -19,9 +19,7 @@ const (
 boltDbIncompleteValue = "i"
 )
 
-var (
-completionBucketKey = []byte("completion")
-)
+var completionBucketKey = []byte("completion")
 
 type boltPieceCompletion struct {
 db *bbolt.DB
|
@ -30,9 +28,9 @@ type boltPieceCompletion struct {
|
||||||
var _ PieceCompletion = (*boltPieceCompletion)(nil)
|
var _ PieceCompletion = (*boltPieceCompletion)(nil)
|
||||||
|
|
||||||
func NewBoltPieceCompletion(dir string) (ret PieceCompletion, err error) {
|
func NewBoltPieceCompletion(dir string) (ret PieceCompletion, err error) {
|
||||||
os.MkdirAll(dir, 0750)
|
os.MkdirAll(dir, 0o750)
|
||||||
p := filepath.Join(dir, ".torrent.bolt.db")
|
p := filepath.Join(dir, ".torrent.bolt.db")
|
||||||
db, err := bbolt.Open(p, 0660, &bbolt.Options{
|
db, err := bbolt.Open(p, 0o660, &bbolt.Options{
|
||||||
Timeout: time.Second,
|
Timeout: time.Second,
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
|
@@ -51,6 +51,7 @@ func (me *boltPiece) MarkComplete() error {
 func (me *boltPiece) MarkNotComplete() error {
 return me.pc().Set(me.pk(), false)
 }
+
 func (me *boltPiece) ReadAt(b []byte, off int64) (n int, err error) {
 err = me.db.View(func(tx *bbolt.Tx) error {
 db := tx.Bucket(dataBucketKey)
@@ -30,7 +30,7 @@ type boltTorrent struct {
 }
 
 func NewBoltDB(filePath string) ClientImplCloser {
-db, err := bbolt.Open(filepath.Join(filePath, "bolt.db"), 0600, &bbolt.Options{
+db, err := bbolt.Open(filepath.Join(filePath, "bolt.db"), 0o600, &bbolt.Options{
 Timeout: time.Second,
 })
 expect.Nil(err)
@@ -126,7 +126,7 @@ func (fs *fileTorrentImpl) Close() error {
 // writes will ever occur to them (no torrent data is associated with a zero-length file). The
 // caller should make sure the file name provided is safe/sanitized.
 func CreateNativeZeroLengthFile(name string) error {
-os.MkdirAll(filepath.Dir(name), 0777)
+os.MkdirAll(filepath.Dir(name), 0o777)
 var f io.Closer
 f, err := os.Create(name)
 if err != nil {
@@ -185,18 +185,18 @@ func (fst fileTorrentImplIO) ReadAt(b []byte, off int64) (n int, err error) {
 }
 
 func (fst fileTorrentImplIO) WriteAt(p []byte, off int64) (n int, err error) {
-//log.Printf("write at %v: %v bytes", off, len(p))
+// log.Printf("write at %v: %v bytes", off, len(p))
 fst.fts.segmentLocater.Locate(segments.Extent{off, int64(len(p))}, func(i int, e segments.Extent) bool {
 name := fst.fts.files[i].path
-os.MkdirAll(filepath.Dir(name), 0777)
+os.MkdirAll(filepath.Dir(name), 0o777)
 var f *os.File
-f, err = os.OpenFile(name, os.O_WRONLY|os.O_CREATE, 0666)
+f, err = os.OpenFile(name, os.O_WRONLY|os.O_CREATE, 0o666)
 if err != nil {
 return false
 }
 var n1 int
 n1, err = f.WriteAt(p[:e.Length], e.Start)
-//log.Printf("%v %v wrote %v: %v", i, e, n1, err)
+// log.Printf("%v %v wrote %v: %v", i, e, n1, err)
 closeErr := f.Close()
 n += n1
 p = p[n1:]
@@ -132,13 +132,13 @@ func mMapTorrent(md *metainfo.Info, location string) (mms *mmap_span.MMapSpan, e
 
 func mmapFile(name string, size int64) (ret mmap.MMap, err error) {
 dir := filepath.Dir(name)
-err = os.MkdirAll(dir, 0750)
+err = os.MkdirAll(dir, 0o750)
 if err != nil {
 err = fmt.Errorf("making directory %q: %s", dir, err)
 return
 }
 var file *os.File
-file, err = os.OpenFile(name, os.O_CREATE|os.O_RDWR, 0666)
+file, err = os.OpenFile(name, os.O_CREATE|os.O_RDWR, 0o666)
 if err != nil {
 return
 }
@@ -24,8 +24,10 @@ var safeFilePathTests = []struct {
 {input: []string{"a", filepath.FromSlash(`b/..`)}, expectErr: false},
 {input: []string{"a", filepath.FromSlash(`b/../../..`)}, expectErr: true},
 {input: []string{"a", filepath.FromSlash(`b/../.././..`)}, expectErr: true},
-{input: []string{
-filepath.FromSlash(`NewSuperHeroMovie-2019-English-720p.avi /../../../../../Roaming/Microsoft/Windows/Start Menu/Programs/Startup/test3.exe`)},
+{
+input: []string{
+filepath.FromSlash(`NewSuperHeroMovie-2019-English-720p.avi /../../../../../Roaming/Microsoft/Windows/Start Menu/Programs/Startup/test3.exe`),
+},
 expectErr: true,
 },
 }
@@ -49,7 +49,7 @@ func BenchmarkMarkComplete(b *testing.B) {
 var opts NewDirectStorageOpts
 opts.Memory = memory
 opts.Capacity = capacity
-//opts.GcBlobs = true
+// opts.GcBlobs = true
 opts.BlobFlushInterval = time.Second
 opts.NoTriggers = noTriggers
 directBench := func(b *testing.B) {
t.go
@@ -232,7 +232,6 @@ func (t *Torrent) initFiles() {
 })
 offset += fi.Length
 }
-
 }
 
 // Returns handles to the files in the torrent. This requires that the Info is
@@ -98,7 +98,7 @@ func testReceiveChunkStorageFailure(t *testing.T, seederFast bool) {
 }
 }
 // TODO: Check that PeerConns fastEnabled matches seederFast?
-//select {}
+// select {}
 }
 
 type pieceState struct {
@@ -68,7 +68,7 @@ func testClientTransfer(t *testing.T, ps testClientTransferParams) {
 defer os.RemoveAll(greetingTempDir)
 // Create seeder and a Torrent.
 cfg := torrent.TestingConfig(t)
-//cfg.Debug = true
+// cfg.Debug = true
 cfg.Seed = true
 // Some test instances don't like this being on, even when there's no cache involved.
 cfg.DropMutuallyCompletePeers = false
@@ -116,7 +116,7 @@ func testClientTransfer(t *testing.T, ps testClientTransferParams) {
 cfg.DownloadRateLimiter = ps.LeecherDownloadRateLimiter
 }
 cfg.Seed = false
-//cfg.Debug = true
+// cfg.Debug = true
 if ps.ConfigureLeecher.Config != nil {
 ps.ConfigureLeecher.Config(cfg)
 }
@@ -180,7 +180,7 @@ func testClientTransfer(t *testing.T, ps testClientTransferParams) {
 leecherPeerConns := leecherTorrent.PeerConns()
 if cfg.DropMutuallyCompletePeers {
 // I don't think we can assume it will be empty already, due to timing.
-//assert.Empty(t, leecherPeerConns)
+// assert.Empty(t, leecherPeerConns)
 } else {
 assert.NotEmpty(t, leecherPeerConns)
 }
@@ -296,8 +296,8 @@ func testClientTransferSmallCache(t *testing.T, setReadahead bool, readahead int
 ConfigureLeecher: ConfigureClient{
 Config: func(cfg *torrent.ClientConfig) {
 cfg.DropDuplicatePeerIds = true
-//cfg.DisableIPv6 = true
-//cfg.DisableUTP = true
+// cfg.DisableIPv6 = true
+// cfg.DisableUTP = true
 },
 },
 })
@@ -209,7 +209,6 @@ func (t *Torrent) KnownSwarm() (ks []PeerInfo) {
 
 // Add active peers to the list
 for conn := range t.conns {
-
 ks = append(ks, PeerInfo{
 Id: conn.PeerID,
 Addr: conn.RemoteAddr,
@@ -888,10 +887,10 @@ func (t *Torrent) hashPiece(piece pieceIndex) (ret metainfo.Hash, err error) {
 p.waitNoPendingWrites()
 storagePiece := t.pieces[piece].Storage()
 
-//Does the backend want to do its own hashing?
+// Does the backend want to do its own hashing?
 if i, ok := storagePiece.PieceImpl.(storage.SelfHashing); ok {
 var sum metainfo.Hash
-//log.Printf("A piece decided to self-hash: %d", piece)
+// log.Printf("A piece decided to self-hash: %d", piece)
 sum, err = i.SelfHash()
 missinggo.CopyExact(&ret, sum)
 return
@@ -1506,7 +1505,6 @@ func (t *Torrent) startWebsocketAnnouncer(u url.URL) torrentTrackerAnnouncer {
 }
 }()
 return wst
-
 }
 
 func (t *Torrent) startScrapingTracker(_url string) {
@@ -58,7 +58,8 @@ func TestSetAnnounceInfohashParamWithSpaces(t *testing.T) {
 someUrl := &url.URL{}
 ihBytes := [20]uint8{
 0x2b, 0x76, 0xa, 0xa1, 0x78, 0x93, 0x20, 0x30, 0xc8, 0x47,
-0xdc, 0xdf, 0x8e, 0xae, 0xbf, 0x56, 0xa, 0x1b, 0xd1, 0x6c}
+0xdc, 0xdf, 0x8e, 0xae, 0xbf, 0x56, 0xa, 0x1b, 0xd1, 0x6c,
+}
 setAnnounceParams(
 someUrl,
 &udp.AnnounceRequest{
@@ -28,9 +28,7 @@ type Peer = trHttp.Peer
 
 type AnnounceEvent = udp.AnnounceEvent
 
-var (
-ErrBadScheme = errors.New("unknown scheme")
-)
+var ErrBadScheme = errors.New("unknown scheme")
 
 type Announce struct {
 TrackerUrl string
@@ -112,7 +112,6 @@ func (me *trackerScraper) trackerUrl(ip net.IP) string {
 // Return how long to wait before trying again. For most errors, we return 5
 // minutes, a relatively quick turn around for DNS changes.
 func (me *trackerScraper) announce(ctx context.Context, event tracker.AnnounceEvent) (ret trackerAnnounceResult) {
-
 defer func() {
 ret.Completed = time.Now()
 }()
@@ -190,7 +189,6 @@ func (me *trackerScraper) canIgnoreInterval(notify *<-chan struct{}) bool {
 }
 
 func (me *trackerScraper) Run() {
-
 defer me.announceStopped()
 
 ctx, cancel := context.WithCancel(context.Background())
@@ -150,7 +150,6 @@ func (tc *TrackerClient) Close() error {
 }
 
 func (tc *TrackerClient) announceOffers() {
-
 // tc.Announce grabs a lock on tc.outboundOffers. It also handles the case where outboundOffers
 // is nil. Take ownership of outboundOffers here.
 tc.mu.Lock()
@@ -256,7 +255,7 @@ func (tc *TrackerClient) trackerReadLoop(tracker *websocket.Conn) error {
 if err != nil {
 return fmt.Errorf("read message error: %w", err)
 }
-//tc.Logger.WithDefaultLevel(log.Debug).Printf("received message from tracker: %q", message)
+// tc.Logger.WithDefaultLevel(log.Debug).Printf("received message from tracker: %q", message)
 
 var ar AnnounceResponse
 if err := json.Unmarshal(message, &ar); err != nil {
@@ -337,7 +336,7 @@ func (tc *TrackerClient) handleAnswer(offerId string, answer webrtc.SessionDescr
 tc.Logger.WithDefaultLevel(log.Warning).Printf("could not find offer for id %+q", offerId)
 return
 }
-//tc.Logger.WithDefaultLevel(log.Debug).Printf("offer %q got answer %v", offerId, answer)
+// tc.Logger.WithDefaultLevel(log.Debug).Printf("offer %q got answer %v", offerId, answer)
 metrics.Add("outbound offers answered", 1)
 err := offer.setAnswer(answer, func(dc datachannel.ReadWriteCloser) {
 metrics.Add("outbound offers answered with datachannel", 1)