Remove deadcode
This commit is contained in:
parent
bd7981dc19
commit
1566e2afdc
17
client.go
17
client.go
|
@ -99,8 +99,7 @@ const (
|
|||
// Limit how long handshake can take. This is to reduce the lingering
|
||||
// impact of a few bad apples. 4s loses 1% of successful handshakes that
|
||||
// are obtained with 60s timeout, and 5% of unsuccessful handshakes.
|
||||
btHandshakeTimeout = 4 * time.Second
|
||||
handshakesTimeout = 20 * time.Second
|
||||
handshakesTimeout = 20 * time.Second
|
||||
|
||||
// These are our extended message IDs.
|
||||
metadataExtendedId = iota + 1 // 0 is reserved for deleting keys
|
||||
|
@ -256,20 +255,6 @@ func (cl *Client) WriteStatus(_w io.Writer) {
|
|||
}
|
||||
}
|
||||
|
||||
// readaheadPieces calculates the number of pieces to set to Readahead
// priority, after the Now and Next pieces.
func readaheadPieces(readahead, pieceLength int64) (ret int) {
	// Number of pieces needed to cover the readahead window, with any
	// partial piece rounded up to a whole one.
	covered := int((readahead + pieceLength - 1) / pieceLength)
	// One covering piece is the separately-assigned "next" piece and
	// doesn't count — unless the window is so small that dropping it
	// would leave too short a tail. Keeping it smooths the blockiness
	// that occurs when the piece length is much larger than the
	// readahead.
	if covered <= 2 {
		return covered
	}
	return covered - 1
}
|
||||
|
||||
func (cl *Client) configDir() string {
|
||||
if cl.config.ConfigDir == "" {
|
||||
return filepath.Join(os.Getenv("HOME"), ".config/torrent")
|
||||
|
|
|
@ -423,23 +423,6 @@ func TestSeedAfterDownloading(t *testing.T) {
|
|||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestReadaheadPieces(t *testing.T) {
|
||||
for _, case_ := range []struct {
|
||||
readaheadBytes, pieceLength int64
|
||||
readaheadPieces int
|
||||
}{
|
||||
{5 * 1024 * 1024, 256 * 1024, 19},
|
||||
{5 * 1024 * 1024, 5 * 1024 * 1024, 1},
|
||||
{5*1024*1024 - 1, 5 * 1024 * 1024, 1},
|
||||
{5 * 1024 * 1024, 5*1024*1024 - 1, 2},
|
||||
{0, 5 * 1024 * 1024, 0},
|
||||
{5 * 1024 * 1024, 1048576, 4},
|
||||
} {
|
||||
pieces := readaheadPieces(case_.readaheadBytes, case_.pieceLength)
|
||||
assert.Equal(t, case_.readaheadPieces, pieces, "%v", case_)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMergingTrackersByAddingSpecs(t *testing.T) {
|
||||
cl, err := NewClient(&TestingConfig)
|
||||
require.NoError(t, err)
|
||||
|
|
22
misc.go
22
misc.go
|
@ -21,12 +21,6 @@ const (
|
|||
minDialTimeout = 5 * time.Second
|
||||
)
|
||||
|
||||
func lastChunkSpec(pieceLength, chunkSize pp.Integer) (cs chunkSpec) {
|
||||
cs.Begin = (pieceLength - 1) / chunkSize * chunkSize
|
||||
cs.Length = pieceLength - cs.Begin
|
||||
return
|
||||
}
|
||||
|
||||
// chunkSpec identifies a contiguous byte range within a single piece:
// Begin is the offset from the start of the piece, Length the number of
// bytes (see lastChunkSpec for how the trailing chunk is derived).
type chunkSpec struct {
	Begin, Length pp.Integer
}
|
||||
|
@ -40,11 +34,6 @@ func newRequest(index, begin, length pp.Integer) request {
|
|||
return request{index, chunkSpec{begin, length}}
|
||||
}
|
||||
|
||||
var (
	// errDataNotReady signals that the requested data is not yet
	// available.
	errDataNotReady = errors.New("data not ready")
)
|
||||
|
||||
// The size in bytes of a metadata extension piece.
|
||||
func metadataPieceSize(totalSize int, piece int) int {
|
||||
ret := totalSize - piece*(1<<14)
|
||||
|
@ -58,17 +47,6 @@ type superer interface {
|
|||
Super() interface{}
|
||||
}
|
||||
|
||||
// Returns ok if there's a parent, and it's not nil.
|
||||
func super(child interface{}) (parent interface{}, ok bool) {
|
||||
s, ok := child.(superer)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
parent = s.Super()
|
||||
ok = parent != nil
|
||||
return
|
||||
}
|
||||
|
||||
// Return the request that would include the given offset into the torrent data.
|
||||
func torrentOffsetRequest(torrentLength, pieceSize, chunkSize, offset int64) (
|
||||
r request, ok bool) {
|
||||
|
|
|
@ -517,10 +517,6 @@ func (t *Torrent) piecePartiallyDownloaded(piece int) bool {
|
|||
return t.pieces[piece].hasDirtyChunks()
|
||||
}
|
||||
|
||||
// numChunksForPiece returns how many chunks of chunkSize are needed to
// cover pieceSize, counting a trailing partial chunk as a whole one.
func numChunksForPiece(chunkSize int, pieceSize int) int {
	// Ceiling division via round-up-then-truncate.
	rounded := pieceSize + chunkSize - 1
	return rounded / chunkSize
}
|
||||
|
||||
func (t *Torrent) usualPieceSize() int {
|
||||
return int(t.info.PieceLength)
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue