Sanitize metainfo file paths for file-based storage
Fixes an exploit where a specially crafted info can cause the client to write files to arbitrary locations on local storage when using file-based storage backends such as mmap and file.
parent 635ec27fb3
commit 89235e180f
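
To illustrate the class of bug being fixed (this is not code from the repository, and the paths are made up): a torrent's info dictionary supplies info.Name and per-file path components, and joining them directly under the storage directory lets ".." segments resolve outside it.

package main

import (
    "fmt"
    "path/filepath"
)

func main() {
    baseDir := "/srv/torrents"
    // A hostile metainfo can declare a name or file path component like this.
    evilName := "../../etc/cron.d/evil"
    // Joining it directly resolves to a path outside the storage directory.
    fmt.Println(filepath.Join(baseDir, evilName)) // /etc/cron.d/evil
}

The commit closes this off by routing every metainfo-supplied component through the new ToSafeFilePath helper before it reaches the filesystem.
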
@@ -1,6 +1,7 @@
package storage

import (
    "fmt"
    "io"
    "os"
    "path/filepath"

@@ -43,7 +44,9 @@ func NewFileByInfoHash(baseDir string) ClientImpl {
    return NewFileWithCustomPathMaker(baseDir, infoHashPathMaker)
}

// Allows passing a function to determine the path for storing torrent data
// Allows passing a function to determine the path for storing torrent data. The function is
// responsible for sanitizing the info if it uses some part of it (for example sanitizing
// info.Name).
func NewFileWithCustomPathMaker(baseDir string, pathMaker func(baseDir string, info *metainfo.Info, infoHash metainfo.Hash) string) ClientImpl {
    return newFileWithCustomPathMakerAndCompletion(baseDir, pathMaker, pieceCompletionForDir(baseDir))
}

@@ -65,25 +68,41 @@ func (me *fileClientImpl) Close() error {

func (fs *fileClientImpl) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (TorrentImpl, error) {
    dir := fs.pathMaker(fs.baseDir, info, infoHash)
    err := CreateNativeZeroLengthFiles(info, dir)
    if err != nil {
        return nil, err
    }
    upvertedFiles := info.UpvertedFiles()
    files := make([]file, 0, len(upvertedFiles))
    for i, fileInfo := range upvertedFiles {
        s, err := ToSafeFilePath(append([]string{info.Name}, fileInfo.Path...)...)
        if err != nil {
            return nil, fmt.Errorf("file %v has unsafe path %q", i, fileInfo.Path)
        }
        f := file{
            path: filepath.Join(dir, s),
            length: fileInfo.Length,
        }
        if f.length == 0 {
            err = CreateNativeZeroLengthFile(f.path)
            if err != nil {
                return nil, fmt.Errorf("creating zero length file: %w", err)
            }
        }
        files = append(files, f)
    }
    return &fileTorrentImpl{
        dir,
        info.Name,
        upvertedFiles,
        files,
        segments.NewIndex(common.LengthIterFromUpvertedFiles(upvertedFiles)),
        infoHash,
        fs.pc,
    }, nil
}

type file struct {
    // The safe, OS-local file path.
    path string
    length int64
}

type fileTorrentImpl struct {
    dir string
    infoName string
    upvertedFiles []metainfo.FileInfo
    files []file
    segmentLocater segments.Index
    infoHash metainfo.Hash
    completion PieceCompletion

@@ -105,24 +124,17 @@ func (fs *fileTorrentImpl) Close() error {
    return nil
}

// Creates natives files for any zero-length file entries in the info. This is
// a helper for file-based storages, which don't address or write to zero-
// length files because they have no corresponding pieces.
func CreateNativeZeroLengthFiles(info *metainfo.Info, dir string) (err error) {
    for _, fi := range info.UpvertedFiles() {
        if fi.Length != 0 {
            continue
        }
        name := filepath.Join(append([]string{dir, info.Name}, fi.Path...)...)
        os.MkdirAll(filepath.Dir(name), 0777)
        var f io.Closer
        f, err = os.Create(name)
        if err != nil {
            break
        }
        f.Close()
// A helper to create zero-length files which won't appear for file-orientated storage since no
// writes will ever occur to them (no torrent data is associated with a zero-length file). The
// caller should make sure the file name provided is safe/sanitized.
func CreateNativeZeroLengthFile(name string) error {
    os.MkdirAll(filepath.Dir(name), 0777)
    var f io.Closer
    f, err := os.Create(name)
    if err != nil {
        return err
    }
    return
    return f.Close()
}

// Exposes file-based storage of a torrent, as one big ReadWriterAt.

@@ -131,8 +143,8 @@ type fileTorrentImplIO struct {
}

// Returns EOF on short or missing file.
func (fst *fileTorrentImplIO) readFileAt(fi metainfo.FileInfo, b []byte, off int64) (n int, err error) {
    f, err := os.Open(fst.fts.fileInfoName(fi))
func (fst *fileTorrentImplIO) readFileAt(file file, b []byte, off int64) (n int, err error) {
    f, err := os.Open(file.path)
    if os.IsNotExist(err) {
        // File missing is treated the same as a short file.
        err = io.EOF

@@ -143,10 +155,10 @@ func (fst *fileTorrentImplIO) readFileAt(fi metainfo.FileInfo, b []byte, off int
    }
    defer f.Close()
    // Limit the read to within the expected bounds of this file.
    if int64(len(b)) > fi.Length-off {
        b = b[:fi.Length-off]
    if int64(len(b)) > file.length-off {
        b = b[:file.length-off]
    }
    for off < fi.Length && len(b) != 0 {
    for off < file.length && len(b) != 0 {
        n1, err1 := f.ReadAt(b, off)
        b = b[n1:]
        n += n1

@@ -162,7 +174,7 @@ func (fst *fileTorrentImplIO) readFileAt(fi metainfo.FileInfo, b []byte, off int
// Only returns EOF at the end of the torrent. Premature EOF is ErrUnexpectedEOF.
func (fst fileTorrentImplIO) ReadAt(b []byte, off int64) (n int, err error) {
    fst.fts.segmentLocater.Locate(segments.Extent{off, int64(len(b))}, func(i int, e segments.Extent) bool {
        n1, err1 := fst.readFileAt(fst.fts.upvertedFiles[i], b[:e.Length], e.Start)
        n1, err1 := fst.readFileAt(fst.fts.files[i], b[:e.Length], e.Start)
        n += n1
        b = b[n1:]
        err = err1

@@ -177,7 +189,7 @@ func (fst fileTorrentImplIO) ReadAt(b []byte, off int64) (n int, err error) {
func (fst fileTorrentImplIO) WriteAt(p []byte, off int64) (n int, err error) {
    //log.Printf("write at %v: %v bytes", off, len(p))
    fst.fts.segmentLocater.Locate(segments.Extent{off, int64(len(p))}, func(i int, e segments.Extent) bool {
        name := fst.fts.fileInfoName(fst.fts.upvertedFiles[i])
        name := fst.fts.files[i].path
        os.MkdirAll(filepath.Dir(name), 0777)
        var f *os.File
        f, err = os.OpenFile(name, os.O_WRONLY|os.O_CREATE, 0666)

@@ -200,7 +212,3 @@ func (fst fileTorrentImplIO) WriteAt(p []byte, off int64) (n int, err error) {
    })
    return
}

func (fts *fileTorrentImpl) fileInfoName(fi metainfo.FileInfo) string {
    return filepath.Join(append([]string{fts.dir, fts.infoName}, fi.Path...)...)
}

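The updated comment on NewFileWithCustomPathMaker makes the contract explicit: a custom path maker that uses any part of the info must sanitize it itself. A hypothetical sketch of a path maker that sidesteps the issue by never consuming metainfo-supplied names (bucketedPathMaker and the base directory are illustrative, not part of this change; the usual github.com/anacrolix/torrent import paths are assumed):

package main

import (
    "path/filepath"

    "github.com/anacrolix/torrent/metainfo"
    "github.com/anacrolix/torrent/storage"
)

// bucketedPathMaker keys the torrent's directory off the infohash alone, so
// nothing attacker-controlled ever becomes part of an on-disk path.
func bucketedPathMaker(baseDir string, info *metainfo.Info, infoHash metainfo.Hash) string {
    hex := infoHash.HexString()
    return filepath.Join(baseDir, hex[:2], hex)
}

func main() {
    client := storage.NewFileWithCustomPathMaker("/srv/torrents", bucketedPathMaker)
    _ = client // pass to the torrent client configuration, e.g. ClientConfig.DefaultStorage
}
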
@@ -2,11 +2,16 @@ package storage

import "github.com/anacrolix/torrent/metainfo"

func extentCompleteRequiredLengths(info *metainfo.Info, off, n int64) (ret []metainfo.FileInfo) {
type requiredLength struct {
    fileIndex int
    length int64
}

func extentCompleteRequiredLengths(info *metainfo.Info, off, n int64) (ret []requiredLength) {
    if n == 0 {
        return
    }
    for _, fi := range info.UpvertedFiles() {
    for i, fi := range info.UpvertedFiles() {
        if off >= fi.Length {
            off -= fi.Length
            continue

@@ -15,9 +20,9 @@ func extentCompleteRequiredLengths(info *metainfo.Info, off, n int64) (ret []met
        if off+n1 > fi.Length {
            n1 = fi.Length - off
        }
        ret = append(ret, metainfo.FileInfo{
            Path: fi.Path,
            Length: off + n1,
        ret = append(ret, requiredLength{
            fileIndex: i,
            length: off + n1,
        })
        n -= n1
        if n == 0 {

@@ -16,21 +16,21 @@ func TestExtentCompleteRequiredLengths(t *testing.T) {
        },
    }
    assert.Empty(t, extentCompleteRequiredLengths(info, 0, 0))
    assert.EqualValues(t, []metainfo.FileInfo{
        {Path: []string{"a"}, Length: 1},
    assert.EqualValues(t, []requiredLength{
        {fileIndex: 0, length: 1},
    }, extentCompleteRequiredLengths(info, 0, 1))
    assert.EqualValues(t, []metainfo.FileInfo{
        {Path: []string{"a"}, Length: 2},
    assert.EqualValues(t, []requiredLength{
        {fileIndex: 0, length: 2},
    }, extentCompleteRequiredLengths(info, 0, 2))
    assert.EqualValues(t, []metainfo.FileInfo{
        {Path: []string{"a"}, Length: 2},
        {Path: []string{"b"}, Length: 1},
    assert.EqualValues(t, []requiredLength{
        {fileIndex: 0, length: 2},
        {fileIndex: 1, length: 1},
    }, extentCompleteRequiredLengths(info, 0, 3))
    assert.EqualValues(t, []metainfo.FileInfo{
        {Path: []string{"b"}, Length: 2},
    assert.EqualValues(t, []requiredLength{
        {fileIndex: 1, length: 2},
    }, extentCompleteRequiredLengths(info, 2, 2))
    assert.EqualValues(t, []metainfo.FileInfo{
        {Path: []string{"b"}, Length: 3},
    assert.EqualValues(t, []requiredLength{
        {fileIndex: 1, length: 3},
    }, extentCompleteRequiredLengths(info, 4, 1))
    assert.Len(t, extentCompleteRequiredLengths(info, 5, 0), 0)
    assert.Panics(t, func() { extentCompleteRequiredLengths(info, 6, 1) })

@@ -31,8 +31,8 @@ func (fs *filePieceImpl) Completion() Completion {
    if c.Complete {
        // If it's allegedly complete, check that its constituent files have the necessary length.
        for _, fi := range extentCompleteRequiredLengths(fs.p.Info, fs.p.Offset(), fs.p.Length()) {
            s, err := os.Stat(fs.fileInfoName(fi))
            if err != nil || s.Size() < fi.Length {
            s, err := os.Stat(fs.files[fi.fileIndex].path)
            if err != nil || s.Size() < fi.length {
                c.Complete = false
                break
            }

@@ -106,7 +106,12 @@ func mMapTorrent(md *metainfo.Info, location string) (mms *mmap_span.MMapSpan, e
        }
    }()
    for _, miFile := range md.UpvertedFiles() {
        fileName := filepath.Join(append([]string{location, md.Name}, miFile.Path...)...)
        var safeName string
        safeName, err = ToSafeFilePath(append([]string{md.Name}, miFile.Path...)...)
        if err != nil {
            return
        }
        fileName := filepath.Join(location, safeName)
        var mm mmap.MMap
        mm, err = mmapFile(fileName, miFile.Length)
        if err != nil {

@@ -0,0 +1,31 @@
package storage

import (
    "errors"
    "log"
    "path/filepath"
    "strings"
)

// Get the first file path component. We can't use filepath.Split because that breaks off the last
// one. We could optimize this to avoid allocating a slice down the track.
func firstComponent(filePath string) string {
    return strings.SplitN(filePath, string(filepath.Separator), 2)[0]
}

// Combines file info path components, ensuring the result won't escape into parent directories.
func ToSafeFilePath(fileInfoComponents ...string) (string, error) {
    safeComps := make([]string, 0, len(fileInfoComponents))
    for _, comp := range fileInfoComponents {
        safeComps = append(safeComps, filepath.Clean(comp))
    }
    safeFilePath := filepath.Join(safeComps...)
    fc := firstComponent(safeFilePath)
    log.Printf("%q", fc)
    switch fc {
    case "..":
        return "", errors.New("escapes root dir")
    default:
        return safeFilePath, nil
    }
}

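A brief usage sketch of the new helper and the contract it creates for callers such as CreateNativeZeroLengthFile, which now expects an already-sanitized name. The program below is illustrative only: the /srv/torrents base directory is made up, the import path is assumed to be github.com/anacrolix/torrent/storage, and the printed separators assume a Unix-like system.

package main

import (
    "fmt"
    "path/filepath"

    "github.com/anacrolix/torrent/storage"
)

func main() {
    // Well-formed metainfo components stay relative and inside the base directory.
    safe, err := storage.ToSafeFilePath("My Torrent", "sub", "file.bin")
    fmt.Println(safe, err) // My Torrent/sub/file.bin <nil>

    // The caller joins the sanitized result onto its own base directory before
    // touching the filesystem, e.g. for a zero-length file entry.
    if err == nil {
        _ = storage.CreateNativeZeroLengthFile(filepath.Join("/srv/torrents", safe))
    }

    // A crafted component that climbs toward parent directories is rejected.
    _, err = storage.ToSafeFilePath("My Torrent", filepath.FromSlash("../../../etc/passwd"))
    fmt.Println(err) // escapes root dir
}
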
@@ -0,0 +1,34 @@
package storage

import (
    "log"
    "path/filepath"
    "testing"
)

func init() {
    log.SetFlags(log.Flags() | log.Lshortfile)
}

func TestSafePath(t *testing.T) {
    for _, _case := range []struct {
        input []string
        expected string
        expectErr bool
    }{
        {input: []string{"a", filepath.FromSlash(`b/../../..`)}, expectErr: true},
        {input: []string{"a", filepath.FromSlash(`b/../.././..`)}, expectErr: true},
        {input: []string{
            filepath.FromSlash(`NewSuperHeroMovie-2019-English-720p.avi /../../../../../Roaming/Microsoft/Windows/Start Menu/Programs/Startup/test3.exe`)},
            expectErr: true,
        },
    } {
        actual, err := ToSafeFilePath(_case.input...)
        if _case.expectErr {
            if err != nil {
                continue
            }
            t.Errorf("%q: expected error, got output %q", _case.input, actual)
        }
    }
}