Mirror of https://github.com/OpenListTeam/OpenList.git (synced 2025-09-19 04:06:18 +08:00)

refactor: optimize stream, link, and resource management (#486)

* refactor: optimize stream, link, and resource management
* change Link.MFile to the io.ReadSeeker type
* fix (crypt): read on closed response body
* chore
* chore
* chore
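
The core of the change: Link.MFile is now a plain io.ReadSeeker instead of the model.File interface, and model.File itself no longer embeds io.Closer. Call sites therefore close an MFile only when its concrete type happens to implement io.Closer, exactly as the hunks below do. A minimal, self-contained sketch of that pattern (the Link type here is pared down to the one field involved):

package main

import (
	"io"
	"strings"
)

// Link is a stand-in for model.Link after this change:
// MFile is only required to be an io.ReadSeeker.
type Link struct {
	MFile io.ReadSeeker
}

// closeMFile releases the underlying resource only when the
// concrete value also implements io.Closer (*os.File does;
// *strings.Reader does not).
func closeMFile(l *Link) {
	if clr, ok := l.MFile.(io.Closer); ok {
		_ = clr.Close()
	}
}

func main() {
	l := &Link{MFile: strings.NewReader("demo")}
	closeMFile(l) // no-op: strings.Reader has no Close method
}
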
@@ -18,8 +18,9 @@ import (
 )
 
 type VolumeFile struct {
-	stream.SStreamReadAtSeeker
+	model.File
 	name string
+	ss   model.FileStreamer
 }
 
 func (v *VolumeFile) Name() string {
@@ -27,7 +28,7 @@ func (v *VolumeFile) Name() string {
 }
 
 func (v *VolumeFile) Size() int64 {
-	return v.SStreamReadAtSeeker.GetRawStream().GetSize()
+	return v.ss.GetSize()
 }
 
 func (v *VolumeFile) Mode() fs.FileMode {
@@ -35,7 +36,7 @@ func (v *VolumeFile) Mode() fs.FileMode {
 }
 
 func (v *VolumeFile) ModTime() time.Time {
-	return v.SStreamReadAtSeeker.GetRawStream().ModTime()
+	return v.ss.ModTime()
 }
 
 func (v *VolumeFile) IsDir() bool {
@@ -74,7 +75,7 @@ func makeOpts(ss []*stream.SeekableStream) (string, rardecode.Option, error) {
 		}
 		fileName := "file.rar"
 		fsys := &VolumeFs{parts: map[string]*VolumeFile{
-			fileName: {SStreamReadAtSeeker: reader, name: fileName},
+			fileName: {File: reader, name: fileName},
 		}}
 		return fileName, rardecode.FileSystem(fsys), nil
 	} else {
@@ -85,7 +86,7 @@ func makeOpts(ss []*stream.SeekableStream) (string, rardecode.Option, error) {
 				return "", nil, err
 			}
 			fileName := fmt.Sprintf("file.part%d.rar", i+1)
-			parts[fileName] = &VolumeFile{SStreamReadAtSeeker: reader, name: fileName}
+			parts[fileName] = &VolumeFile{File: reader, name: fileName, ss: s}
 		}
 		return "file.part1.rar", rardecode.FileSystem(&VolumeFs{parts: parts}), nil
 	}
@@ -27,10 +27,9 @@ type Link struct {
 	URL    string      `json:"url"`    // most common way
 	Header http.Header `json:"header"` // needed header (for url)
 	RangeReadCloser RangeReadCloserIF `json:"-"` // recommended way if can't use URL
-	MFile File `json:"-"` // best for local,smb... file system, which exposes MFile
-
+	MFile io.ReadSeeker `json:"-"` // best for local,smb... file system, which exposes MFile
 	Expiration *time.Duration // local cache expire Duration
 	IPCacheKey bool           `json:"-"` // add ip to cache key
 
 	//for accelerating request, use multi-thread downloading
 	Concurrency int `json:"concurrency"`
@@ -7,19 +7,4 @@ type File interface {
 	io.Reader
 	io.ReaderAt
 	io.Seeker
-	io.Closer
 }
-
-type NopMFileIF interface {
-	io.Reader
-	io.ReaderAt
-	io.Seeker
-}
-type NopMFile struct {
-	NopMFileIF
-}
-
-func (NopMFile) Close() error { return nil }
-func NewNopMFile(r NopMFileIF) File {
-	return NopMFile{r}
-}
@@ -182,11 +182,10 @@ func (d *downloader) download() (io.ReadCloser, error) {
 		defer d.m.Unlock()
 		if closeFunc != nil {
 			d.concurrencyFinish()
-			err := closeFunc()
+			err = closeFunc()
 			closeFunc = nil
-			return err
 		}
-		return nil
+		return err
 	})
 	return resp.Body, nil
 }
@@ -272,24 +271,30 @@ func (d *downloader) sendChunkTask(newConcurrency bool) error {
 
 // when the final reader Close, we interrupt
 func (d *downloader) interrupt() error {
-	if d.written != d.params.Range.Length {
+	d.m.Lock()
+	defer d.m.Unlock()
+	err := d.err
+	if err == nil && d.written != d.params.Range.Length {
 		log.Debugf("Downloader interrupt before finish")
-		if d.getErr() == nil {
-			d.setErr(fmt.Errorf("interrupted"))
-		}
+		err = fmt.Errorf("interrupted")
+		d.err = err
 	}
-	d.cancel(d.err)
-	defer func() {
+	if d.chunkChannel != nil {
+		d.cancel(err)
 		close(d.chunkChannel)
+		d.chunkChannel = nil
 		for _, buf := range d.bufs {
 			buf.Close()
 		}
+		d.bufs = nil
 		if d.concurrency > 0 {
 			d.concurrency = -d.concurrency
 		}
 		log.Debugf("maxConcurrency:%d", d.cfg.Concurrency+d.concurrency)
-	}()
-	return d.err
+	} else {
+		log.Debug("close of closed channel")
+	}
+	return err
 }
 
 func (d *downloader) getBuf(id int) (b *Buf) {
 	return d.bufs[id%len(d.bufs)]
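
The rewritten interrupt holds the mutex for the whole call and nils chunkChannel after closing it, so a repeated interrupt logs instead of panicking with "close of closed channel", and the same err is both stored on the downloader and returned. A stripped-down sketch of the close-once idiom (not the actual downloader type):

package main

import (
	"fmt"
	"sync"
)

// stopper isolates the pattern: guard the channel with a mutex
// and nil it out after closing so the close happens exactly once.
type stopper struct {
	m  sync.Mutex
	ch chan int
}

func (s *stopper) stop() {
	s.m.Lock()
	defer s.m.Unlock()
	if s.ch != nil {
		close(s.ch) // first call: really close
		s.ch = nil  // later calls see nil and skip it
	} else {
		fmt.Println("close of closed channel") // log, don't panic
	}
}

func main() {
	s := &stopper{ch: make(chan int)}
	s.stop()
	s.stop() // safe second call
}
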
@@ -62,8 +62,8 @@ func GetArchiveToolAndStream(ctx context.Context, storage driver.Driver, path st
 	}
 	baseName, ext, found := strings.Cut(obj.GetName(), ".")
 	if !found {
-		if l.MFile != nil {
-			_ = l.MFile.Close()
+		if clr, ok := l.MFile.(io.Closer); ok {
+			_ = clr.Close()
 		}
 		if l.RangeReadCloser != nil {
 			_ = l.RangeReadCloser.Close()
@@ -75,8 +75,8 @@ func GetArchiveToolAndStream(ctx context.Context, storage driver.Driver, path st
 		var e error
 		partExt, t, e = tool.GetArchiveTool(stdpath.Ext(obj.GetName()))
 		if e != nil {
-			if l.MFile != nil {
-				_ = l.MFile.Close()
+			if clr, ok := l.MFile.(io.Closer); ok {
+				_ = clr.Close()
 			}
 			if l.RangeReadCloser != nil {
 				_ = l.RangeReadCloser.Close()
@@ -86,8 +86,8 @@ func GetArchiveToolAndStream(ctx context.Context, storage driver.Driver, path st
 	}
 	ss, err := stream.NewSeekableStream(stream.FileStream{Ctx: ctx, Obj: obj}, l)
 	if err != nil {
-		if l.MFile != nil {
-			_ = l.MFile.Close()
+		if clr, ok := l.MFile.(io.Closer); ok {
+			_ = clr.Close()
 		}
 		if l.RangeReadCloser != nil {
 			_ = l.RangeReadCloser.Close()
@@ -109,8 +109,8 @@ func GetArchiveToolAndStream(ctx context.Context, storage driver.Driver, path st
 		}
 		ss, err = stream.NewSeekableStream(stream.FileStream{Ctx: ctx, Obj: o}, l)
 		if err != nil {
-			if l.MFile != nil {
-				_ = l.MFile.Close()
+			if clr, ok := l.MFile.(io.Closer); ok {
+				_ = clr.Close()
 			}
 			if l.RangeReadCloser != nil {
 				_ = l.RangeReadCloser.Close()
@@ -174,9 +174,6 @@ func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg
 	if !storage.Config().NoCache {
 		Expiration := time.Minute * time.Duration(storage.GetStorage().CacheExpiration)
 		archiveMetaProvider.Expiration = &Expiration
-	} else if ss[0].Link.MFile == nil {
-		// alias, crypt drivers
-		archiveMetaProvider.Expiration = ss[0].Link.Expiration
 	}
 	return obj, archiveMetaProvider, err
 }
@@ -401,9 +398,6 @@ func DriverExtract(ctx context.Context, storage driver.Driver, path string, args
 		return nil, errors.Wrapf(err, "failed extract archive")
 	}
 	if link.Link.Expiration != nil {
-		if link.Link.IPCacheKey {
-			key = key + ":" + args.IP
-		}
 		extractCache.Set(key, link, cache.WithEx[*extractLink](*link.Link.Expiration))
 	}
 	return link, nil
@@ -268,9 +268,6 @@ func Link(ctx context.Context, storage driver.Driver, path string, args model.Li
 		return nil, errors.Wrapf(err, "failed get link")
 	}
 	if link.Expiration != nil {
-		if link.IPCacheKey {
-			key = key + ":" + args.IP
-		}
 		linkCache.Set(key, link, cache.WithEx[*model.Link](*link.Expiration))
 	}
 	return link, nil
@@ -135,6 +135,13 @@ func (r *RateLimitFile) ReadAt(p []byte, off int64) (n int, err error) {
 	return
 }
 
+func (r *RateLimitFile) Close() error {
+	if c, ok := r.File.(io.Closer); ok {
+		return c.Close()
+	}
+	return nil
+}
+
 type RateLimitRangeReadCloser struct {
 	model.RangeReadCloserIF
 	Limiter Limiter
@@ -81,10 +81,7 @@ func (f *FileStream) SetExist(obj model.Obj) {
 // CacheFullInTempFile save all data into tmpFile. Not recommended since it wears disk,
 // and can't start upload until the file is written. It's not thread-safe!
 func (f *FileStream) CacheFullInTempFile() (model.File, error) {
-	if f.tmpFile != nil {
-		return f.tmpFile, nil
-	}
-	if file, ok := f.Reader.(model.File); ok {
+	if file := f.GetFile(); file != nil {
 		return file, nil
 	}
 	tmpF, err := utils.CreateTempFile(f.Reader, f.GetSize())
@@ -117,33 +114,35 @@ func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
 		// see internal/net/request.go
 		httpRange.Length = f.GetSize() - httpRange.Start
 	}
+	var cache io.ReaderAt = f.GetFile()
+	if cache != nil {
+		return io.NewSectionReader(cache, httpRange.Start, httpRange.Length), nil
+	}
+
 	size := httpRange.Start + httpRange.Length
 	if f.peekBuff != nil && size <= int64(f.peekBuff.Len()) {
 		return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil
 	}
-	var cache io.ReaderAt = f.GetFile()
-	if cache == nil {
-		if size <= InMemoryBufMaxSizeBytes {
-			bufSize := min(size, f.GetSize())
-			// using bytes.Buffer as io.CopyBuffer's write target would make CopyBuffer call Buffer.ReadFrom,
-			// and the Buffer grows even when the amount written matches Buffer.Cap
-			buf := make([]byte, bufSize)
-			n, err := io.ReadFull(f.Reader, buf)
-			if err != nil {
-				return nil, err
-			}
-			if n != int(bufSize) {
-				return nil, fmt.Errorf("stream RangeRead did not get all data in peek, expect =%d ,actual =%d", bufSize, n)
-			}
-			f.peekBuff = bytes.NewReader(buf)
-			f.Reader = io.MultiReader(f.peekBuff, f.Reader)
-			cache = f.peekBuff
-		} else {
-			var err error
-			cache, err = f.CacheFullInTempFile()
-			if err != nil {
-				return nil, err
-			}
+	if size <= InMemoryBufMaxSizeBytes {
+		bufSize := min(size, f.GetSize())
+		// using bytes.Buffer as io.CopyBuffer's write target would make CopyBuffer call Buffer.ReadFrom,
+		// and the Buffer grows even when the amount written matches Buffer.Cap
+		buf := make([]byte, bufSize)
+		n, err := io.ReadFull(f.Reader, buf)
+		if err != nil {
+			return nil, err
+		}
+		if n != int(bufSize) {
+			return nil, fmt.Errorf("stream RangeRead did not get all data in peek, expect =%d ,actual =%d", bufSize, n)
+		}
+		f.peekBuff = bytes.NewReader(buf)
+		f.Reader = io.MultiReader(f.peekBuff, f.Reader)
+		cache = f.peekBuff
+	} else {
+		var err error
+		cache, err = f.CacheFullInTempFile()
+		if err != nil {
+			return nil, err
 		}
 	}
 	return io.NewSectionReader(cache, httpRange.Start, httpRange.Length), nil
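
For small ranges on a stream with no file-backed cache, RangeRead now reads the prefix into memory once and splices it back with io.MultiReader: the prefix becomes randomly addressable through io.NewSectionReader while a later sequential read still sees the whole stream. The trick in isolation, on a toy reader:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

func main() {
	var src io.Reader = strings.NewReader("0123456789")

	// Peek the first 4 bytes into a seekable in-memory buffer.
	buf := make([]byte, 4)
	if _, err := io.ReadFull(src, buf); err != nil {
		panic(err)
	}
	peek := bytes.NewReader(buf)

	// Splice the peeked prefix back in front of the unread tail.
	src = io.MultiReader(peek, src)

	// The prefix can now be served at random offsets
	// (ReadAt does not advance peek's sequential position)...
	part, _ := io.ReadAll(io.NewSectionReader(peek, 1, 2))
	fmt.Println(string(part)) // 12

	// ...while a sequential read still yields the full stream.
	rest, _ := io.ReadAll(src)
	fmt.Println(string(rest)) // 0123456789
}
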
@@ -161,49 +160,34 @@ var _ model.FileStreamer = (*FileStream)(nil)
 // the SeekableStream object and be closed together when the SeekableStream object is closed.
 type SeekableStream struct {
 	FileStream
-	Link *model.Link
 	// should have one of belows to support rangeRead
 	rangeReadCloser model.RangeReadCloserIF
-	mFile           model.File
 }
 
 func NewSeekableStream(fs FileStream, link *model.Link) (*SeekableStream, error) {
 	if len(fs.Mimetype) == 0 {
 		fs.Mimetype = utils.GetMimeType(fs.Obj.GetName())
 	}
-	ss := &SeekableStream{FileStream: fs, Link: link}
+	ss := &SeekableStream{FileStream: fs}
 	if ss.Reader != nil {
-		result, ok := ss.Reader.(model.File)
-		if ok {
-			ss.mFile = result
-			ss.Closers.Add(result)
-			return ss, nil
-		}
+		ss.TryAdd(ss.Reader)
+		return ss, nil
 	}
-	if ss.Link != nil {
-		if ss.Link.MFile != nil {
-			mFile := ss.Link.MFile
-			if _, ok := mFile.(*os.File); !ok {
-				mFile = &RateLimitFile{
-					File:    mFile,
-					Limiter: ServerDownloadLimit,
-					Ctx:     fs.Ctx,
-				}
-			}
-			ss.mFile = mFile
-			ss.Reader = mFile
-			ss.Closers.Add(mFile)
+	if link != nil {
+		if link.MFile != nil {
+			ss.Closers.TryAdd(link.MFile)
+			ss.Reader = link.MFile
 			return ss, nil
 		}
-		if ss.Link.RangeReadCloser != nil {
+		if link.RangeReadCloser != nil {
 			ss.rangeReadCloser = &RateLimitRangeReadCloser{
-				RangeReadCloserIF: ss.Link.RangeReadCloser,
+				RangeReadCloserIF: link.RangeReadCloser,
 				Limiter:           ServerDownloadLimit,
 			}
 			ss.Add(ss.rangeReadCloser)
 			return ss, nil
 		}
-		if len(ss.Link.URL) > 0 {
+		if len(link.URL) > 0 {
 			rrc, err := GetRangeReadCloserFromLink(ss.GetSize(), link)
 			if err != nil {
 				return nil, err
@@ -217,9 +201,6 @@ func NewSeekableStream(fs FileStream, link *model.Link) (*SeekableStream, error)
 			return ss, nil
 		}
 	}
-	if fs.Reader != nil {
-		return ss, nil
-	}
 	return nil, fmt.Errorf("illegal seekableStream")
 }
 
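
NewSeekableStream no longer keeps a dedicated mFile field or wraps Link.MFile in a RateLimitFile here; it just registers the reader via Closers.TryAdd. Judging by its use on an io.ReadSeeker, TryAdd presumably registers a value only when it implements io.Closer; an illustrative re-implementation under that assumption (not the actual OpenList type):

package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

// Closers collects io.Closer values to be released together.
type Closers struct{ cs []io.Closer }

// TryAdd registers v only if its concrete type can close:
// an *os.File is tracked, a *strings.Reader is silently skipped.
// (Assumed behavior, inferred from how TryAdd is called above.)
func (c *Closers) TryAdd(v any) {
	if cl, ok := v.(io.Closer); ok {
		c.cs = append(c.cs, cl)
	}
}

// Close releases everything, reporting the first error.
func (c *Closers) Close() error {
	var first error
	for _, cl := range c.cs {
		if err := cl.Close(); err != nil && first == nil {
			first = err
		}
	}
	return first
}

func main() {
	var c Closers
	f, _ := os.Open(os.DevNull)
	c.TryAdd(f)                        // tracked
	c.TryAdd(strings.NewReader("mem")) // skipped
	fmt.Println(c.Close())             // <nil>
}
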
@@ -229,16 +210,10 @@ func NewSeekableStream(fs FileStream, link *model.Link) (*SeekableStream, error)
 
 // RangeRead is not thread-safe, pls use it in single thread only.
 func (ss *SeekableStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
-	if httpRange.Length == -1 {
-		httpRange.Length = ss.GetSize() - httpRange.Start
-	}
-	if ss.mFile != nil {
-		return io.NewSectionReader(ss.mFile, httpRange.Start, httpRange.Length), nil
-	}
-	if ss.tmpFile != nil {
-		return io.NewSectionReader(ss.tmpFile, httpRange.Start, httpRange.Length), nil
-	}
-	if ss.rangeReadCloser != nil {
+	if ss.tmpFile == nil && ss.rangeReadCloser != nil {
+		if httpRange.Length == -1 {
+			httpRange.Length = ss.GetSize() - httpRange.Start
+		}
 		rc, err := ss.rangeReadCloser.RangeRead(ss.Ctx, httpRange)
 		if err != nil {
 			return nil, err
@@ -272,11 +247,8 @@ func (ss *SeekableStream) Read(p []byte) (n int, err error) {
 }
 
 func (ss *SeekableStream) CacheFullInTempFile() (model.File, error) {
-	if ss.tmpFile != nil {
-		return ss.tmpFile, nil
-	}
-	if ss.mFile != nil {
-		return ss.mFile, nil
+	if file := ss.GetFile(); file != nil {
+		return file, nil
 	}
 	tmpF, err := utils.CreateTempFile(ss, ss.GetSize())
 	if err != nil {
@@ -288,16 +260,6 @@ func (ss *SeekableStream) CacheFullInTempFile() (model.File, error) {
 	return tmpF, nil
 }
 
-func (ss *SeekableStream) GetFile() model.File {
-	if ss.tmpFile != nil {
-		return ss.tmpFile
-	}
-	if ss.mFile != nil {
-		return ss.mFile
-	}
-	return nil
-}
-
 func (f *FileStream) SetTmpFile(r *os.File) {
 	f.Add(r)
 	f.tmpFile = r
@@ -342,11 +304,6 @@ func (r *ReaderUpdatingProgress) Close() error {
 	return r.Reader.Close()
 }
 
-type SStreamReadAtSeeker interface {
-	model.File
-	GetRawStream() *SeekableStream
-}
-
 type readerCur struct {
 	reader io.Reader
 	cur    int64
@@ -407,7 +364,7 @@ func (r *headCache) Close() error {
 }
 
 func (r *RangeReadReadAtSeeker) InitHeadCache() {
-	if r.ss.Link.MFile == nil && r.masterOff == 0 {
+	if r.ss.GetFile() == nil && r.masterOff == 0 {
 		reader := r.readers[0]
 		r.readers = r.readers[1:]
 		r.headCache = &headCache{readerCur: reader}
@@ -415,13 +372,13 @@ func (r *RangeReadReadAtSeeker) InitHeadCache() {
 	}
 }
 
-func NewReadAtSeeker(ss *SeekableStream, offset int64, forceRange ...bool) (SStreamReadAtSeeker, error) {
-	if ss.mFile != nil {
-		_, err := ss.mFile.Seek(offset, io.SeekStart)
+func NewReadAtSeeker(ss *SeekableStream, offset int64, forceRange ...bool) (model.File, error) {
+	if ss.GetFile() != nil {
+		_, err := ss.GetFile().Seek(offset, io.SeekStart)
 		if err != nil {
 			return nil, err
 		}
-		return &FileReadAtSeeker{ss: ss}, nil
+		return ss.GetFile(), nil
 	}
 	r := &RangeReadReadAtSeeker{
 		ss: ss,
|
||||
return readerutil.NewMultiReaderAt(readers...), nil
|
||||
}
|
||||
|
||||
func (r *RangeReadReadAtSeeker) GetRawStream() *SeekableStream {
|
||||
return r.ss
|
||||
}
|
||||
|
||||
func (r *RangeReadReadAtSeeker) getReaderAtOffset(off int64) (*readerCur, error) {
|
||||
var rc *readerCur
|
||||
for _, reader := range r.readers {
|
||||
@@ -562,31 +515,3 @@ func (r *RangeReadReadAtSeeker) Read(p []byte) (n int, err error) {
 	r.masterOff += int64(n)
 	return n, err
 }
-
-func (r *RangeReadReadAtSeeker) Close() error {
-	return r.ss.Close()
-}
-
-type FileReadAtSeeker struct {
-	ss *SeekableStream
-}
-
-func (f *FileReadAtSeeker) GetRawStream() *SeekableStream {
-	return f.ss
-}
-
-func (f *FileReadAtSeeker) Read(p []byte) (n int, err error) {
-	return f.ss.mFile.Read(p)
-}
-
-func (f *FileReadAtSeeker) ReadAt(p []byte, off int64) (n int, err error) {
-	return f.ss.mFile.ReadAt(p, off)
-}
-
-func (f *FileReadAtSeeker) Seek(offset int64, whence int) (int64, error) {
-	return f.ss.mFile.Seek(offset, whence)
-}
-
-func (f *FileReadAtSeeker) Close() error {
-	return f.ss.Close()
-}