Mirror of https://github.com/OpenListTeam/OpenList.git, synced 2025-09-19 12:16:24 +08:00
perf: optimize upload (#554)
* perf(115,123): optimize upload
* chore
* aliyun_open, google_drive
* fix bug
* chore
* cloudreve, cloudreve_v4, onedrive, onedrive_app
* chore(conf): add `max_buffer_limit` option
* 123pan multithread upload
* doubao
* google_drive
* chore
* chore
* chore: code for computing the chunk count
* auto mode for MaxBufferLimit
* auto mode for MaxBufferLimit
* 189pc
* add Lifecycle to errorgroup
* fill in remaining gaps
* Conf.MaxBufferLimit is in MB
* .

---------

Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
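Two of the bullets above (the chunk-count code and the MB-valued `Conf.MaxBufferLimit`) boil down to a ceiling division and a unit conversion. A minimal standalone sketch, where `chunkCount` and `maxBufferLimitMB` are illustrative names rather than upstream identifiers:

	package main

	import "fmt"

	// chunkCount is the usual ceiling division: the number of partSize-byte
	// chunks needed to cover size bytes.
	func chunkCount(size, partSize int64) int64 {
		return (size + partSize - 1) / partSize
	}

	func main() {
		const mb int64 = 1024 * 1024
		maxBufferLimitMB := int64(16)          // hypothetical config value, in MB
		fmt.Println(maxBufferLimitMB * mb)     // 16777216 bytes, usable in size comparisons
		fmt.Println(chunkCount(100*mb, 16*mb)) // a 100 MiB file in 16 MiB parts needs 7 chunks
	}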
@@ -1,12 +1,14 @@
package stream

import (
	"bytes"
	"context"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"net/http"
	"sync"

	"github.com/OpenListTeam/OpenList/v4/internal/conf"
	"github.com/OpenListTeam/OpenList/v4/internal/model"
@@ -187,3 +189,68 @@ func CacheFullInTempFileAndHash(stream model.FileStreamer, up model.UpdateProgre
	}
	return tmpF, hex.EncodeToString(h.Sum(nil)), err
}

// StreamSectionReader serves a stream as sequential sections, backed either
// by the stream's file cache or by pooled in-memory buffers.
type StreamSectionReader struct {
	file    model.FileStreamer
	off     int64
	bufPool *sync.Pool
}

func NewStreamSectionReader(file model.FileStreamer, maxBufferSize int) (*StreamSectionReader, error) {
	ss := &StreamSectionReader{file: file}
	if file.GetFile() == nil {
		maxBufferSize = min(maxBufferSize, int(file.GetSize()))
		if maxBufferSize > conf.MaxBufferLimit {
			// Sections too large to hold in memory: cache the whole
			// stream in a temp file instead of pooling buffers.
			_, err := file.CacheFullInTempFile()
			if err != nil {
				return nil, err
			}
		} else {
			// Reuse fixed-size buffers across sections instead of
			// allocating a new one per uploaded part.
			ss.bufPool = &sync.Pool{
				New: func() any {
					return make([]byte, maxBufferSize)
				},
			}
		}
	}
	return ss, nil
}

// GetSectionReader returns a reader over [off, off+length) of the stream.
// Not thread-safe: when the stream is not cached, sections must be
// requested at sequential offsets.
func (ss *StreamSectionReader) GetSectionReader(off, length int64) (*SectionReader, error) {
	var cache io.ReaderAt = ss.file.GetFile()
	var buf []byte
	if cache == nil {
		// No on-disk cache: read the next chunk into a pooled buffer.
		if off != ss.off {
			return nil, fmt.Errorf("stream not cached: request offset %d != current offset %d", off, ss.off)
		}
		tempBuf := ss.bufPool.Get().([]byte)
		buf = tempBuf[:length]
		n, err := io.ReadFull(ss.file, buf)
		if err != nil {
			return nil, err
		}
		if int64(n) != length {
			return nil, fmt.Errorf("can't read data, expected=%d, got=%d", length, n)
		}
		ss.off += int64(n)
		off = 0
		cache = bytes.NewReader(buf)
	}
	return &SectionReader{io.NewSectionReader(cache, off, length), buf}, nil
}

// RecycleSectionReader returns the section's pooled buffer once the caller
// is done with it.
func (ss *StreamSectionReader) RecycleSectionReader(sr *SectionReader) {
	if sr != nil {
		if sr.buf != nil {
			ss.bufPool.Put(sr.buf[0:cap(sr.buf)])
			sr.buf = nil
		}
		sr.ReadSeeker = nil
	}
}

type SectionReader struct {
	io.ReadSeeker
	buf []byte
}
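
To make the calling pattern concrete, here is a minimal sketch of how a driver's multipart upload loop might use the new API. The `uploadPart` helper, the `driver` package name, and the import path for `stream` are illustrative assumptions, not code from this commit:

	package driver

	import (
		"context"
		"io"

		"github.com/OpenListTeam/OpenList/v4/internal/model"
		"github.com/OpenListTeam/OpenList/v4/internal/stream"
	)

	// uploadPart is a stand-in for a driver's real per-part upload request.
	func uploadPart(ctx context.Context, r io.ReadSeeker, off, length int64) error {
		_, err := io.Copy(io.Discard, r) // placeholder: a real driver would PUT/POST the part
		return err
	}

	// uploadParts shows the intended calling pattern: request sections at
	// sequential offsets, upload each one, and always recycle its buffer.
	func uploadParts(ctx context.Context, file model.FileStreamer, partSize int64) error {
		ss, err := stream.NewStreamSectionReader(file, int(partSize))
		if err != nil {
			return err
		}
		size := file.GetSize()
		for off := int64(0); off < size; off += partSize {
			length := min(partSize, size-off)
			sr, err := ss.GetSectionReader(off, length)
			if err != nil {
				return err
			}
			uploadErr := uploadPart(ctx, sr, off, length)
			ss.RecycleSectionReader(sr) // return the pooled buffer even on failure
			if uploadErr != nil {
				return uploadErr
			}
		}
		return nil
	}

The key contract is visible in the loop: when the stream is uncached, sections must be requested at strictly increasing offsets, and every section must be recycled so its buffer goes back to the pool.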