perf(stream): improve file stream range reading and caching mechanism (#1001)

* perf(stream): improve file stream range reading and caching mechanism

* add bytes_test.go

* fix(stream): handle EOF and buffer reading more gracefully

* Add comments

* refactor: update CacheFullAndWriter to accept pointer for UpdateProgress

* update tests

* Update drivers/google_drive/util.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>

* Clone Link more elegantly

* Fix cached stream not being readable more than once

* Rename the Bytes type to Reader

* Fix stack overflow

* update tests

---------

Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
j2rong4cn
2025-08-11 23:41:22 +08:00
committed by GitHub
parent 8c244a984d
commit 57fceabcf4
48 changed files with 657 additions and 380 deletions
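
The change repeated across most of these drivers is the removal of the manual progress split around the caching helpers: call sites used to derive two sub-ranges with `model.UpdateProgressWithRange(up, 0, 50)` and `(up, 50, 100)` before calling `CacheFullInTempFileAndHash`; they now pass `&up` directly to `stream.CacheFullAndHash`, `CacheFullAndWriter`, or `NewStreamSectionReader`, which manage the split internally. For reference, a hedged sketch of roughly what the removed range helper does (the actual implementation lives in `internal/model` and may differ in detail):

```go
// UpdateProgress reports overall progress as a percentage in [0, 100].
type UpdateProgress func(percentage float64)

// UpdateProgressWithRange rescales a progress callback into a sub-range, so a
// caching phase wrapped with (up, 0, 50) reports the first half of the parent
// progress and an upload phase wrapped with (up, 50, 100) reports the rest.
// Sketch only; the real helper in internal/model may differ.
func UpdateProgressWithRange(up UpdateProgress, start, end float64) UpdateProgress {
	return func(p float64) {
		up(start + (end-start)*p/100)
	}
}
```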

View File

@ -186,9 +186,7 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
preHash = strings.ToUpper(preHash)
fullHash := stream.GetHash().GetHash(utils.SHA1)
if len(fullHash) != utils.SHA1.Width {
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, fullHash, err = streamPkg.CacheFullInTempFileAndHash(stream, cacheFileProgress, utils.SHA1)
_, fullHash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA1)
if err != nil {
return nil, err
}

View File

@ -321,7 +321,7 @@ func (d *Pan115) UploadByMultipart(ctx context.Context, params *driver115.Upload
err error
)
tmpF, err := s.CacheFullInTempFile()
tmpF, err := s.CacheFullAndWriter(&up, nil)
if err != nil {
return nil, err
}

View File

@ -239,9 +239,7 @@ func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
}
sha1 := file.GetHash().GetHash(utils.SHA1)
if len(sha1) != utils.SHA1.Width {
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, sha1, err = stream.CacheFullInTempFileAndHash(file, cacheFileProgress, utils.SHA1)
_, sha1, err = stream.CacheFullAndHash(file, &up, utils.SHA1)
if err != nil {
return err
}

View File

@ -86,13 +86,14 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
fileSize := stream.GetSize()
chunkSize := calPartSize(fileSize)
partNum := (stream.GetSize() + chunkSize - 1) / chunkSize
parts := make([]oss.UploadPart, partNum)
offset := int64(0)
ss, err := streamPkg.NewStreamSectionReader(stream, int(chunkSize))
ss, err := streamPkg.NewStreamSectionReader(stream, int(chunkSize), &up)
if err != nil {
return err
}
partNum := (stream.GetSize() + chunkSize - 1) / chunkSize
parts := make([]oss.UploadPart, partNum)
offset := int64(0)
for i := int64(1); i <= partNum; i++ {
if utils.IsCanceled(ctx) {
return ctx.Err()
@ -119,7 +120,7 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second))
ss.RecycleSectionReader(rd)
ss.FreeSectionReader(rd)
if err != nil {
return err
}
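
Alongside the extra `&up` argument to `NewStreamSectionReader`, `RecycleSectionReader` is renamed to `FreeSectionReader` throughout. The call-site pattern is always the same: obtain a section reader for the next chunk, hand it to a retried upload, and free it only once every retry attempt for that chunk has finished, because the backing buffer is reused. A minimal, self-contained stand-in for that lifecycle (it assumes a pooled in-memory buffer per chunk; the real `internal/stream` implementation may work differently):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
	"sync"
)

// sectionSource is a toy stand-in for the stream section reader used above: it
// reads the next chunk of src into a pooled buffer and returns a reader that
// can be rewound for retries.
type sectionSource struct {
	src  io.Reader
	pool sync.Pool
}

func newSectionSource(src io.Reader, chunkSize int) *sectionSource {
	s := &sectionSource{src: src}
	s.pool.New = func() any { return make([]byte, chunkSize) }
	return s
}

// getSection returns a seekable reader over the next length bytes, so a failed
// upload attempt can re-read the same chunk without touching the source stream.
func (s *sectionSource) getSection(length int64) (*bytes.Reader, []byte, error) {
	buf := s.pool.Get().([]byte)[:length]
	if _, err := io.ReadFull(s.src, buf); err != nil {
		s.pool.Put(buf[:cap(buf)])
		return nil, nil, err
	}
	return bytes.NewReader(buf), buf, nil
}

// freeSection returns the chunk buffer to the pool. Calling it before all
// retries have finished would let the next chunk overwrite the data, which is
// why the drivers call FreeSectionReader only after retry.Do / in After hooks.
func (s *sectionSource) freeSection(buf []byte) { s.pool.Put(buf[:cap(buf)]) }

func main() {
	ss := newSectionSource(strings.NewReader("0123456789abcdef"), 8)
	for {
		rd, buf, err := ss.getSection(8)
		if err != nil {
			break // io.EOF / io.ErrUnexpectedEOF ends this toy loop
		}
		n, _ := io.Copy(io.Discard, rd) // stand-in for the retried HTTP upload
		fmt.Println("uploaded", n, "bytes")
		ss.freeSection(buf)
	}
}
```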

View File

@ -182,9 +182,7 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, file model.FileStrea
etag := file.GetHash().GetHash(utils.MD5)
var err error
if len(etag) < utils.MD5.Width {
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, etag, err = stream.CacheFullInTempFileAndHash(file, cacheFileProgress, utils.MD5)
_, etag, err = stream.CacheFullAndHash(file, &up, utils.MD5)
if err != nil {
return err
}

View File

@ -81,6 +81,12 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
if size > chunkSize {
chunkCount = int((size + chunkSize - 1) / chunkSize)
}
ss, err := stream.NewStreamSectionReader(file, int(chunkSize), &up)
if err != nil {
return err
}
lastChunkSize := size % chunkSize
if lastChunkSize == 0 {
lastChunkSize = chunkSize
@ -92,10 +98,6 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
batchSize = 10
getS3UploadUrl = d.getS3PreSignedUrls
}
ss, err := stream.NewStreamSectionReader(file, int(chunkSize))
if err != nil {
return err
}
thread := min(int(chunkCount), d.UploadThread)
threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
@ -180,7 +182,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
return nil
},
After: func(err error) {
ss.RecycleSectionReader(reader)
ss.FreeSectionReader(reader)
},
})
}

View File

@ -132,9 +132,7 @@ func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
// etag is the file's MD5
etag := file.GetHash().GetHash(utils.MD5)
if len(etag) < utils.MD5.Width {
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, etag, err = stream.CacheFullInTempFileAndHash(file, cacheFileProgress, utils.MD5)
_, etag, err = stream.CacheFullAndHash(file, &up, utils.MD5)
if err != nil {
return nil, err
}

View File

@ -46,6 +46,12 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
uploadDomain := createResp.Data.Servers[0]
size := file.GetSize()
chunkSize := createResp.Data.SliceSize
ss, err := stream.NewStreamSectionReader(file, int(chunkSize), &up)
if err != nil {
return err
}
uploadNums := (size + chunkSize - 1) / chunkSize
thread := min(int(uploadNums), d.UploadThread)
threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
@ -53,10 +59,6 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
ss, err := stream.NewStreamSectionReader(file, int(chunkSize))
if err != nil {
return err
}
for partIndex := range uploadNums {
if utils.IsCanceled(uploadCtx) {
break
@ -157,7 +159,7 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
return nil
},
After: func(err error) {
ss.RecycleSectionReader(reader)
ss.FreeSectionReader(reader)
},
})
}

View File

@ -522,9 +522,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
var err error
fullHash := stream.GetHash().GetHash(utils.SHA256)
if len(fullHash) != utils.SHA256.Width {
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, fullHash, err = streamPkg.CacheFullInTempFileAndHash(stream, cacheFileProgress, utils.SHA256)
_, fullHash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA256)
if err != nil {
return err
}

View File

@ -5,17 +5,19 @@ import (
"encoding/base64"
"encoding/xml"
"fmt"
"github.com/skip2/go-qrcode"
"io"
"net/http"
"strconv"
"strings"
"time"
"github.com/skip2/go-qrcode"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/go-resty/resty/v2"
@ -311,11 +313,14 @@ func (y *Cloud189TV) RapidUpload(ctx context.Context, dstDir model.Obj, stream m
// Legacy upload; the family cloud does not support overwrite
func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
tempFile, err := file.CacheFullInTempFile()
if err != nil {
return nil, err
fileMd5 := file.GetHash().GetHash(utils.MD5)
var tempFile = file.GetFile()
var err error
if len(fileMd5) != utils.MD5.Width {
tempFile, fileMd5, err = stream.CacheFullAndHash(file, &up, utils.MD5)
} else if tempFile == nil {
tempFile, err = file.CacheFullAndWriter(&up, nil)
}
fileMd5, err := utils.HashFile(utils.MD5, tempFile)
if err != nil {
return nil, err
}
@ -345,7 +350,7 @@ func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model
header["Edrive-UploadFileId"] = fmt.Sprint(status.UploadFileId)
}
_, err := y.put(ctx, status.FileUploadUrl, header, true, io.NopCloser(tempFile), isFamily)
_, err := y.put(ctx, status.FileUploadUrl, header, true, tempFile, isFamily)
if err, ok := err.(*RespErr); ok && err.Code != "InputStreamReadError" {
return nil, err
}

View File

@ -500,7 +500,8 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
if err != nil {
return nil, err
}
ss, err := stream.NewStreamSectionReader(file, int(sliceSize))
ss, err := stream.NewStreamSectionReader(file, int(sliceSize), &up)
if err != nil {
return nil, err
}
@ -581,7 +582,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
return nil
},
After: func(err error) {
ss.RecycleSectionReader(reader)
ss.FreeSectionReader(reader)
},
},
)
@ -857,9 +858,7 @@ func (y *Cloud189PC) GetMultiUploadUrls(ctx context.Context, isFamily bool, uplo
// Legacy upload; the family cloud does not support overwrite
func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
tempFile, fileMd5, err := stream.CacheFullInTempFileAndHash(file, cacheFileProgress, utils.MD5)
tempFile, fileMd5, err := stream.CacheFullAndHash(file, &up, utils.MD5)
if err != nil {
return nil, err
}

View File

@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"io"
"net/url"
stdpath "path"
"strings"
@ -12,6 +13,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/fs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/sign"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
@ -160,25 +162,18 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
sign.Sign(reqPath)),
}, nil
}
resultLink := *link
resultLink.SyncClosers = utils.NewSyncClosers(link)
if args.Redirect {
return link, nil
return &resultLink, nil
}
resultLink := &model.Link{
URL: link.URL,
Header: link.Header,
RangeReader: link.RangeReader,
MFile: link.MFile,
Concurrency: link.Concurrency,
PartSize: link.PartSize,
ContentLength: link.ContentLength,
SyncClosers: utils.NewSyncClosers(link),
}
if resultLink.ContentLength == 0 {
resultLink.ContentLength = fi.GetSize()
}
if resultLink.MFile != nil {
return resultLink, nil
return &resultLink, nil
}
if d.DownloadConcurrency > 0 {
resultLink.Concurrency = d.DownloadConcurrency
@ -186,7 +181,7 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
if d.DownloadPartSize > 0 {
resultLink.PartSize = d.DownloadPartSize * utils.KB
}
return resultLink, nil
return &resultLink, nil
}
return nil, errs.ObjectNotFound
}
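
The Link handling in `Alias.Link` (and in the Strm driver further down) switches from copying `model.Link` field by field to cloning the whole value and then overriding only `SyncClosers` — the "clone Link more elegantly" change from the commit message. A tiny standalone illustration of why the value copy is preferable (the struct here is a stand-in, not the real `model.Link`):

```go
package main

import "fmt"

// link is a stand-in for model.Link; the point is the cloning pattern, not
// the exact fields.
type link struct {
	URL           string
	PartSize      int
	ContentLength int64
	SyncClosers   []string // stand-in for utils.SyncClosers
}

func main() {
	orig := &link{URL: "https://example.com/f", PartSize: 4 << 20, ContentLength: 123}

	// A value copy keeps every field, including ones added to the struct later,
	// whereas a field-by-field copy silently drops anything not listed.
	clone := *orig
	clone.SyncClosers = []string{"closer-for-orig"} // override only what must differ

	fmt.Println(clone.URL, clone.PartSize, clone.ContentLength)
	fmt.Println(orig.SyncClosers == nil) // the original is left untouched: true
}
```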
@ -313,24 +308,29 @@ func (d *Alias) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer,
reqPath, err := d.getReqPath(ctx, dstDir, true)
if err == nil {
if len(reqPath) == 1 {
return fs.PutDirectly(ctx, *reqPath[0], &stream.FileStream{
Obj: s,
Mimetype: s.GetMimetype(),
WebPutAsTask: s.NeedStore(),
Reader: s,
})
} else {
file, err := s.CacheFullInTempFile()
storage, reqActualPath, err := op.GetStorageAndActualPath(*reqPath[0])
if err != nil {
return err
}
for _, path := range reqPath {
return op.Put(ctx, storage, reqActualPath, &stream.FileStream{
Obj: s,
Mimetype: s.GetMimetype(),
Reader: s,
}, up)
} else {
file, err := s.CacheFullAndWriter(nil, nil)
if err != nil {
return err
}
count := float64(len(reqPath) + 1)
up(100 / count)
for i, path := range reqPath {
err = errors.Join(err, fs.PutDirectly(ctx, *path, &stream.FileStream{
Obj: s,
Mimetype: s.GetMimetype(),
WebPutAsTask: s.NeedStore(),
Reader: file,
Obj: s,
Mimetype: s.GetMimetype(),
Reader: file,
}))
up(float64(i+2) / float64(count) * 100)
_, e := file.Seek(0, io.SeekStart)
if e != nil {
return errors.Join(err, e)
@ -402,10 +402,24 @@ func (d *Alias) Extract(ctx context.Context, obj model.Obj, args model.ArchiveIn
return nil, errs.ObjectNotFound
}
for _, dst := range dsts {
link, err := d.extract(ctx, dst, sub, args)
if err == nil {
return link, nil
reqPath := stdpath.Join(dst, sub)
link, err := d.extract(ctx, reqPath, args)
if err != nil {
continue
}
if link == nil {
return &model.Link{
URL: fmt.Sprintf("%s/ap%s?inner=%s&pass=%s&sign=%s",
common.GetApiUrl(ctx),
utils.EncodePath(reqPath, true),
utils.EncodePath(args.InnerPath, true),
url.QueryEscape(args.Password),
sign.SignArchive(reqPath)),
}, nil
}
resultLink := *link
resultLink.SyncClosers = utils.NewSyncClosers(link)
return &resultLink, nil
}
return nil, errs.NotImplement
}
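
The multi-destination branch of `Alias.Put` above now caches the stream once, then rewinds and replays it for each destination while spreading the progress callback evenly across the steps. A hedged sketch of that fan-out shape with stand-in names (the real code wraps the cached file in a `stream.FileStream`, calls `fs.PutDirectly`, and accumulates failures with `errors.Join` instead of returning on the first one):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// fanOut replays an already-cached, seekable upload once per destination and
// reports overall progress after each step (caching counts as the first step).
// putDirectly is a stand-in for the real fs.PutDirectly call.
func fanOut(src io.ReadSeeker, dsts []string, up func(percent float64)) error {
	total := float64(len(dsts) + 1)
	up(100 / total) // the initial caching pass is step one
	for i, dst := range dsts {
		if err := putDirectly(dst, src); err != nil {
			return fmt.Errorf("put %s: %w", dst, err)
		}
		up(float64(i+2) / total * 100)
		// Rewind so the next destination reads the same bytes again.
		if _, err := src.Seek(0, io.SeekStart); err != nil {
			return err
		}
	}
	return nil
}

func putDirectly(dst string, r io.Reader) error {
	_, err := io.Copy(io.Discard, r) // pretend to upload the stream to dst
	return err
}

func main() {
	up := func(p float64) { fmt.Printf("progress: %.0f%%\n", p) }
	_ = fanOut(strings.NewReader("payload"), []string{"/backup-a", "/backup-b"}, up)
}
```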

View File

@ -2,8 +2,6 @@ package alias
import (
"context"
"fmt"
"net/url"
stdpath "path"
"strings"
@ -12,8 +10,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/fs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/sign"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/OpenList/v4/server/common"
)
@ -140,8 +136,7 @@ func (d *Alias) listArchive(ctx context.Context, dst, sub string, args model.Arc
return nil, errs.NotImplement
}
func (d *Alias) extract(ctx context.Context, dst, sub string, args model.ArchiveInnerArgs) (*model.Link, error) {
reqPath := stdpath.Join(dst, sub)
func (d *Alias) extract(ctx context.Context, reqPath string, args model.ArchiveInnerArgs) (*model.Link, error) {
storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
if err != nil {
return nil, err
@ -149,20 +144,12 @@ func (d *Alias) extract(ctx context.Context, dst, sub string, args model.Archive
if _, ok := storage.(driver.ArchiveReader); !ok {
return nil, errs.NotImplement
}
if args.Redirect && common.ShouldProxy(storage, stdpath.Base(sub)) {
_, err = fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
if err != nil {
if args.Redirect && common.ShouldProxy(storage, stdpath.Base(reqPath)) {
_, err := fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
if err == nil {
return nil, err
}
link := &model.Link{
URL: fmt.Sprintf("%s/ap%s?inner=%s&pass=%s&sign=%s",
common.GetApiUrl(ctx),
utils.EncodePath(reqPath, true),
utils.EncodePath(args.InnerPath, true),
url.QueryEscape(args.Password),
sign.SignArchive(reqPath)),
}
return link, nil
return nil, nil
}
link, _, err := op.DriverExtract(ctx, storage, reqActualPath, args)
return link, err

View File

@ -191,9 +191,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
hash := stream.GetHash().GetHash(utils.SHA1)
if len(hash) != utils.SHA1.Width {
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, hash, err = streamPkg.CacheFullInTempFileAndHash(stream, cacheFileProgress, utils.SHA1)
_, hash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA1)
if err != nil {
return nil, err
}
@ -218,14 +216,13 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
if !createResp.RapidUpload {
// 2. normal upload
log.Debugf("[aliyundive_open] normal upload")
preTime := time.Now()
var offset, length int64 = 0, partSize
//var length
ss, err := streamPkg.NewStreamSectionReader(stream, int(partSize))
ss, err := streamPkg.NewStreamSectionReader(stream, int(partSize), &up)
if err != nil {
return nil, err
}
preTime := time.Now()
var offset, length int64 = 0, partSize
for i := 0; i < len(createResp.PartInfoList); i++ {
if utils.IsCanceled(ctx) {
return nil, ctx.Err()
@ -253,7 +250,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second))
ss.RecycleSectionReader(rd)
ss.FreeSectionReader(rd)
if err != nil {
return nil, err
}

View File

@ -237,15 +237,16 @@ func (d *Cloudreve) upLocal(ctx context.Context, stream model.FileStreamer, u Up
}
func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
DEFAULT := int64(u.ChunkSize)
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
if err != nil {
return err
}
uploadUrl := u.UploadURLs[0]
credential := u.Credential
var finish int64 = 0
var chunk int = 0
DEFAULT := int64(u.ChunkSize)
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT))
if err != nil {
return err
}
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
@ -294,7 +295,7 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.RecycleSectionReader(rd)
ss.FreeSectionReader(rd)
if err != nil {
return err
}
@ -306,13 +307,14 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
}
func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
uploadUrl := u.UploadURLs[0]
var finish int64 = 0
DEFAULT := int64(u.ChunkSize)
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT))
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
if err != nil {
return err
}
uploadUrl := u.UploadURLs[0]
var finish int64 = 0
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
@ -353,7 +355,7 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.RecycleSectionReader(rd)
ss.FreeSectionReader(rd)
if err != nil {
return err
}
@ -367,14 +369,15 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
}
func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
var finish int64 = 0
var chunk int = 0
var etags []string
DEFAULT := int64(u.ChunkSize)
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT))
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
if err != nil {
return err
}
var finish int64 = 0
var chunk int = 0
var etags []string
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
@ -415,7 +418,7 @@ func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u Uploa
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.RecycleSectionReader(rd)
ss.FreeSectionReader(rd)
if err != nil {
return err
}

View File

@ -252,15 +252,16 @@ func (d *CloudreveV4) upLocal(ctx context.Context, file model.FileStreamer, u Fi
}
func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
DEFAULT := int64(u.ChunkSize)
ss, err := stream.NewStreamSectionReader(file, int(DEFAULT), &up)
if err != nil {
return err
}
uploadUrl := u.UploadUrls[0]
credential := u.Credential
var finish int64 = 0
var chunk int = 0
DEFAULT := int64(u.ChunkSize)
ss, err := stream.NewStreamSectionReader(file, int(DEFAULT))
if err != nil {
return err
}
for finish < file.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
@ -309,7 +310,7 @@ func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u F
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.RecycleSectionReader(rd)
ss.FreeSectionReader(rd)
if err != nil {
return err
}
@ -321,13 +322,14 @@ func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u F
}
func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
uploadUrl := u.UploadUrls[0]
var finish int64 = 0
DEFAULT := int64(u.ChunkSize)
ss, err := stream.NewStreamSectionReader(file, int(DEFAULT))
ss, err := stream.NewStreamSectionReader(file, int(DEFAULT), &up)
if err != nil {
return err
}
uploadUrl := u.UploadUrls[0]
var finish int64 = 0
for finish < file.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
@ -369,7 +371,7 @@ func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.RecycleSectionReader(rd)
ss.FreeSectionReader(rd)
if err != nil {
return err
}
@ -383,14 +385,15 @@ func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u
}
func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
var finish int64 = 0
var chunk int = 0
var etags []string
DEFAULT := int64(u.ChunkSize)
ss, err := stream.NewStreamSectionReader(file, int(DEFAULT))
ss, err := stream.NewStreamSectionReader(file, int(DEFAULT), &up)
if err != nil {
return err
}
var finish int64 = 0
var chunk int = 0
var etags []string
for finish < file.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
@ -432,7 +435,7 @@ func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileU
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.RecycleSectionReader(rd)
ss.FreeSectionReader(rd)
if err != nil {
return err
}

View File

@ -401,7 +401,6 @@ func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, streamer model.FileSt
},
Reader: wrappedIn,
Mimetype: "application/octet-stream",
WebPutAsTask: streamer.NeedStore(),
ForceStreamUpload: true,
Exist: streamer.GetExist(),
}

View File

@ -449,10 +449,11 @@ func (d *Doubao) uploadNode(uploadConfig *UploadConfig, dir model.Obj, file mode
// Upload is the regular upload implementation
func (d *Doubao) Upload(ctx context.Context, config *UploadConfig, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, dataType string) (model.Obj, error) {
ss, err := stream.NewStreamSectionReader(file, int(file.GetSize()))
ss, err := stream.NewStreamSectionReader(file, int(file.GetSize()), &up)
if err != nil {
return nil, err
}
reader, err := ss.GetSectionReader(0, file.GetSize())
if err != nil {
return nil, err
@ -503,7 +504,7 @@ func (d *Doubao) Upload(ctx context.Context, config *UploadConfig, dstDir model.
}
return nil
})
ss.RecycleSectionReader(reader)
ss.FreeSectionReader(reader)
if err != nil {
return nil, err
}
@ -542,15 +543,15 @@ func (d *Doubao) UploadByMultipart(ctx context.Context, config *UploadConfig, fi
if config.InnerUploadAddress.AdvanceOption.SliceSize > 0 {
chunkSize = int64(config.InnerUploadAddress.AdvanceOption.SliceSize)
}
ss, err := stream.NewStreamSectionReader(file, int(chunkSize), &up)
if err != nil {
return nil, err
}
totalParts := (fileSize + chunkSize - 1) / chunkSize
// Create the part info list
parts := make([]UploadPart, totalParts)
// Use stream.NewStreamSectionReader instead of caching to a temp file
ss, err := stream.NewStreamSectionReader(file, int(chunkSize))
if err != nil {
return nil, err
}
up(10.0) // update progress
// Set up parallel uploading
thread := min(int(totalParts), d.uploadThread)
@ -641,7 +642,7 @@ func (d *Doubao) UploadByMultipart(ctx context.Context, config *UploadConfig, fi
return nil
},
After: func(err error) {
ss.RecycleSectionReader(reader)
ss.FreeSectionReader(reader)
},
})
}

View File

@ -162,7 +162,7 @@ func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
SetBody(driver.NewLimitedUploadStream(ctx, stream))
}, nil)
} else {
err = d.chunkUpload(ctx, stream, putUrl)
err = d.chunkUpload(ctx, stream, putUrl, up)
}
return err
}

View File

@ -254,13 +254,14 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {
return res, nil
}
func (d *GoogleDrive) chunkUpload(ctx context.Context, file model.FileStreamer, url string) error {
func (d *GoogleDrive) chunkUpload(ctx context.Context, file model.FileStreamer, url string, up driver.UpdateProgress) error {
var defaultChunkSize = d.ChunkSize * 1024 * 1024
var offset int64 = 0
ss, err := stream.NewStreamSectionReader(file, int(defaultChunkSize))
ss, err := stream.NewStreamSectionReader(file, int(defaultChunkSize), &up)
if err != nil {
return err
}
var offset int64 = 0
url += "?includeItemsFromAllDrives=true&supportsAllDrives=true"
for offset < file.GetSize() {
if utils.IsCanceled(ctx) {
@ -300,12 +301,13 @@ func (d *GoogleDrive) chunkUpload(ctx context.Context, file model.FileStreamer,
}
return fmt.Errorf("%s: %v", e.Error.Message, e.Error.Errors)
}
up(float64(offset+chunkSize) / float64(file.GetSize()) * 100)
return nil
},
retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second))
ss.RecycleSectionReader(reader)
ss.FreeSectionReader(reader)
if err != nil {
return err
}

View File

@ -276,9 +276,7 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreame
etag := s.GetHash().GetHash(utils.MD5)
var err error
if len(etag) != utils.MD5.Width {
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, etag, err = stream.CacheFullInTempFileAndHash(s, cacheFileProgress, utils.MD5)
_, etag, err = stream.CacheFullAndHash(s, &up, utils.MD5)
if err != nil {
return nil, err
}

View File

@ -180,7 +180,7 @@ func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, file model.FileS
if err != nil {
return err
}
tempFile, err := file.CacheFullInTempFile()
tempFile, err := file.CacheFullAndWriter(&up, nil)
if err != nil {
return err
}

View File

@ -263,7 +263,7 @@ func (d *MoPan) Remove(ctx context.Context, obj model.Obj) error {
}
func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
file, err := stream.CacheFullInTempFile()
file, err := stream.CacheFullAndWriter(&up, nil)
if err != nil {
return nil, err
}

View File

@ -223,7 +223,7 @@ func (d *NeteaseMusic) removeSongObj(file model.Obj) error {
}
func (d *NeteaseMusic) putSongStream(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress) error {
tmp, err := stream.CacheFullInTempFile()
tmp, err := stream.CacheFullAndWriter(&up, nil)
if err != nil {
return err
}

View File

@ -238,13 +238,14 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil
if err != nil {
return err
}
uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
var finish int64 = 0
DEFAULT := d.ChunkSize * 1024 * 1024
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT))
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
if err != nil {
return err
}
uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
var finish int64 = 0
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
@ -285,7 +286,7 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.RecycleSectionReader(rd)
ss.FreeSectionReader(rd)
if err != nil {
return err
}

View File

@ -152,13 +152,14 @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model.
if err != nil {
return err
}
uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
var finish int64 = 0
DEFAULT := d.ChunkSize * 1024 * 1024
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT))
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
if err != nil {
return err
}
uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
var finish int64 = 0
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
@ -199,7 +200,7 @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model.
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.RecycleSectionReader(rd)
ss.FreeSectionReader(rd)
if err != nil {
return err
}

View File

@ -12,6 +12,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
hash_extend "github.com/OpenListTeam/OpenList/v4/pkg/utils/hash"
"github.com/go-resty/resty/v2"
@ -212,15 +213,11 @@ func (d *PikPak) Remove(ctx context.Context, obj model.Obj) error {
}
func (d *PikPak) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
hi := stream.GetHash()
sha1Str := hi.GetHash(hash_extend.GCID)
if len(sha1Str) < hash_extend.GCID.Width {
tFile, err := stream.CacheFullInTempFile()
if err != nil {
return err
}
sha1Str := stream.GetHash().GetHash(hash_extend.GCID)
sha1Str, err = utils.HashFile(hash_extend.GCID, tFile, stream.GetSize())
if len(sha1Str) < hash_extend.GCID.Width {
var err error
_, sha1Str, err = streamPkg.CacheFullAndHash(stream, &up, hash_extend.GCID, stream.GetSize())
if err != nil {
return err
}

View File

@ -438,20 +438,19 @@ func (d *PikPak) UploadByOSS(ctx context.Context, params *S3Params, s model.File
}
func (d *PikPak) UploadByMultipart(ctx context.Context, params *S3Params, fileSize int64, s model.FileStreamer, up driver.UpdateProgress) error {
tmpF, err := s.CacheFullAndWriter(&up, nil)
if err != nil {
return err
}
var (
chunks []oss.FileChunk
parts []oss.UploadPart
imur oss.InitiateMultipartUploadResult
ossClient *oss.Client
bucket *oss.Bucket
err error
)
tmpF, err := s.CacheFullInTempFile()
if err != nil {
return err
}
if ossClient, err = oss.New(params.Endpoint, params.AccessKeyID, params.AccessKeySecret); err != nil {
return err
}

View File

@ -14,7 +14,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/go-resty/resty/v2"
)
@ -158,9 +157,7 @@ func (d *QuarkOpen) Put(ctx context.Context, dstDir model.Obj, stream model.File
}
if len(writers) > 0 {
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, err := streamPkg.CacheFullInTempFileAndWriter(stream, cacheFileProgress, io.MultiWriter(writers...))
_, err := stream.CacheFullAndWriter(&up, io.MultiWriter(writers...))
if err != nil {
return err
}

View File

@ -13,7 +13,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
@ -144,9 +143,7 @@ func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.File
}
if len(writers) > 0 {
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, err := streamPkg.CacheFullInTempFileAndWriter(stream, cacheFileProgress, io.MultiWriter(writers...))
_, err := stream.CacheFullAndWriter(&up, io.MultiWriter(writers...))
if err != nil {
return err
}

View File

@ -173,8 +173,9 @@ func (d *Strm) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*
}, nil
}
// No link fields are modified, so it can be returned directly
return link, nil
resultLink := *link
resultLink.SyncClosers = utils.NewSyncClosers(link)
return &resultLink, nil
}
var _ driver.Driver = (*Strm)(nil)

View File

@ -179,7 +179,7 @@ func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
}
// upload chunks
tempFile, err := stream.CacheFullInTempFile()
tempFile, err := stream.CacheFullAndWriter(&up, nil)
if err != nil {
return err
}

View File

@ -371,9 +371,7 @@ func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, file model.Fi
gcid := file.GetHash().GetHash(hash_extend.GCID)
var err error
if len(gcid) < hash_extend.GCID.Width {
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, gcid, err = stream.CacheFullInTempFileAndHash(file, cacheFileProgress, hash_extend.GCID, file.GetSize())
_, gcid, err = stream.CacheFullAndHash(file, &up, hash_extend.GCID, file.GetSize())
if err != nil {
return err
}

View File

@ -491,9 +491,7 @@ func (xc *XunLeiBrowserCommon) Put(ctx context.Context, dstDir model.Obj, stream
gcid := stream.GetHash().GetHash(hash_extend.GCID)
var err error
if len(gcid) < hash_extend.GCID.Width {
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, gcid, err = streamPkg.CacheFullInTempFileAndHash(stream, cacheFileProgress, hash_extend.GCID, stream.GetSize())
_, gcid, err = streamPkg.CacheFullAndHash(stream, &up, hash_extend.GCID, stream.GetSize())
if err != nil {
return err
}

View File

@ -372,9 +372,7 @@ func (xc *XunLeiXCommon) Put(ctx context.Context, dstDir model.Obj, file model.F
gcid := file.GetHash().GetHash(hash_extend.GCID)
var err error
if len(gcid) < hash_extend.GCID.Width {
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, gcid, err = stream.CacheFullInTempFileAndHash(file, cacheFileProgress, hash_extend.GCID, file.GetSize())
_, gcid, err = stream.CacheFullAndHash(file, &up, hash_extend.GCID, file.GetSize())
if err != nil {
return err
}

View File

@ -317,7 +317,7 @@ func (d *WeiYun) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
if folder, ok = dstDir.(*Folder); !ok {
return nil, errs.NotSupport
}
file, err := stream.CacheFullInTempFile()
file, err := stream.CacheFullAndWriter(&up, nil)
if err != nil {
return nil, err
}