Mirror of https://github.com/OpenListTeam/OpenList.git (synced 2025-09-20 04:36:09 +08:00)
perf(stream): improve file stream range reading and caching mechanism (#1001)
* perf(stream): improve file stream range reading and caching mechanism
* add bytes_test.go
* fix(stream): handle EOF and buffer reading more gracefully
* add comments
* refactor: update CacheFullAndWriter to accept pointer for UpdateProgress
* update tests
* Update drivers/google_drive/util.go
* clone the Link more elegantly
* fix: a stream that is already cached could not be read again
* rename the Bytes type to Reader
* fix stack overflow
* update tests

Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
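The recurring change in the hunks below collapses the old split-progress, temp-file caching pattern into a single call that caches the stream, hashes it, and reports progress through the caller's callback. A minimal before/after sketch, using only the names that appear in the Pan115 hunk:

    // before: temp-file cache on the 0-50% progress range, then continue on 50-100%
    cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
    up = model.UpdateProgressWithRange(up, 50, 100)
    _, fullHash, err = streamPkg.CacheFullInTempFileAndHash(stream, cacheFileProgress, utils.SHA1)

    // after: one call caches, hashes, and owns the whole progress range
    _, fullHash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA1)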
@@ -186,9 +186,7 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
     preHash = strings.ToUpper(preHash)
     fullHash := stream.GetHash().GetHash(utils.SHA1)
     if len(fullHash) != utils.SHA1.Width {
-        cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
-        up = model.UpdateProgressWithRange(up, 50, 100)
-        _, fullHash, err = streamPkg.CacheFullInTempFileAndHash(stream, cacheFileProgress, utils.SHA1)
+        _, fullHash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA1)
         if err != nil {
             return nil, err
         }
@@ -321,7 +321,7 @@ func (d *Pan115) UploadByMultipart(ctx context.Context, params *driver115.Upload
         err    error
     )

-    tmpF, err := s.CacheFullInTempFile()
+    tmpF, err := s.CacheFullAndWriter(&up, nil)
     if err != nil {
         return nil, err
     }
@@ -239,9 +239,7 @@ func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
     }
     sha1 := file.GetHash().GetHash(utils.SHA1)
     if len(sha1) != utils.SHA1.Width {
-        cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
-        up = model.UpdateProgressWithRange(up, 50, 100)
-        _, sha1, err = stream.CacheFullInTempFileAndHash(file, cacheFileProgress, utils.SHA1)
+        _, sha1, err = stream.CacheFullAndHash(file, &up, utils.SHA1)
         if err != nil {
             return err
         }
@@ -86,13 +86,14 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,

     fileSize := stream.GetSize()
     chunkSize := calPartSize(fileSize)
-    partNum := (stream.GetSize() + chunkSize - 1) / chunkSize
-    parts := make([]oss.UploadPart, partNum)
-    offset := int64(0)
-    ss, err := streamPkg.NewStreamSectionReader(stream, int(chunkSize))
+    ss, err := streamPkg.NewStreamSectionReader(stream, int(chunkSize), &up)
     if err != nil {
         return err
     }

+    partNum := (stream.GetSize() + chunkSize - 1) / chunkSize
+    parts := make([]oss.UploadPart, partNum)
+    offset := int64(0)
     for i := int64(1); i <= partNum; i++ {
         if utils.IsCanceled(ctx) {
             return ctx.Err()
@@ -119,7 +120,7 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
             retry.Attempts(3),
             retry.DelayType(retry.BackOffDelay),
             retry.Delay(time.Second))
-        ss.RecycleSectionReader(rd)
+        ss.FreeSectionReader(rd)
         if err != nil {
             return err
         }
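NewStreamSectionReader now takes the progress callback as a third argument, and the per-chunk release method is renamed from RecycleSectionReader to FreeSectionReader. A hedged sketch of the chunked-upload loop the driver hunks converge on (names taken from the diff; upload and retry details omitted):

    ss, err := streamPkg.NewStreamSectionReader(stream, int(chunkSize), &up)
    if err != nil {
        return err
    }
    for offset := int64(0); offset < stream.GetSize(); offset += chunkSize {
        rd, err := ss.GetSectionReader(offset, min(chunkSize, stream.GetSize()-offset))
        if err != nil {
            return err
        }
        // ... upload rd, typically wrapped in retry.Do ...
        ss.FreeSectionReader(rd) // hand the section buffer back for reuse
    }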
@@ -182,9 +182,7 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, file model.FileStrea
     etag := file.GetHash().GetHash(utils.MD5)
     var err error
     if len(etag) < utils.MD5.Width {
-        cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
-        up = model.UpdateProgressWithRange(up, 50, 100)
-        _, etag, err = stream.CacheFullInTempFileAndHash(file, cacheFileProgress, utils.MD5)
+        _, etag, err = stream.CacheFullAndHash(file, &up, utils.MD5)
         if err != nil {
             return err
         }
@@ -81,6 +81,12 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
     if size > chunkSize {
         chunkCount = int((size + chunkSize - 1) / chunkSize)
     }

+    ss, err := stream.NewStreamSectionReader(file, int(chunkSize), &up)
+    if err != nil {
+        return err
+    }
+
     lastChunkSize := size % chunkSize
     if lastChunkSize == 0 {
         lastChunkSize = chunkSize
@@ -92,10 +98,6 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
         batchSize = 10
         getS3UploadUrl = d.getS3PreSignedUrls
     }
-    ss, err := stream.NewStreamSectionReader(file, int(chunkSize))
-    if err != nil {
-        return err
-    }

     thread := min(int(chunkCount), d.UploadThread)
     threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
@@ -180,7 +182,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
                 return nil
             },
             After: func(err error) {
-                ss.RecycleSectionReader(reader)
+                ss.FreeSectionReader(reader)
             },
         })
     }
@@ -132,9 +132,7 @@ func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
     // etag 文件md5
     etag := file.GetHash().GetHash(utils.MD5)
     if len(etag) < utils.MD5.Width {
-        cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
-        up = model.UpdateProgressWithRange(up, 50, 100)
-        _, etag, err = stream.CacheFullInTempFileAndHash(file, cacheFileProgress, utils.MD5)
+        _, etag, err = stream.CacheFullAndHash(file, &up, utils.MD5)
         if err != nil {
             return nil, err
         }
@@ -46,6 +46,12 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
     uploadDomain := createResp.Data.Servers[0]
     size := file.GetSize()
     chunkSize := createResp.Data.SliceSize

+    ss, err := stream.NewStreamSectionReader(file, int(chunkSize), &up)
+    if err != nil {
+        return err
+    }
+
     uploadNums := (size + chunkSize - 1) / chunkSize
     thread := min(int(uploadNums), d.UploadThread)
     threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
@@ -53,10 +59,6 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
         retry.Delay(time.Second),
         retry.DelayType(retry.BackOffDelay))

-    ss, err := stream.NewStreamSectionReader(file, int(chunkSize))
-    if err != nil {
-        return err
-    }
     for partIndex := range uploadNums {
         if utils.IsCanceled(uploadCtx) {
             break
@@ -157,7 +159,7 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
                 return nil
             },
             After: func(err error) {
-                ss.RecycleSectionReader(reader)
+                ss.FreeSectionReader(reader)
             },
         })
     }
@@ -522,9 +522,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
     var err error
     fullHash := stream.GetHash().GetHash(utils.SHA256)
     if len(fullHash) != utils.SHA256.Width {
-        cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
-        up = model.UpdateProgressWithRange(up, 50, 100)
-        _, fullHash, err = streamPkg.CacheFullInTempFileAndHash(stream, cacheFileProgress, utils.SHA256)
+        _, fullHash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA256)
         if err != nil {
             return err
         }
@@ -5,17 +5,19 @@ import (
     "encoding/base64"
     "encoding/xml"
     "fmt"
-    "github.com/skip2/go-qrcode"
     "io"
     "net/http"
     "strconv"
     "strings"
     "time"

+    "github.com/skip2/go-qrcode"
+
     "github.com/OpenListTeam/OpenList/v4/drivers/base"
     "github.com/OpenListTeam/OpenList/v4/internal/driver"
     "github.com/OpenListTeam/OpenList/v4/internal/model"
     "github.com/OpenListTeam/OpenList/v4/internal/op"
+    "github.com/OpenListTeam/OpenList/v4/internal/stream"
     "github.com/OpenListTeam/OpenList/v4/pkg/utils"

     "github.com/go-resty/resty/v2"
@@ -311,11 +313,14 @@ func (y *Cloud189TV) RapidUpload(ctx context.Context, dstDir model.Obj, stream m

 // 旧版本上传,家庭云不支持覆盖
 func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
-    tempFile, err := file.CacheFullInTempFile()
-    if err != nil {
-        return nil, err
+    fileMd5 := file.GetHash().GetHash(utils.MD5)
+    var tempFile = file.GetFile()
+    var err error
+    if len(fileMd5) != utils.MD5.Width {
+        tempFile, fileMd5, err = stream.CacheFullAndHash(file, &up, utils.MD5)
+    } else if tempFile == nil {
+        tempFile, err = file.CacheFullAndWriter(&up, nil)
     }
-    fileMd5, err := utils.HashFile(utils.MD5, tempFile)
     if err != nil {
         return nil, err
     }
@@ -345,7 +350,7 @@ func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model
             header["Edrive-UploadFileId"] = fmt.Sprint(status.UploadFileId)
         }

-        _, err := y.put(ctx, status.FileUploadUrl, header, true, io.NopCloser(tempFile), isFamily)
+        _, err := y.put(ctx, status.FileUploadUrl, header, true, tempFile, isFamily)
         if err, ok := err.(*RespErr); ok && err.Code != "InputStreamReadError" {
             return nil, err
         }
@@ -500,7 +500,8 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
     if err != nil {
         return nil, err
     }
-    ss, err := stream.NewStreamSectionReader(file, int(sliceSize))
+
+    ss, err := stream.NewStreamSectionReader(file, int(sliceSize), &up)
     if err != nil {
         return nil, err
     }
@@ -581,7 +582,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
                 return nil
             },
             After: func(err error) {
-                ss.RecycleSectionReader(reader)
+                ss.FreeSectionReader(reader)
             },
         },
     )
@@ -857,9 +858,7 @@ func (y *Cloud189PC) GetMultiUploadUrls(ctx context.Context, isFamily bool, uplo

 // 旧版本上传,家庭云不支持覆盖
 func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
-    cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
-    up = model.UpdateProgressWithRange(up, 50, 100)
-    tempFile, fileMd5, err := stream.CacheFullInTempFileAndHash(file, cacheFileProgress, utils.MD5)
+    tempFile, fileMd5, err := stream.CacheFullAndHash(file, &up, utils.MD5)
     if err != nil {
         return nil, err
     }
@@ -5,6 +5,7 @@ import (
     "errors"
     "fmt"
     "io"
+    "net/url"
     stdpath "path"
     "strings"

@@ -12,6 +13,7 @@ import (
     "github.com/OpenListTeam/OpenList/v4/internal/errs"
     "github.com/OpenListTeam/OpenList/v4/internal/fs"
     "github.com/OpenListTeam/OpenList/v4/internal/model"
+    "github.com/OpenListTeam/OpenList/v4/internal/op"
     "github.com/OpenListTeam/OpenList/v4/internal/sign"
     "github.com/OpenListTeam/OpenList/v4/internal/stream"
     "github.com/OpenListTeam/OpenList/v4/pkg/utils"
@@ -160,25 +162,18 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
                     sign.Sign(reqPath)),
             }, nil
         }

+        resultLink := *link
+        resultLink.SyncClosers = utils.NewSyncClosers(link)
         if args.Redirect {
-            return link, nil
+            return &resultLink, nil
         }
-
-        resultLink := &model.Link{
-            URL:           link.URL,
-            Header:        link.Header,
-            RangeReader:   link.RangeReader,
-            MFile:         link.MFile,
-            Concurrency:   link.Concurrency,
-            PartSize:      link.PartSize,
-            ContentLength: link.ContentLength,
-            SyncClosers:   utils.NewSyncClosers(link),
-        }
         if resultLink.ContentLength == 0 {
             resultLink.ContentLength = fi.GetSize()
         }
         if resultLink.MFile != nil {
-            return resultLink, nil
+            return &resultLink, nil
         }
         if d.DownloadConcurrency > 0 {
             resultLink.Concurrency = d.DownloadConcurrency
@@ -186,7 +181,7 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
         if d.DownloadPartSize > 0 {
             resultLink.PartSize = d.DownloadPartSize * utils.KB
         }
-        return resultLink, nil
+        return &resultLink, nil
     }
     return nil, errs.ObjectNotFound
}
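Rather than rebuilding a model.Link field by field, the driver now shallow-copies the resolved link and attaches a fresh SyncClosers that still closes the original. A sketch of the pattern used in the two hunks above:

    resultLink := *link                                 // shallow copy of the upstream link
    resultLink.SyncClosers = utils.NewSyncClosers(link) // closing the copy also closes the original
    if resultLink.ContentLength == 0 {
        resultLink.ContentLength = fi.GetSize()
    }
    return &resultLink, nil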
@@ -313,24 +308,29 @@ func (d *Alias) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer,
     reqPath, err := d.getReqPath(ctx, dstDir, true)
     if err == nil {
         if len(reqPath) == 1 {
-            return fs.PutDirectly(ctx, *reqPath[0], &stream.FileStream{
-                Obj:          s,
-                Mimetype:     s.GetMimetype(),
-                WebPutAsTask: s.NeedStore(),
-                Reader:       s,
-            })
-        } else {
-            file, err := s.CacheFullInTempFile()
+            storage, reqActualPath, err := op.GetStorageAndActualPath(*reqPath[0])
             if err != nil {
                 return err
             }
-            for _, path := range reqPath {
+            return op.Put(ctx, storage, reqActualPath, &stream.FileStream{
+                Obj:      s,
+                Mimetype: s.GetMimetype(),
+                Reader:   s,
+            }, up)
+        } else {
+            file, err := s.CacheFullAndWriter(nil, nil)
+            if err != nil {
+                return err
+            }
+            count := float64(len(reqPath) + 1)
+            up(100 / count)
+            for i, path := range reqPath {
                 err = errors.Join(err, fs.PutDirectly(ctx, *path, &stream.FileStream{
                     Obj:      s,
                     Mimetype: s.GetMimetype(),
-                    WebPutAsTask: s.NeedStore(),
-                    Reader:       file,
+                    Reader:   file,
                 }))
+                up(float64(i+2) / float64(count) * 100)
                 _, e := file.Seek(0, io.SeekStart)
                 if e != nil {
                     return errors.Join(err, e)
@@ -402,10 +402,24 @@ func (d *Alias) Extract(ctx context.Context, obj model.Obj, args model.ArchiveIn
         return nil, errs.ObjectNotFound
     }
     for _, dst := range dsts {
-        link, err := d.extract(ctx, dst, sub, args)
-        if err == nil {
-            return link, nil
+        reqPath := stdpath.Join(dst, sub)
+        link, err := d.extract(ctx, reqPath, args)
+        if err != nil {
+            continue
         }
+        if link == nil {
+            return &model.Link{
+                URL: fmt.Sprintf("%s/ap%s?inner=%s&pass=%s&sign=%s",
+                    common.GetApiUrl(ctx),
+                    utils.EncodePath(reqPath, true),
+                    utils.EncodePath(args.InnerPath, true),
+                    url.QueryEscape(args.Password),
+                    sign.SignArchive(reqPath)),
+            }, nil
+        }
+        resultLink := *link
+        resultLink.SyncClosers = utils.NewSyncClosers(link)
+        return &resultLink, nil
     }
     return nil, errs.NotImplement
}
|
@ -2,8 +2,6 @@ package alias
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
|
||||||
"net/url"
|
|
||||||
stdpath "path"
|
stdpath "path"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
@@ -12,8 +10,6 @@ import (
     "github.com/OpenListTeam/OpenList/v4/internal/fs"
     "github.com/OpenListTeam/OpenList/v4/internal/model"
     "github.com/OpenListTeam/OpenList/v4/internal/op"
-    "github.com/OpenListTeam/OpenList/v4/internal/sign"
-    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
     "github.com/OpenListTeam/OpenList/v4/server/common"
 )

@@ -140,8 +136,7 @@ func (d *Alias) listArchive(ctx context.Context, dst, sub string, args model.Arc
     return nil, errs.NotImplement
 }

-func (d *Alias) extract(ctx context.Context, dst, sub string, args model.ArchiveInnerArgs) (*model.Link, error) {
-    reqPath := stdpath.Join(dst, sub)
+func (d *Alias) extract(ctx context.Context, reqPath string, args model.ArchiveInnerArgs) (*model.Link, error) {
     storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
     if err != nil {
         return nil, err
@@ -149,20 +144,12 @@ func (d *Alias) extract(ctx context.Context, dst, sub string, args model.Archive
     if _, ok := storage.(driver.ArchiveReader); !ok {
         return nil, errs.NotImplement
     }
-    if args.Redirect && common.ShouldProxy(storage, stdpath.Base(sub)) {
-        _, err = fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
-        if err != nil {
+    if args.Redirect && common.ShouldProxy(storage, stdpath.Base(reqPath)) {
+        _, err := fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
+        if err == nil {
             return nil, err
         }
-        link := &model.Link{
-            URL: fmt.Sprintf("%s/ap%s?inner=%s&pass=%s&sign=%s",
-                common.GetApiUrl(ctx),
-                utils.EncodePath(reqPath, true),
-                utils.EncodePath(args.InnerPath, true),
-                url.QueryEscape(args.Password),
-                sign.SignArchive(reqPath)),
-        }
-        return link, nil
+        return nil, nil
     }
     link, _, err := op.DriverExtract(ctx, storage, reqActualPath, args)
     return link, err
|
@ -191,9 +191,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
|
|||||||
|
|
||||||
hash := stream.GetHash().GetHash(utils.SHA1)
|
hash := stream.GetHash().GetHash(utils.SHA1)
|
||||||
if len(hash) != utils.SHA1.Width {
|
if len(hash) != utils.SHA1.Width {
|
||||||
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
|
_, hash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA1)
|
||||||
up = model.UpdateProgressWithRange(up, 50, 100)
|
|
||||||
_, hash, err = streamPkg.CacheFullInTempFileAndHash(stream, cacheFileProgress, utils.SHA1)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -218,14 +216,13 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
     if !createResp.RapidUpload {
         // 2. normal upload
         log.Debugf("[aliyundive_open] normal upload")
-        preTime := time.Now()
-        var offset, length int64 = 0, partSize
-        //var length
-        ss, err := streamPkg.NewStreamSectionReader(stream, int(partSize))
+        ss, err := streamPkg.NewStreamSectionReader(stream, int(partSize), &up)
         if err != nil {
             return nil, err
         }

+        preTime := time.Now()
+        var offset, length int64 = 0, partSize
         for i := 0; i < len(createResp.PartInfoList); i++ {
             if utils.IsCanceled(ctx) {
                 return nil, ctx.Err()
@@ -253,7 +250,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
                 retry.Attempts(3),
                 retry.DelayType(retry.BackOffDelay),
                 retry.Delay(time.Second))
-            ss.RecycleSectionReader(rd)
+            ss.FreeSectionReader(rd)
             if err != nil {
                 return nil, err
             }
|
@ -237,15 +237,16 @@ func (d *Cloudreve) upLocal(ctx context.Context, stream model.FileStreamer, u Up
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
|
func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
|
||||||
|
DEFAULT := int64(u.ChunkSize)
|
||||||
|
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
uploadUrl := u.UploadURLs[0]
|
uploadUrl := u.UploadURLs[0]
|
||||||
credential := u.Credential
|
credential := u.Credential
|
||||||
var finish int64 = 0
|
var finish int64 = 0
|
||||||
var chunk int = 0
|
var chunk int = 0
|
||||||
DEFAULT := int64(u.ChunkSize)
|
|
||||||
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for finish < stream.GetSize() {
|
for finish < stream.GetSize() {
|
||||||
if utils.IsCanceled(ctx) {
|
if utils.IsCanceled(ctx) {
|
||||||
return ctx.Err()
|
return ctx.Err()
|
||||||
@@ -294,7 +295,7 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
             retry.DelayType(retry.BackOffDelay),
             retry.Delay(time.Second),
         )
-        ss.RecycleSectionReader(rd)
+        ss.FreeSectionReader(rd)
         if err != nil {
             return err
         }
@@ -306,13 +307,14 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
 }

 func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
-    uploadUrl := u.UploadURLs[0]
-    var finish int64 = 0
     DEFAULT := int64(u.ChunkSize)
-    ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT))
+    ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
     if err != nil {
         return err
     }

+    uploadUrl := u.UploadURLs[0]
+    var finish int64 = 0
     for finish < stream.GetSize() {
         if utils.IsCanceled(ctx) {
             return ctx.Err()
@@ -353,7 +355,7 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
             retry.DelayType(retry.BackOffDelay),
             retry.Delay(time.Second),
         )
-        ss.RecycleSectionReader(rd)
+        ss.FreeSectionReader(rd)
         if err != nil {
             return err
         }
@@ -367,14 +369,15 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
 }

 func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
-    var finish int64 = 0
-    var chunk int = 0
-    var etags []string
     DEFAULT := int64(u.ChunkSize)
-    ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT))
+    ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
     if err != nil {
         return err
     }

+    var finish int64 = 0
+    var chunk int = 0
+    var etags []string
     for finish < stream.GetSize() {
         if utils.IsCanceled(ctx) {
             return ctx.Err()
@@ -415,7 +418,7 @@ func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u Uploa
             retry.DelayType(retry.BackOffDelay),
             retry.Delay(time.Second),
         )
-        ss.RecycleSectionReader(rd)
+        ss.FreeSectionReader(rd)
         if err != nil {
             return err
         }
|
@ -252,15 +252,16 @@ func (d *CloudreveV4) upLocal(ctx context.Context, file model.FileStreamer, u Fi
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
|
func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
|
||||||
|
DEFAULT := int64(u.ChunkSize)
|
||||||
|
ss, err := stream.NewStreamSectionReader(file, int(DEFAULT), &up)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
uploadUrl := u.UploadUrls[0]
|
uploadUrl := u.UploadUrls[0]
|
||||||
credential := u.Credential
|
credential := u.Credential
|
||||||
var finish int64 = 0
|
var finish int64 = 0
|
||||||
var chunk int = 0
|
var chunk int = 0
|
||||||
DEFAULT := int64(u.ChunkSize)
|
|
||||||
ss, err := stream.NewStreamSectionReader(file, int(DEFAULT))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for finish < file.GetSize() {
|
for finish < file.GetSize() {
|
||||||
if utils.IsCanceled(ctx) {
|
if utils.IsCanceled(ctx) {
|
||||||
return ctx.Err()
|
return ctx.Err()
|
||||||
@@ -309,7 +310,7 @@ func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u F
             retry.DelayType(retry.BackOffDelay),
             retry.Delay(time.Second),
         )
-        ss.RecycleSectionReader(rd)
+        ss.FreeSectionReader(rd)
         if err != nil {
             return err
         }
@@ -321,13 +322,14 @@ func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u F
 }

 func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
-    uploadUrl := u.UploadUrls[0]
-    var finish int64 = 0
     DEFAULT := int64(u.ChunkSize)
-    ss, err := stream.NewStreamSectionReader(file, int(DEFAULT))
+    ss, err := stream.NewStreamSectionReader(file, int(DEFAULT), &up)
     if err != nil {
         return err
     }

+    uploadUrl := u.UploadUrls[0]
+    var finish int64 = 0
     for finish < file.GetSize() {
         if utils.IsCanceled(ctx) {
             return ctx.Err()
@@ -369,7 +371,7 @@ func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u
             retry.DelayType(retry.BackOffDelay),
             retry.Delay(time.Second),
         )
-        ss.RecycleSectionReader(rd)
+        ss.FreeSectionReader(rd)
         if err != nil {
             return err
         }
@@ -383,14 +385,15 @@ func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u
 }

 func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
-    var finish int64 = 0
-    var chunk int = 0
-    var etags []string
     DEFAULT := int64(u.ChunkSize)
-    ss, err := stream.NewStreamSectionReader(file, int(DEFAULT))
+    ss, err := stream.NewStreamSectionReader(file, int(DEFAULT), &up)
     if err != nil {
         return err
     }

+    var finish int64 = 0
+    var chunk int = 0
+    var etags []string
     for finish < file.GetSize() {
         if utils.IsCanceled(ctx) {
             return ctx.Err()
@@ -432,7 +435,7 @@ func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileU
             retry.DelayType(retry.BackOffDelay),
             retry.Delay(time.Second),
         )
-        ss.RecycleSectionReader(rd)
+        ss.FreeSectionReader(rd)
         if err != nil {
             return err
         }
|
@ -401,7 +401,6 @@ func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, streamer model.FileSt
|
|||||||
},
|
},
|
||||||
Reader: wrappedIn,
|
Reader: wrappedIn,
|
||||||
Mimetype: "application/octet-stream",
|
Mimetype: "application/octet-stream",
|
||||||
WebPutAsTask: streamer.NeedStore(),
|
|
||||||
ForceStreamUpload: true,
|
ForceStreamUpload: true,
|
||||||
Exist: streamer.GetExist(),
|
Exist: streamer.GetExist(),
|
||||||
}
|
}
|
||||||
|
@ -449,10 +449,11 @@ func (d *Doubao) uploadNode(uploadConfig *UploadConfig, dir model.Obj, file mode
|
|||||||
|
|
||||||
// Upload 普通上传实现
|
// Upload 普通上传实现
|
||||||
func (d *Doubao) Upload(ctx context.Context, config *UploadConfig, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, dataType string) (model.Obj, error) {
|
func (d *Doubao) Upload(ctx context.Context, config *UploadConfig, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, dataType string) (model.Obj, error) {
|
||||||
ss, err := stream.NewStreamSectionReader(file, int(file.GetSize()))
|
ss, err := stream.NewStreamSectionReader(file, int(file.GetSize()), &up)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
reader, err := ss.GetSectionReader(0, file.GetSize())
|
reader, err := ss.GetSectionReader(0, file.GetSize())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -503,7 +504,7 @@ func (d *Doubao) Upload(ctx context.Context, config *UploadConfig, dstDir model.
         }
         return nil
     })
-    ss.RecycleSectionReader(reader)
+    ss.FreeSectionReader(reader)
     if err != nil {
         return nil, err
     }
||||||
@ -542,15 +543,15 @@ func (d *Doubao) UploadByMultipart(ctx context.Context, config *UploadConfig, fi
|
|||||||
if config.InnerUploadAddress.AdvanceOption.SliceSize > 0 {
|
if config.InnerUploadAddress.AdvanceOption.SliceSize > 0 {
|
||||||
chunkSize = int64(config.InnerUploadAddress.AdvanceOption.SliceSize)
|
chunkSize = int64(config.InnerUploadAddress.AdvanceOption.SliceSize)
|
||||||
}
|
}
|
||||||
|
ss, err := stream.NewStreamSectionReader(file, int(chunkSize), &up)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
totalParts := (fileSize + chunkSize - 1) / chunkSize
|
totalParts := (fileSize + chunkSize - 1) / chunkSize
|
||||||
// 创建分片信息组
|
// 创建分片信息组
|
||||||
parts := make([]UploadPart, totalParts)
|
parts := make([]UploadPart, totalParts)
|
||||||
|
|
||||||
// 用 stream.NewStreamSectionReader 替代缓存临时文件
|
|
||||||
ss, err := stream.NewStreamSectionReader(file, int(chunkSize))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
up(10.0) // 更新进度
|
up(10.0) // 更新进度
|
||||||
// 设置并行上传
|
// 设置并行上传
|
||||||
thread := min(int(totalParts), d.uploadThread)
|
thread := min(int(totalParts), d.uploadThread)
|
||||||
@@ -641,7 +642,7 @@ func (d *Doubao) UploadByMultipart(ctx context.Context, config *UploadConfig, fi
                 return nil
             },
             After: func(err error) {
-                ss.RecycleSectionReader(reader)
+                ss.FreeSectionReader(reader)
             },
         })
     }
|
@ -162,7 +162,7 @@ func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
|
|||||||
SetBody(driver.NewLimitedUploadStream(ctx, stream))
|
SetBody(driver.NewLimitedUploadStream(ctx, stream))
|
||||||
}, nil)
|
}, nil)
|
||||||
} else {
|
} else {
|
||||||
err = d.chunkUpload(ctx, stream, putUrl)
|
err = d.chunkUpload(ctx, stream, putUrl, up)
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -254,13 +254,14 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {
|
|||||||
return res, nil
|
return res, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *GoogleDrive) chunkUpload(ctx context.Context, file model.FileStreamer, url string) error {
|
func (d *GoogleDrive) chunkUpload(ctx context.Context, file model.FileStreamer, url string, up driver.UpdateProgress) error {
|
||||||
var defaultChunkSize = d.ChunkSize * 1024 * 1024
|
var defaultChunkSize = d.ChunkSize * 1024 * 1024
|
||||||
var offset int64 = 0
|
ss, err := stream.NewStreamSectionReader(file, int(defaultChunkSize), &up)
|
||||||
ss, err := stream.NewStreamSectionReader(file, int(defaultChunkSize))
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var offset int64 = 0
|
||||||
url += "?includeItemsFromAllDrives=true&supportsAllDrives=true"
|
url += "?includeItemsFromAllDrives=true&supportsAllDrives=true"
|
||||||
for offset < file.GetSize() {
|
for offset < file.GetSize() {
|
||||||
if utils.IsCanceled(ctx) {
|
if utils.IsCanceled(ctx) {
|
||||||
@@ -300,12 +301,13 @@ func (d *GoogleDrive) chunkUpload(ctx context.Context, file model.FileStreamer,
                 }
                 return fmt.Errorf("%s: %v", e.Error.Message, e.Error.Errors)
             }
+            up(float64(offset+chunkSize) / float64(file.GetSize()) * 100)
             return nil
         },
             retry.Attempts(3),
             retry.DelayType(retry.BackOffDelay),
             retry.Delay(time.Second))
-        ss.RecycleSectionReader(reader)
+        ss.FreeSectionReader(reader)
         if err != nil {
             return err
         }
|
@ -276,9 +276,7 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreame
|
|||||||
etag := s.GetHash().GetHash(utils.MD5)
|
etag := s.GetHash().GetHash(utils.MD5)
|
||||||
var err error
|
var err error
|
||||||
if len(etag) != utils.MD5.Width {
|
if len(etag) != utils.MD5.Width {
|
||||||
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
|
_, etag, err = stream.CacheFullAndHash(s, &up, utils.MD5)
|
||||||
up = model.UpdateProgressWithRange(up, 50, 100)
|
|
||||||
_, etag, err = stream.CacheFullInTempFileAndHash(s, cacheFileProgress, utils.MD5)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -180,7 +180,7 @@ func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, file model.FileS
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
tempFile, err := file.CacheFullInTempFile()
|
tempFile, err := file.CacheFullAndWriter(&up, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -263,7 +263,7 @@ func (d *MoPan) Remove(ctx context.Context, obj model.Obj) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
|
func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
|
||||||
file, err := stream.CacheFullInTempFile()
|
file, err := stream.CacheFullAndWriter(&up, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -223,7 +223,7 @@ func (d *NeteaseMusic) removeSongObj(file model.Obj) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (d *NeteaseMusic) putSongStream(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress) error {
|
func (d *NeteaseMusic) putSongStream(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress) error {
|
||||||
tmp, err := stream.CacheFullInTempFile()
|
tmp, err := stream.CacheFullAndWriter(&up, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -238,13 +238,14 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
|
|
||||||
var finish int64 = 0
|
|
||||||
DEFAULT := d.ChunkSize * 1024 * 1024
|
DEFAULT := d.ChunkSize * 1024 * 1024
|
||||||
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT))
|
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
|
||||||
|
var finish int64 = 0
|
||||||
for finish < stream.GetSize() {
|
for finish < stream.GetSize() {
|
||||||
if utils.IsCanceled(ctx) {
|
if utils.IsCanceled(ctx) {
|
||||||
return ctx.Err()
|
return ctx.Err()
|
||||||
@ -285,7 +286,7 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil
|
|||||||
retry.DelayType(retry.BackOffDelay),
|
retry.DelayType(retry.BackOffDelay),
|
||||||
retry.Delay(time.Second),
|
retry.Delay(time.Second),
|
||||||
)
|
)
|
||||||
ss.RecycleSectionReader(rd)
|
ss.FreeSectionReader(rd)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -152,13 +152,14 @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model.
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
|
|
||||||
var finish int64 = 0
|
|
||||||
DEFAULT := d.ChunkSize * 1024 * 1024
|
DEFAULT := d.ChunkSize * 1024 * 1024
|
||||||
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT))
|
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
|
||||||
|
var finish int64 = 0
|
||||||
for finish < stream.GetSize() {
|
for finish < stream.GetSize() {
|
||||||
if utils.IsCanceled(ctx) {
|
if utils.IsCanceled(ctx) {
|
||||||
return ctx.Err()
|
return ctx.Err()
|
||||||
@ -199,7 +200,7 @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model.
|
|||||||
retry.DelayType(retry.BackOffDelay),
|
retry.DelayType(retry.BackOffDelay),
|
||||||
retry.Delay(time.Second),
|
retry.Delay(time.Second),
|
||||||
)
|
)
|
||||||
ss.RecycleSectionReader(rd)
|
ss.FreeSectionReader(rd)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -12,6 +12,7 @@ import (
|
|||||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||||
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||||
|
streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
|
||||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||||
hash_extend "github.com/OpenListTeam/OpenList/v4/pkg/utils/hash"
|
hash_extend "github.com/OpenListTeam/OpenList/v4/pkg/utils/hash"
|
||||||
"github.com/go-resty/resty/v2"
|
"github.com/go-resty/resty/v2"
|
||||||
@ -212,15 +213,11 @@ func (d *PikPak) Remove(ctx context.Context, obj model.Obj) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (d *PikPak) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
|
func (d *PikPak) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
|
||||||
hi := stream.GetHash()
|
sha1Str := stream.GetHash().GetHash(hash_extend.GCID)
|
||||||
sha1Str := hi.GetHash(hash_extend.GCID)
|
|
||||||
if len(sha1Str) < hash_extend.GCID.Width {
|
|
||||||
tFile, err := stream.CacheFullInTempFile()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
sha1Str, err = utils.HashFile(hash_extend.GCID, tFile, stream.GetSize())
|
if len(sha1Str) < hash_extend.GCID.Width {
|
||||||
|
var err error
|
||||||
|
_, sha1Str, err = streamPkg.CacheFullAndHash(stream, &up, hash_extend.GCID, stream.GetSize())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -438,20 +438,19 @@ func (d *PikPak) UploadByOSS(ctx context.Context, params *S3Params, s model.File
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (d *PikPak) UploadByMultipart(ctx context.Context, params *S3Params, fileSize int64, s model.FileStreamer, up driver.UpdateProgress) error {
|
func (d *PikPak) UploadByMultipart(ctx context.Context, params *S3Params, fileSize int64, s model.FileStreamer, up driver.UpdateProgress) error {
|
||||||
|
tmpF, err := s.CacheFullAndWriter(&up, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
chunks []oss.FileChunk
|
chunks []oss.FileChunk
|
||||||
parts []oss.UploadPart
|
parts []oss.UploadPart
|
||||||
imur oss.InitiateMultipartUploadResult
|
imur oss.InitiateMultipartUploadResult
|
||||||
ossClient *oss.Client
|
ossClient *oss.Client
|
||||||
bucket *oss.Bucket
|
bucket *oss.Bucket
|
||||||
err error
|
|
||||||
)
|
)
|
||||||
|
|
||||||
tmpF, err := s.CacheFullInTempFile()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if ossClient, err = oss.New(params.Endpoint, params.AccessKeyID, params.AccessKeySecret); err != nil {
|
if ossClient, err = oss.New(params.Endpoint, params.AccessKeyID, params.AccessKeySecret); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -14,7 +14,6 @@ import (
|
|||||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||||
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||||
streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
|
|
||||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||||
"github.com/go-resty/resty/v2"
|
"github.com/go-resty/resty/v2"
|
||||||
)
|
)
|
||||||
@ -158,9 +157,7 @@ func (d *QuarkOpen) Put(ctx context.Context, dstDir model.Obj, stream model.File
|
|||||||
}
|
}
|
||||||
|
|
||||||
if len(writers) > 0 {
|
if len(writers) > 0 {
|
||||||
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
|
_, err := stream.CacheFullAndWriter(&up, io.MultiWriter(writers...))
|
||||||
up = model.UpdateProgressWithRange(up, 50, 100)
|
|
||||||
_, err := streamPkg.CacheFullInTempFileAndWriter(stream, cacheFileProgress, io.MultiWriter(writers...))
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -13,7 +13,6 @@ import (
|
|||||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||||
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||||
streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
|
|
||||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||||
"github.com/go-resty/resty/v2"
|
"github.com/go-resty/resty/v2"
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
@ -144,9 +143,7 @@ func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.File
|
|||||||
}
|
}
|
||||||
|
|
||||||
if len(writers) > 0 {
|
if len(writers) > 0 {
|
||||||
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
|
_, err := stream.CacheFullAndWriter(&up, io.MultiWriter(writers...))
|
||||||
up = model.UpdateProgressWithRange(up, 50, 100)
|
|
||||||
_, err := streamPkg.CacheFullInTempFileAndWriter(stream, cacheFileProgress, io.MultiWriter(writers...))
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -173,8 +173,9 @@ func (d *Strm) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*
|
|||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// 没有修改link的字段,可直接返回
|
resultLink := *link
|
||||||
return link, nil
|
resultLink.SyncClosers = utils.NewSyncClosers(link)
|
||||||
|
return &resultLink, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ driver.Driver = (*Strm)(nil)
|
var _ driver.Driver = (*Strm)(nil)
|
||||||
|
@ -179,7 +179,7 @@ func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
|
|||||||
}
|
}
|
||||||
|
|
||||||
// upload chunks
|
// upload chunks
|
||||||
tempFile, err := stream.CacheFullInTempFile()
|
tempFile, err := stream.CacheFullAndWriter(&up, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -371,9 +371,7 @@ func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, file model.Fi
|
|||||||
gcid := file.GetHash().GetHash(hash_extend.GCID)
|
gcid := file.GetHash().GetHash(hash_extend.GCID)
|
||||||
var err error
|
var err error
|
||||||
if len(gcid) < hash_extend.GCID.Width {
|
if len(gcid) < hash_extend.GCID.Width {
|
||||||
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
|
_, gcid, err = stream.CacheFullAndHash(file, &up, hash_extend.GCID, file.GetSize())
|
||||||
up = model.UpdateProgressWithRange(up, 50, 100)
|
|
||||||
_, gcid, err = stream.CacheFullInTempFileAndHash(file, cacheFileProgress, hash_extend.GCID, file.GetSize())
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -491,9 +491,7 @@ func (xc *XunLeiBrowserCommon) Put(ctx context.Context, dstDir model.Obj, stream
|
|||||||
gcid := stream.GetHash().GetHash(hash_extend.GCID)
|
gcid := stream.GetHash().GetHash(hash_extend.GCID)
|
||||||
var err error
|
var err error
|
||||||
if len(gcid) < hash_extend.GCID.Width {
|
if len(gcid) < hash_extend.GCID.Width {
|
||||||
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
|
_, gcid, err = streamPkg.CacheFullAndHash(stream, &up, hash_extend.GCID, stream.GetSize())
|
||||||
up = model.UpdateProgressWithRange(up, 50, 100)
|
|
||||||
_, gcid, err = streamPkg.CacheFullInTempFileAndHash(stream, cacheFileProgress, hash_extend.GCID, stream.GetSize())
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -372,9 +372,7 @@ func (xc *XunLeiXCommon) Put(ctx context.Context, dstDir model.Obj, file model.F
|
|||||||
gcid := file.GetHash().GetHash(hash_extend.GCID)
|
gcid := file.GetHash().GetHash(hash_extend.GCID)
|
||||||
var err error
|
var err error
|
||||||
if len(gcid) < hash_extend.GCID.Width {
|
if len(gcid) < hash_extend.GCID.Width {
|
||||||
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
|
_, gcid, err = stream.CacheFullAndHash(file, &up, hash_extend.GCID, file.GetSize())
|
||||||
up = model.UpdateProgressWithRange(up, 50, 100)
|
|
||||||
_, gcid, err = stream.CacheFullInTempFileAndHash(file, cacheFileProgress, hash_extend.GCID, file.GetSize())
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -317,7 +317,7 @@ func (d *WeiYun) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
|
|||||||
if folder, ok = dstDir.(*Folder); !ok {
|
if folder, ok = dstDir.(*Folder); !ok {
|
||||||
return nil, errs.NotSupport
|
return nil, errs.NotSupport
|
||||||
}
|
}
|
||||||
file, err := stream.CacheFullInTempFile()
|
file, err := stream.CacheFullAndWriter(&up, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -91,7 +91,7 @@ func InitConfig() {
|
|||||||
} else {
|
} else {
|
||||||
conf.MaxBufferLimit = conf.Conf.MaxBufferLimit * utils.MB
|
conf.MaxBufferLimit = conf.Conf.MaxBufferLimit * utils.MB
|
||||||
}
|
}
|
||||||
log.Infof("max buffer limit: %d", conf.MaxBufferLimit)
|
log.Infof("max buffer limit: %dMB", conf.MaxBufferLimit/utils.MB)
|
||||||
if !conf.Conf.Force {
|
if !conf.Conf.Force {
|
||||||
confFromEnv()
|
confFromEnv()
|
||||||
}
|
}
|
||||||
|
@ -25,7 +25,7 @@ var PrivacyReg []*regexp.Regexp
|
|||||||
var (
|
var (
|
||||||
// StoragesLoaded loaded success if empty
|
// StoragesLoaded loaded success if empty
|
||||||
StoragesLoaded = false
|
StoragesLoaded = false
|
||||||
MaxBufferLimit int
|
MaxBufferLimit = 16 * 1024 * 1024
|
||||||
)
|
)
|
||||||
var (
|
var (
|
||||||
RawIndexHtml string
|
RawIndexHtml string
|
||||||
|
@ -70,25 +70,25 @@ func (t *ArchiveDownloadTask) RunWithoutPushUploadTask() (*ArchiveContentUploadT
|
|||||||
}()
|
}()
|
||||||
var decompressUp model.UpdateProgress
|
var decompressUp model.UpdateProgress
|
||||||
if t.CacheFull {
|
if t.CacheFull {
|
||||||
var total, cur int64 = 0, 0
|
total := int64(0)
|
||||||
for _, s := range ss {
|
for _, s := range ss {
|
||||||
total += s.GetSize()
|
total += s.GetSize()
|
||||||
}
|
}
|
||||||
t.SetTotalBytes(total)
|
t.SetTotalBytes(total)
|
||||||
t.Status = "getting src object"
|
t.Status = "getting src object"
|
||||||
for _, s := range ss {
|
part := 100 / float64(len(ss)+1)
|
||||||
if s.GetFile() == nil {
|
for i, s := range ss {
|
||||||
_, err = stream.CacheFullInTempFileAndWriter(s, func(p float64) {
|
if s.GetFile() != nil {
|
||||||
t.SetProgress((float64(cur) + float64(s.GetSize())*p/100.0) / float64(total))
|
continue
|
||||||
}, nil)
|
|
||||||
}
|
}
|
||||||
cur += s.GetSize()
|
_, err = s.CacheFullAndWriter(nil, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
} else {
|
||||||
|
t.SetProgress(float64(i+1) * part)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
t.SetProgress(100.0)
|
decompressUp = model.UpdateProgressWithRange(t.SetProgress, 100-part, 100)
|
||||||
decompressUp = func(_ float64) {}
|
|
||||||
} else {
|
} else {
|
||||||
decompressUp = t.SetProgress
|
decompressUp = t.SetProgress
|
||||||
}
|
}
|
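The archive task now gives each source stream, plus the decompression step, an equal share of the progress bar instead of tracking cumulative bytes. For example, with three source streams part = 100/4 = 25: caching stream i advances overall progress to (i+1)*25, and decompression is mapped onto the remaining 75-100 range:

    part := 100 / float64(len(ss)+1)
    // after caching stream i: t.SetProgress(float64(i+1) * part)
    decompressUp = model.UpdateProgressWithRange(t.SetProgress, 100-part, 100)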
||||||
|
@ -69,7 +69,7 @@ func putAsTask(ctx context.Context, dstDirPath string, file model.FileStreamer)
|
|||||||
return nil, errors.WithStack(errs.UploadNotSupported)
|
return nil, errors.WithStack(errs.UploadNotSupported)
|
||||||
}
|
}
|
||||||
if file.NeedStore() {
|
if file.NeedStore() {
|
||||||
_, err := file.CacheFullInTempFile()
|
_, err := file.CacheFullAndWriter(nil, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrapf(err, "failed to create temp file")
|
return nil, errors.Wrapf(err, "failed to create temp file")
|
||||||
}
|
}
|
||||||
|
@ -2,7 +2,6 @@ package model
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"io"
|
"io"
|
||||||
"os"
|
|
||||||
"sort"
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
@ -40,16 +39,17 @@ type FileStreamer interface {
|
|||||||
utils.ClosersIF
|
utils.ClosersIF
|
||||||
Obj
|
Obj
|
||||||
GetMimetype() string
|
GetMimetype() string
|
||||||
//SetReader(io.Reader)
|
|
||||||
NeedStore() bool
|
NeedStore() bool
|
||||||
IsForceStreamUpload() bool
|
IsForceStreamUpload() bool
|
||||||
GetExist() Obj
|
GetExist() Obj
|
||||||
SetExist(Obj)
|
SetExist(Obj)
|
||||||
//for a non-seekable Stream, RangeRead supports peeking some data, and CacheFullInTempFile still works
|
// for a non-seekable Stream, RangeRead supports peeking some data, and CacheFullAndWriter still works
|
||||||
RangeRead(http_range.Range) (io.Reader, error)
|
RangeRead(http_range.Range) (io.Reader, error)
|
||||||
//for a non-seekable Stream, if Read is called, this function won't work
|
// for a non-seekable Stream, if Read is called, this function won't work.
|
||||||
CacheFullInTempFile() (File, error)
|
// caches the full Stream and writes it to writer (if provided, even if the stream is already cached).
|
||||||
SetTmpFile(r *os.File)
|
CacheFullAndWriter(up *UpdateProgress, writer io.Writer) (File, error)
|
||||||
|
SetTmpFile(file File)
|
||||||
|
// if the Stream is not a File and is not cached, returns nil.
|
||||||
GetFile() File
|
GetFile() File
|
||||||
}
|
}
|
||||||
|
|
||||||
|
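The comments on RangeRead and CacheFullAndWriter describe the contract that matters for non-seekable streams: an early RangeRead may peek a prefix into memory without consuming it, and a later CacheFullAndWriter still yields the complete file. A hedged sketch of a consumer relying on that contract; sniffThenCache and the 512-byte prefix are illustrative, not part of the interface:

func sniffThenCache(s model.FileStreamer) (model.File, []byte, error) {
	// Peek the first 512 bytes (clamped to the stream size by RangeRead).
	head, err := s.RangeRead(http_range.Range{Start: 0, Length: 512})
	if err != nil {
		return nil, nil, err
	}
	prefix := make([]byte, 512)
	n, _ := io.ReadFull(head, prefix)

	// The peeked bytes stay buffered, so caching still sees the full content.
	cache, err := s.CacheFullAndWriter(nil, nil)
	if err != nil {
		return nil, nil, err
	}
	return cache, prefix[:n], nil
}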
@ -1,7 +1,6 @@
|
|||||||
package stream
|
package stream
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
@ -13,6 +12,7 @@ import (
|
|||||||
"github.com/OpenListTeam/OpenList/v4/internal/conf"
|
"github.com/OpenListTeam/OpenList/v4/internal/conf"
|
||||||
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/pkg/buffer"
|
||||||
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
|
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
|
||||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||||
"go4.org/readerutil"
|
"go4.org/readerutil"
|
||||||
@ -27,13 +27,19 @@ type FileStream struct {
|
|||||||
ForceStreamUpload bool
|
ForceStreamUpload bool
|
||||||
Exist model.Obj //the file that existed in the destination, we can reuse some info since we will overwrite it
|
Exist model.Obj //the file that existed in the destination, we can reuse some info since we will overwrite it
|
||||||
utils.Closers
|
utils.Closers
|
||||||
tmpFile *os.File //if present, tmpFile has full content, it will be deleted at last
|
|
||||||
peekBuff *bytes.Reader
|
tmpFile model.File //if present, tmpFile has full content, it will be deleted at last
|
||||||
|
peekBuff *buffer.Reader
|
||||||
|
size int64
|
||||||
|
oriReader io.Reader // the original reader, used for caching
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *FileStream) GetSize() int64 {
|
func (f *FileStream) GetSize() int64 {
|
||||||
if f.tmpFile != nil {
|
if f.size > 0 {
|
||||||
info, err := f.tmpFile.Stat()
|
return f.size
|
||||||
|
}
|
||||||
|
if file, ok := f.tmpFile.(*os.File); ok {
|
||||||
|
info, err := file.Stat()
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return info.Size()
|
return info.Size()
|
||||||
}
|
}
|
||||||
@ -60,14 +66,18 @@ func (f *FileStream) Close() error {
|
|||||||
if errors.Is(err1, os.ErrClosed) {
|
if errors.Is(err1, os.ErrClosed) {
|
||||||
err1 = nil
|
err1 = nil
|
||||||
}
|
}
|
||||||
if f.tmpFile != nil {
|
if file, ok := f.tmpFile.(*os.File); ok {
|
||||||
err2 = os.RemoveAll(f.tmpFile.Name())
|
err2 = os.RemoveAll(file.Name())
|
||||||
if err2 != nil {
|
if err2 != nil {
|
||||||
err2 = errs.NewErr(err2, "failed to remove tmpFile [%s]", f.tmpFile.Name())
|
err2 = errs.NewErr(err2, "failed to remove tmpFile [%s]", file.Name())
|
||||||
} else {
|
} else {
|
||||||
f.tmpFile = nil
|
f.tmpFile = nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if f.peekBuff != nil {
|
||||||
|
f.peekBuff.Reset()
|
||||||
|
f.peekBuff = nil
|
||||||
|
}
|
||||||
|
|
||||||
return errors.Join(err1, err2)
|
return errors.Join(err1, err2)
|
||||||
}
|
}
|
||||||
@ -79,20 +89,55 @@ func (f *FileStream) SetExist(obj model.Obj) {
|
|||||||
f.Exist = obj
|
f.Exist = obj
|
||||||
}
|
}
|
||||||
|
|
||||||
// CacheFullInTempFile saves all data into tmpFile. Not recommended since it wears the disk,
|
// CacheFullAndWriter saves all data into tmpFile or memory.
|
||||||
// and can't start upload until the file is written. It's not thread-safe!
|
// It's not thread-safe!
|
||||||
func (f *FileStream) CacheFullInTempFile() (model.File, error) {
|
func (f *FileStream) CacheFullAndWriter(up *model.UpdateProgress, writer io.Writer) (model.File, error) {
|
||||||
if file := f.GetFile(); file != nil {
|
if cache := f.GetFile(); cache != nil {
|
||||||
return file, nil
|
if writer == nil {
|
||||||
|
return cache, nil
|
||||||
|
}
|
||||||
|
_, err := cache.Seek(0, io.SeekStart)
|
||||||
|
if err == nil {
|
||||||
|
reader := f.Reader
|
||||||
|
if up != nil {
|
||||||
|
cacheProgress := model.UpdateProgressWithRange(*up, 0, 50)
|
||||||
|
*up = model.UpdateProgressWithRange(*up, 50, 100)
|
||||||
|
reader = &ReaderUpdatingProgress{
|
||||||
|
Reader: &SimpleReaderWithSize{
|
||||||
|
Reader: reader,
|
||||||
|
Size: f.GetSize(),
|
||||||
|
},
|
||||||
|
UpdateProgress: cacheProgress,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_, err = utils.CopyWithBuffer(writer, reader)
|
||||||
|
if err == nil {
|
||||||
|
_, err = cache.Seek(0, io.SeekStart)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return cache, nil
|
||||||
}
|
}
|
||||||
tmpF, err := utils.CreateTempFile(f.Reader, f.GetSize())
|
|
||||||
if err != nil {
|
reader := f.Reader
|
||||||
return nil, err
|
if up != nil {
|
||||||
|
cacheProgress := model.UpdateProgressWithRange(*up, 0, 50)
|
||||||
|
*up = model.UpdateProgressWithRange(*up, 50, 100)
|
||||||
|
reader = &ReaderUpdatingProgress{
|
||||||
|
Reader: &SimpleReaderWithSize{
|
||||||
|
Reader: reader,
|
||||||
|
Size: f.GetSize(),
|
||||||
|
},
|
||||||
|
UpdateProgress: cacheProgress,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
f.Add(tmpF)
|
if writer != nil {
|
||||||
f.tmpFile = tmpF
|
reader = io.TeeReader(reader, writer)
|
||||||
f.Reader = tmpF
|
}
|
||||||
return tmpF, nil
|
f.Reader = reader
|
||||||
|
return f.cache(f.GetSize())
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *FileStream) GetFile() model.File {
|
func (f *FileStream) GetFile() model.File {
|
||||||
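One behavioural detail of the new method: when the stream is already cached, passing a writer replays the cached bytes into it and then rewinds the cache to offset 0, instead of touching the original source again. A hedged sketch that hashes an already-cached stream a second time; sha256/hex are stdlib, and f stands for a *FileStream:

// First pass: cache the stream, no extra writer needed.
if _, err := f.CacheFullAndWriter(nil, nil); err != nil {
	return err
}

// Second pass: the stream is cached, so the cached content is copied
// into the hasher and the cache is seeked back to the start afterwards.
h := sha256.New()
if _, err := f.CacheFullAndWriter(nil, h); err != nil {
	return err
}
sum := hex.EncodeToString(h.Sum(nil))
_ = sum // e.g. compare against a server-side checksum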
@ -106,40 +151,68 @@ func (f *FileStream) GetFile() model.File {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// RangeRead has to cache all data first since only Reader is provided.
|
// RangeRead has to cache all data first since only Reader is provided.
|
||||||
// also supports a peeking RangeRead at the very start, but won't buffer more than conf.MaxBufferLimit of data in memory
|
// It's not thread-safe!
|
||||||
func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
|
func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
|
||||||
if httpRange.Length < 0 || httpRange.Start+httpRange.Length > f.GetSize() {
|
if httpRange.Length < 0 || httpRange.Start+httpRange.Length > f.GetSize() {
|
||||||
httpRange.Length = f.GetSize() - httpRange.Start
|
httpRange.Length = f.GetSize() - httpRange.Start
|
||||||
}
|
}
|
||||||
var cache io.ReaderAt = f.GetFile()
|
if f.GetFile() != nil {
|
||||||
if cache != nil {
|
return io.NewSectionReader(f.GetFile(), httpRange.Start, httpRange.Length), nil
|
||||||
return io.NewSectionReader(cache, httpRange.Start, httpRange.Length), nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
size := httpRange.Start + httpRange.Length
|
size := httpRange.Start + httpRange.Length
|
||||||
if f.peekBuff != nil && size <= int64(f.peekBuff.Len()) {
|
if f.peekBuff != nil && size <= int64(f.peekBuff.Len()) {
|
||||||
return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil
|
return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil
|
||||||
}
|
}
|
||||||
if size <= int64(conf.MaxBufferLimit) {
|
|
||||||
bufSize := min(size, f.GetSize())
|
cache, err := f.cache(size)
|
||||||
// When bytes.Buffer is used as io.CopyBuffer's destination, CopyBuffer calls Buffer.ReadFrom,
|
if err != nil {
|
||||||
// so the Buffer grows even when the amount of data written equals Buffer.Cap
|
return nil, err
|
||||||
buf := make([]byte, bufSize)
|
}
|
||||||
n, err := io.ReadFull(f.Reader, buf)
|
|
||||||
if err != nil {
|
return io.NewSectionReader(cache, httpRange.Start, httpRange.Length), nil
|
||||||
return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", bufSize, n, err)
|
}
|
||||||
}
|
|
||||||
f.peekBuff = bytes.NewReader(buf)
|
// * Old notes:
|
||||||
f.Reader = io.MultiReader(f.peekBuff, f.Reader)
|
// When bytes.Buffer is used as io.CopyBuffer's destination, CopyBuffer calls Buffer.ReadFrom,
|
||||||
cache = f.peekBuff
|
// so the Buffer grows even when the amount of data written equals Buffer.Cap
|
||||||
} else {
|
|
||||||
var err error
|
func (f *FileStream) cache(maxCacheSize int64) (model.File, error) {
|
||||||
cache, err = f.CacheFullInTempFile()
|
if maxCacheSize > int64(conf.MaxBufferLimit) {
|
||||||
|
tmpF, err := utils.CreateTempFile(f.Reader, f.GetSize())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
f.Add(tmpF)
|
||||||
|
f.tmpFile = tmpF
|
||||||
|
f.Reader = tmpF
|
||||||
|
return tmpF, nil
|
||||||
}
|
}
|
||||||
return io.NewSectionReader(cache, httpRange.Start, httpRange.Length), nil
|
|
||||||
|
if f.peekBuff == nil {
|
||||||
|
f.peekBuff = &buffer.Reader{}
|
||||||
|
f.oriReader = f.Reader
|
||||||
|
}
|
||||||
|
bufSize := maxCacheSize - int64(f.peekBuff.Len())
|
||||||
|
buf := make([]byte, bufSize)
|
||||||
|
n, err := io.ReadFull(f.oriReader, buf)
|
||||||
|
if bufSize != int64(n) {
|
||||||
|
return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", bufSize, n, err)
|
||||||
|
}
|
||||||
|
f.peekBuff.Append(buf)
|
||||||
|
if int64(f.peekBuff.Len()) >= f.GetSize() {
|
||||||
|
f.Reader = f.peekBuff
|
||||||
|
f.oriReader = nil
|
||||||
|
} else {
|
||||||
|
f.Reader = io.MultiReader(f.peekBuff, f.oriReader)
|
||||||
|
}
|
||||||
|
return f.peekBuff, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *FileStream) SetTmpFile(file model.File) {
|
||||||
|
f.AddIfCloser(file)
|
||||||
|
f.tmpFile = file
|
||||||
|
f.Reader = file
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ model.FileStreamer = (*SeekableStream)(nil)
|
var _ model.FileStreamer = (*SeekableStream)(nil)
|
||||||
@ -156,7 +229,6 @@ type SeekableStream struct {
|
|||||||
*FileStream
|
*FileStream
|
||||||
// should have one of belows to support rangeRead
|
// should have one of belows to support rangeRead
|
||||||
rangeReadCloser model.RangeReadCloserIF
|
rangeReadCloser model.RangeReadCloserIF
|
||||||
size int64
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewSeekableStream(fs *FileStream, link *model.Link) (*SeekableStream, error) {
|
func NewSeekableStream(fs *FileStream, link *model.Link) (*SeekableStream, error) {
|
||||||
@ -178,38 +250,26 @@ func NewSeekableStream(fs *FileStream, link *model.Link) (*SeekableStream, error
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if _, ok := rr.(*model.FileRangeReader); ok {
|
|
||||||
fs.Reader, err = rr.RangeRead(fs.Ctx, http_range.Range{Length: -1})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
fs.Add(link)
|
|
||||||
return &SeekableStream{FileStream: fs, size: size}, nil
|
|
||||||
}
|
|
||||||
rrc := &model.RangeReadCloser{
|
rrc := &model.RangeReadCloser{
|
||||||
RangeReader: rr,
|
RangeReader: rr,
|
||||||
}
|
}
|
||||||
|
if _, ok := rr.(*model.FileRangeReader); ok {
|
||||||
|
fs.Reader, err = rrc.RangeRead(fs.Ctx, http_range.Range{Length: -1})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fs.size = size
|
||||||
fs.Add(link)
|
fs.Add(link)
|
||||||
fs.Add(rrc)
|
fs.Add(rrc)
|
||||||
return &SeekableStream{FileStream: fs, rangeReadCloser: rrc, size: size}, nil
|
return &SeekableStream{FileStream: fs, rangeReadCloser: rrc}, nil
|
||||||
}
|
}
|
||||||
return nil, fmt.Errorf("illegal seekableStream")
|
return nil, fmt.Errorf("illegal seekableStream")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ss *SeekableStream) GetSize() int64 {
|
|
||||||
if ss.size > 0 {
|
|
||||||
return ss.size
|
|
||||||
}
|
|
||||||
return ss.FileStream.GetSize()
|
|
||||||
}
|
|
||||||
|
|
||||||
//func (ss *SeekableStream) Peek(length int) {
|
|
||||||
//
|
|
||||||
//}
|
|
||||||
|
|
||||||
// RangeRead is not thread-safe; please use it in a single thread only.
|
// RangeRead is not thread-safe; please use it in a single thread only.
|
||||||
func (ss *SeekableStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
|
func (ss *SeekableStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
|
||||||
if ss.tmpFile == nil && ss.rangeReadCloser != nil {
|
if ss.GetFile() == nil && ss.rangeReadCloser != nil {
|
||||||
rc, err := ss.rangeReadCloser.RangeRead(ss.Ctx, httpRange)
|
rc, err := ss.rangeReadCloser.RangeRead(ss.Ctx, httpRange)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -219,47 +279,37 @@ func (ss *SeekableStream) RangeRead(httpRange http_range.Range) (io.Reader, erro
|
|||||||
return ss.FileStream.RangeRead(httpRange)
|
return ss.FileStream.RangeRead(httpRange)
|
||||||
}
|
}
|
||||||
|
|
||||||
//func (f *FileStream) GetReader() io.Reader {
|
|
||||||
// return f.Reader
|
|
||||||
//}
|
|
||||||
|
|
||||||
// Only provide Reader as the full stream when it's demanded; in rapid-upload we can skip this to save memory
|
// Only provide Reader as the full stream when it's demanded; in rapid-upload we can skip this to save memory
|
||||||
func (ss *SeekableStream) Read(p []byte) (n int, err error) {
|
func (ss *SeekableStream) Read(p []byte) (n int, err error) {
|
||||||
|
if err := ss.generateReader(); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return ss.FileStream.Read(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ss *SeekableStream) generateReader() error {
|
||||||
if ss.Reader == nil {
|
if ss.Reader == nil {
|
||||||
if ss.rangeReadCloser == nil {
|
if ss.rangeReadCloser == nil {
|
||||||
return 0, fmt.Errorf("illegal seekableStream")
|
return fmt.Errorf("illegal seekableStream")
|
||||||
}
|
}
|
||||||
rc, err := ss.rangeReadCloser.RangeRead(ss.Ctx, http_range.Range{Length: -1})
|
rc, err := ss.rangeReadCloser.RangeRead(ss.Ctx, http_range.Range{Length: -1})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return err
|
||||||
}
|
}
|
||||||
ss.Reader = rc
|
ss.Reader = rc
|
||||||
}
|
}
|
||||||
return ss.Reader.Read(p)
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ss *SeekableStream) CacheFullInTempFile() (model.File, error) {
|
func (ss *SeekableStream) CacheFullAndWriter(up *model.UpdateProgress, writer io.Writer) (model.File, error) {
|
||||||
if file := ss.GetFile(); file != nil {
|
if err := ss.generateReader(); err != nil {
|
||||||
return file, nil
|
|
||||||
}
|
|
||||||
tmpF, err := utils.CreateTempFile(ss, ss.GetSize())
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
ss.Add(tmpF)
|
return ss.FileStream.CacheFullAndWriter(up, writer)
|
||||||
ss.tmpFile = tmpF
|
|
||||||
ss.Reader = tmpF
|
|
||||||
return tmpF, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *FileStream) SetTmpFile(r *os.File) {
|
|
||||||
f.Add(r)
|
|
||||||
f.tmpFile = r
|
|
||||||
f.Reader = r
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type ReaderWithSize interface {
|
type ReaderWithSize interface {
|
||||||
io.ReadCloser
|
io.Reader
|
||||||
GetSize() int64
|
GetSize() int64
|
||||||
}
|
}
|
||||||
|
|
||||||
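Since ReaderWithSize now only requires io.Reader plus GetSize, any plain reader can be wrapped for progress reporting without needing a Close method. A hedged sketch that drives an UpdateProgress callback from a copy; the payload and the fmt logging are illustrative only:

payload := []byte("example upload body")
r := &ReaderUpdatingProgress{
	Reader: &SimpleReaderWithSize{
		Reader: bytes.NewReader(payload),
		Size:   int64(len(payload)),
	},
	UpdateProgress: func(p float64) { fmt.Printf("progress: %.1f%%\n", p) },
}
if _, err := utils.CopyWithBuffer(io.Discard, r); err != nil {
	return err
}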
@ -293,7 +343,10 @@ func (r *ReaderUpdatingProgress) Read(p []byte) (n int, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (r *ReaderUpdatingProgress) Close() error {
|
func (r *ReaderUpdatingProgress) Close() error {
|
||||||
return r.Reader.Close()
|
if c, ok := r.Reader.(io.Closer); ok {
|
||||||
|
return c.Close()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
type RangeReadReadAtSeeker struct {
|
type RangeReadReadAtSeeker struct {
|
||||||
@ -311,19 +364,20 @@ type headCache struct {
|
|||||||
func (c *headCache) head(p []byte) (int, error) {
|
func (c *headCache) head(p []byte) (int, error) {
|
||||||
n := 0
|
n := 0
|
||||||
for _, buf := range c.bufs {
|
for _, buf := range c.bufs {
|
||||||
if len(buf)+n >= len(p) {
|
n += copy(p[n:], buf)
|
||||||
n += copy(p[n:], buf[:len(p)-n])
|
if n == len(p) {
|
||||||
return n, nil
|
return n, nil
|
||||||
} else {
|
|
||||||
n += copy(p[n:], buf)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
w, err := io.ReadAtLeast(c.reader, p[n:], 1)
|
nn, err := io.ReadFull(c.reader, p[n:])
|
||||||
if w > 0 {
|
if nn > 0 {
|
||||||
buf := make([]byte, w)
|
buf := make([]byte, nn)
|
||||||
copy(buf, p[n:n+w])
|
copy(buf, p[n:])
|
||||||
c.bufs = append(c.bufs, buf)
|
c.bufs = append(c.bufs, buf)
|
||||||
n += w
|
n += nn
|
||||||
|
if err == io.ErrUnexpectedEOF {
|
||||||
|
err = io.EOF
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return n, err
|
return n, err
|
||||||
}
|
}
|
||||||
@ -422,6 +476,9 @@ func (r *RangeReadReadAtSeeker) getReaderAtOffset(off int64) (io.Reader, error)
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (r *RangeReadReadAtSeeker) ReadAt(p []byte, off int64) (n int, err error) {
|
func (r *RangeReadReadAtSeeker) ReadAt(p []byte, off int64) (n int, err error) {
|
||||||
|
if off < 0 || off >= r.ss.GetSize() {
|
||||||
|
return 0, io.EOF
|
||||||
|
}
|
||||||
if off == 0 && r.headCache != nil {
|
if off == 0 && r.headCache != nil {
|
||||||
return r.headCache.head(p)
|
return r.headCache.head(p)
|
||||||
}
|
}
|
||||||
@ -430,12 +487,15 @@ func (r *RangeReadReadAtSeeker) ReadAt(p []byte, off int64) (n int, err error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
n, err = io.ReadAtLeast(rr, p, 1)
|
n, err = io.ReadFull(rr, p)
|
||||||
off += int64(n)
|
if n > 0 {
|
||||||
if err == nil {
|
off += int64(n)
|
||||||
r.readerMap.Store(int64(off), rr)
|
switch err {
|
||||||
} else {
|
case nil:
|
||||||
rr = nil
|
r.readerMap.Store(int64(off), rr)
|
||||||
|
case io.ErrUnexpectedEOF:
|
||||||
|
err = io.EOF
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return n, err
|
return n, err
|
||||||
}
|
}
|
||||||
@ -444,20 +504,14 @@ func (r *RangeReadReadAtSeeker) Seek(offset int64, whence int) (int64, error) {
|
|||||||
switch whence {
|
switch whence {
|
||||||
case io.SeekStart:
|
case io.SeekStart:
|
||||||
case io.SeekCurrent:
|
case io.SeekCurrent:
|
||||||
if offset == 0 {
|
|
||||||
return r.masterOff, nil
|
|
||||||
}
|
|
||||||
offset += r.masterOff
|
offset += r.masterOff
|
||||||
case io.SeekEnd:
|
case io.SeekEnd:
|
||||||
offset += r.ss.GetSize()
|
offset += r.ss.GetSize()
|
||||||
default:
|
default:
|
||||||
return 0, errs.NotSupport
|
return 0, errors.New("Seek: invalid whence")
|
||||||
}
|
}
|
||||||
if offset < 0 {
|
if offset < 0 || offset > r.ss.GetSize() {
|
||||||
return r.masterOff, errors.New("invalid seek: negative position")
|
return 0, errors.New("Seek: invalid offset")
|
||||||
}
|
|
||||||
if offset > r.ss.GetSize() {
|
|
||||||
offset = r.ss.GetSize()
|
|
||||||
}
|
}
|
||||||
r.masterOff = offset
|
r.masterOff = offset
|
||||||
return offset, nil
|
return offset, nil
|
||||||
@ -465,6 +519,8 @@ func (r *RangeReadReadAtSeeker) Seek(offset int64, whence int) (int64, error) {
|
|||||||
|
|
||||||
func (r *RangeReadReadAtSeeker) Read(p []byte) (n int, err error) {
|
func (r *RangeReadReadAtSeeker) Read(p []byte) (n int, err error) {
|
||||||
n, err = r.ReadAt(p, r.masterOff)
|
n, err = r.ReadAt(p, r.masterOff)
|
||||||
r.masterOff += int64(n)
|
if n > 0 {
|
||||||
|
r.masterOff += int64(n)
|
||||||
|
}
|
||||||
return n, err
|
return n, err
|
||||||
}
|
}
|
||||||
|
86
internal/stream/stream_test.go
Normal file
@ -0,0 +1,86 @@
|
|||||||
|
package stream
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestFileStream_RangeRead(t *testing.T) {
|
||||||
|
type args struct {
|
||||||
|
httpRange http_range.Range
|
||||||
|
}
|
||||||
|
buf := []byte("github.com/OpenListTeam/OpenList")
|
||||||
|
f := &FileStream{
|
||||||
|
Obj: &model.Object{
|
||||||
|
Size: int64(len(buf)),
|
||||||
|
},
|
||||||
|
Reader: io.NopCloser(bytes.NewReader(buf)),
|
||||||
|
}
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
f *FileStream
|
||||||
|
args args
|
||||||
|
want func(f *FileStream, got io.Reader, err error) error
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "range 11-12",
|
||||||
|
f: f,
|
||||||
|
args: args{
|
||||||
|
httpRange: http_range.Range{Start: 11, Length: 12},
|
||||||
|
},
|
||||||
|
want: func(f *FileStream, got io.Reader, err error) error {
|
||||||
|
if f.GetFile() != nil {
|
||||||
|
return errors.New("cached")
|
||||||
|
}
|
||||||
|
b, _ := io.ReadAll(got)
|
||||||
|
if !bytes.Equal(buf[11:11+12], b) {
|
||||||
|
return fmt.Errorf("=%s ,want =%s", b, buf[11:11+12])
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "range 11-21",
|
||||||
|
f: f,
|
||||||
|
args: args{
|
||||||
|
httpRange: http_range.Range{Start: 11, Length: 21},
|
||||||
|
},
|
||||||
|
want: func(f *FileStream, got io.Reader, err error) error {
|
||||||
|
if f.GetFile() == nil {
|
||||||
|
return errors.New("not cached")
|
||||||
|
}
|
||||||
|
b, _ := io.ReadAll(got)
|
||||||
|
if !bytes.Equal(buf[11:11+21], b) {
|
||||||
|
return fmt.Errorf("=%s ,want =%s", b, buf[11:11+21])
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
got, err := tt.f.RangeRead(tt.args.httpRange)
|
||||||
|
if err := tt.want(tt.f, got, err); err != nil {
|
||||||
|
t.Errorf("FileStream.RangeRead() %v", err)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
t.Run("after check", func(t *testing.T) {
|
||||||
|
if f.GetFile() == nil {
|
||||||
|
t.Error("not cached")
|
||||||
|
}
|
||||||
|
buf2 := make([]byte, len(buf))
|
||||||
|
if _, err := io.ReadFull(f, buf2); err != nil {
|
||||||
|
t.Errorf("FileStream.Read() error = %v", err)
|
||||||
|
}
|
||||||
|
if !bytes.Equal(buf, buf2) {
|
||||||
|
t.Errorf("FileStream.Read() = %s, want %s", buf2, buf)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
@ -141,56 +141,13 @@ func (r *ReaderWithCtx) Close() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func CacheFullInTempFileAndWriter(stream model.FileStreamer, up model.UpdateProgress, w io.Writer) (model.File, error) {
|
func CacheFullAndHash(stream model.FileStreamer, up *model.UpdateProgress, hashType *utils.HashType, hashParams ...any) (model.File, string, error) {
|
||||||
if cache := stream.GetFile(); cache != nil {
|
|
||||||
if w != nil {
|
|
||||||
_, err := cache.Seek(0, io.SeekStart)
|
|
||||||
if err == nil {
|
|
||||||
var reader io.Reader = stream
|
|
||||||
if up != nil {
|
|
||||||
reader = &ReaderUpdatingProgress{
|
|
||||||
Reader: stream,
|
|
||||||
UpdateProgress: up,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
_, err = utils.CopyWithBuffer(w, reader)
|
|
||||||
if err == nil {
|
|
||||||
_, err = cache.Seek(0, io.SeekStart)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return cache, err
|
|
||||||
}
|
|
||||||
if up != nil {
|
|
||||||
up(100)
|
|
||||||
}
|
|
||||||
return cache, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var reader io.Reader = stream
|
|
||||||
if up != nil {
|
|
||||||
reader = &ReaderUpdatingProgress{
|
|
||||||
Reader: stream,
|
|
||||||
UpdateProgress: up,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if w != nil {
|
|
||||||
reader = io.TeeReader(reader, w)
|
|
||||||
}
|
|
||||||
tmpF, err := utils.CreateTempFile(reader, stream.GetSize())
|
|
||||||
if err == nil {
|
|
||||||
stream.SetTmpFile(tmpF)
|
|
||||||
}
|
|
||||||
return tmpF, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func CacheFullInTempFileAndHash(stream model.FileStreamer, up model.UpdateProgress, hashType *utils.HashType, hashParams ...any) (model.File, string, error) {
|
|
||||||
h := hashType.NewFunc(hashParams...)
|
h := hashType.NewFunc(hashParams...)
|
||||||
tmpF, err := CacheFullInTempFileAndWriter(stream, up, h)
|
tmpF, err := stream.CacheFullAndWriter(up, h)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, "", err
|
return nil, "", err
|
||||||
}
|
}
|
||||||
return tmpF, hex.EncodeToString(h.Sum(nil)), err
|
return tmpF, hex.EncodeToString(h.Sum(nil)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
type StreamSectionReader struct {
|
type StreamSectionReader struct {
|
||||||
@ -199,12 +156,12 @@ type StreamSectionReader struct {
|
|||||||
bufPool *sync.Pool
|
bufPool *sync.Pool
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewStreamSectionReader(file model.FileStreamer, maxBufferSize int) (*StreamSectionReader, error) {
|
func NewStreamSectionReader(file model.FileStreamer, maxBufferSize int, up *model.UpdateProgress) (*StreamSectionReader, error) {
|
||||||
ss := &StreamSectionReader{file: file}
|
ss := &StreamSectionReader{file: file}
|
||||||
if file.GetFile() == nil {
|
if file.GetFile() == nil {
|
||||||
maxBufferSize = min(maxBufferSize, int(file.GetSize()))
|
maxBufferSize = min(maxBufferSize, int(file.GetSize()))
|
||||||
if maxBufferSize > conf.MaxBufferLimit {
|
if maxBufferSize > conf.MaxBufferLimit {
|
||||||
_, err := file.CacheFullInTempFile()
|
_, err := file.CacheFullAndWriter(up, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -240,7 +197,7 @@ func (ss *StreamSectionReader) GetSectionReader(off, length int64) (*SectionRead
|
|||||||
return &SectionReader{io.NewSectionReader(cache, off, length), buf}, nil
|
return &SectionReader{io.NewSectionReader(cache, off, length), buf}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ss *StreamSectionReader) RecycleSectionReader(sr *SectionReader) {
|
func (ss *StreamSectionReader) FreeSectionReader(sr *SectionReader) {
|
||||||
if sr != nil {
|
if sr != nil {
|
||||||
if sr.buf != nil {
|
if sr.buf != nil {
|
||||||
ss.bufPool.Put(sr.buf[0:cap(sr.buf)])
|
ss.bufPool.Put(sr.buf[0:cap(sr.buf)])
|
||||||
|
92
pkg/buffer/bytes.go
Normal file
@ -0,0 +1,92 @@
|
|||||||
|
package buffer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Stores []byte slices that will not be reused
|
||||||
|
type Reader struct {
|
||||||
|
bufs [][]byte
|
||||||
|
length int
|
||||||
|
offset int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Reader) Len() int {
|
||||||
|
return r.length
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Reader) Append(buf []byte) {
|
||||||
|
r.length += len(buf)
|
||||||
|
r.bufs = append(r.bufs, buf)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Reader) Read(p []byte) (int, error) {
|
||||||
|
n, err := r.ReadAt(p, int64(r.offset))
|
||||||
|
if n > 0 {
|
||||||
|
r.offset += n
|
||||||
|
}
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Reader) ReadAt(p []byte, off int64) (int, error) {
|
||||||
|
if off < 0 || off >= int64(r.length) {
|
||||||
|
return 0, io.EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
n, length := 0, int64(0)
|
||||||
|
readFrom := false
|
||||||
|
for _, buf := range r.bufs {
|
||||||
|
newLength := length + int64(len(buf))
|
||||||
|
if readFrom {
|
||||||
|
w := copy(p[n:], buf)
|
||||||
|
n += w
|
||||||
|
} else if off < newLength {
|
||||||
|
readFrom = true
|
||||||
|
w := copy(p[n:], buf[int(off-length):])
|
||||||
|
n += w
|
||||||
|
}
|
||||||
|
if n == len(p) {
|
||||||
|
return n, nil
|
||||||
|
}
|
||||||
|
length = newLength
|
||||||
|
}
|
||||||
|
|
||||||
|
return n, io.EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Reader) Seek(offset int64, whence int) (int64, error) {
|
||||||
|
var abs int
|
||||||
|
switch whence {
|
||||||
|
case io.SeekStart:
|
||||||
|
abs = int(offset)
|
||||||
|
case io.SeekCurrent:
|
||||||
|
abs = r.offset + int(offset)
|
||||||
|
case io.SeekEnd:
|
||||||
|
abs = r.length + int(offset)
|
||||||
|
default:
|
||||||
|
return 0, errors.New("Seek: invalid whence")
|
||||||
|
}
|
||||||
|
|
||||||
|
if abs < 0 || abs > r.length {
|
||||||
|
return 0, errors.New("Seek: invalid offset")
|
||||||
|
}
|
||||||
|
|
||||||
|
r.offset = abs
|
||||||
|
return int64(abs), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Reader) Reset() {
|
||||||
|
clear(r.bufs)
|
||||||
|
r.bufs = nil
|
||||||
|
r.length = 0
|
||||||
|
r.offset = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewReader(buf ...[]byte) *Reader {
|
||||||
|
b := &Reader{}
|
||||||
|
for _, b1 := range buf {
|
||||||
|
b.Append(b1)
|
||||||
|
}
|
||||||
|
return b
|
||||||
|
}
|
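The new buffer.Reader concatenates independently-owned byte slices and serves them through Read/ReadAt/Seek without copying them into one backing array. A small self-contained usage sketch:

package main

import (
	"fmt"
	"io"

	"github.com/OpenListTeam/OpenList/v4/pkg/buffer"
)

func main() {
	// Two independently allocated slices, served as one 23-byte reader.
	r := buffer.NewReader([]byte("github.com/"), []byte("OpenListTeam"))

	p := make([]byte, 8)
	n, _ := r.ReadAt(p, 11)    // crosses from the first slice into the second
	fmt.Println(string(p[:n])) // OpenList

	_, _ = r.Seek(11, io.SeekStart)
	rest, _ := io.ReadAll(r)
	fmt.Println(string(rest)) // OpenListTeam
}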
95
pkg/buffer/bytes_test.go
Normal file
@ -0,0 +1,95 @@
|
|||||||
|
package buffer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestReader_ReadAt(t *testing.T) {
|
||||||
|
type args struct {
|
||||||
|
p []byte
|
||||||
|
off int64
|
||||||
|
}
|
||||||
|
bs := &Reader{}
|
||||||
|
bs.Append([]byte("github.com"))
|
||||||
|
bs.Append([]byte("/"))
|
||||||
|
bs.Append([]byte("OpenList"))
|
||||||
|
bs.Append([]byte("Team/"))
|
||||||
|
bs.Append([]byte("OpenList"))
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
b *Reader
|
||||||
|
args args
|
||||||
|
want func(a args, n int, err error) error
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "readAt len 10 offset 0",
|
||||||
|
b: bs,
|
||||||
|
args: args{
|
||||||
|
p: make([]byte, 10),
|
||||||
|
off: 0,
|
||||||
|
},
|
||||||
|
want: func(a args, n int, err error) error {
|
||||||
|
if n != len(a.p) {
|
||||||
|
return errors.New("read length not match")
|
||||||
|
}
|
||||||
|
if string(a.p) != "github.com" {
|
||||||
|
return errors.New("read content not match")
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "readAt len 12 offset 11",
|
||||||
|
b: bs,
|
||||||
|
args: args{
|
||||||
|
p: make([]byte, 12),
|
||||||
|
off: 11,
|
||||||
|
},
|
||||||
|
want: func(a args, n int, err error) error {
|
||||||
|
if n != len(a.p) {
|
||||||
|
return errors.New("read length not match")
|
||||||
|
}
|
||||||
|
if string(a.p) != "OpenListTeam" {
|
||||||
|
return errors.New("read content not match")
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "readAt len 50 offset 24",
|
||||||
|
b: bs,
|
||||||
|
args: args{
|
||||||
|
p: make([]byte, 50),
|
||||||
|
off: 24,
|
||||||
|
},
|
||||||
|
want: func(a args, n int, err error) error {
|
||||||
|
if n != bs.Len()-int(a.off) {
|
||||||
|
return errors.New("read length not match")
|
||||||
|
}
|
||||||
|
if string(a.p[:n]) != "OpenList" {
|
||||||
|
return errors.New("read content not match")
|
||||||
|
}
|
||||||
|
if err != io.EOF {
|
||||||
|
return errors.New("expect eof")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
got, err := tt.b.ReadAt(tt.args.p, tt.args.off)
|
||||||
|
if err := tt.want(tt.args, got, err); err != nil {
|
||||||
|
t.Errorf("Bytes.ReadAt() error = %v", err)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
@ -53,11 +53,12 @@ func (g *Group) Go(do func(ctx context.Context) error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type Lifecycle struct {
|
type Lifecycle struct {
|
||||||
// Before is thread-safe within OrderedGroup
|
// Before is thread-safe within OrderedGroup.
|
||||||
|
// It is called only once
|
||||||
Before func(ctx context.Context) error
|
Before func(ctx context.Context) error
|
||||||
// If Before returns an err, Do is not called
|
// If Before returns an err, Do is not called
|
||||||
Do func(ctx context.Context) error
|
Do func(ctx context.Context) error
|
||||||
// After is called last
|
// After is called once at the end
|
||||||
After func(err error)
|
After func(err error)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -194,32 +194,32 @@ type SyncClosersIF interface {
|
|||||||
|
|
||||||
type SyncClosers struct {
|
type SyncClosers struct {
|
||||||
closers []io.Closer
|
closers []io.Closer
|
||||||
ref atomic.Int32
|
ref int32
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ SyncClosersIF = (*SyncClosers)(nil)
|
var _ SyncClosersIF = (*SyncClosers)(nil)
|
||||||
|
|
||||||
func (c *SyncClosers) AcquireReference() bool {
|
func (c *SyncClosers) AcquireReference() bool {
|
||||||
ref := c.ref.Add(1)
|
ref := atomic.AddInt32(&c.ref, 1)
|
||||||
if ref > 0 {
|
if ref > 0 {
|
||||||
// log.Debugf("SyncClosers.AcquireReference %p,ref=%d\n", c, ref)
|
// log.Debugf("SyncClosers.AcquireReference %p,ref=%d\n", c, ref)
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
c.ref.Store(math.MinInt16)
|
atomic.StoreInt32(&c.ref, math.MinInt16)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *SyncClosers) Close() error {
|
func (c *SyncClosers) Close() error {
|
||||||
ref := c.ref.Add(-1)
|
ref := atomic.AddInt32(&c.ref, -1)
|
||||||
if ref < -1 {
|
if ref < -1 {
|
||||||
c.ref.Store(math.MinInt16)
|
atomic.StoreInt32(&c.ref, math.MinInt16)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
// log.Debugf("SyncClosers.Close %p,ref=%d\n", c, ref+1)
|
// log.Debugf("SyncClosers.Close %p,ref=%d\n", c, ref+1)
|
||||||
if ref > 0 {
|
if ref > 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
c.ref.Store(math.MinInt16)
|
atomic.StoreInt32(&c.ref, math.MinInt16)
|
||||||
|
|
||||||
var errs []error
|
var errs []error
|
||||||
for _, closer := range c.closers {
|
for _, closer := range c.closers {
|
||||||
@ -234,7 +234,7 @@ func (c *SyncClosers) Close() error {
|
|||||||
|
|
||||||
func (c *SyncClosers) Add(closer io.Closer) {
|
func (c *SyncClosers) Add(closer io.Closer) {
|
||||||
if closer != nil {
|
if closer != nil {
|
||||||
if c.ref.Load() < 0 {
|
if atomic.LoadInt32(&c.ref) < 0 {
|
||||||
panic("Not reusable")
|
panic("Not reusable")
|
||||||
}
|
}
|
||||||
c.closers = append(c.closers, closer)
|
c.closers = append(c.closers, closer)
|
||||||
@ -243,7 +243,7 @@ func (c *SyncClosers) Add(closer io.Closer) {
|
|||||||
|
|
||||||
func (c *SyncClosers) AddIfCloser(a any) {
|
func (c *SyncClosers) AddIfCloser(a any) {
|
||||||
if closer, ok := a.(io.Closer); ok {
|
if closer, ok := a.(io.Closer); ok {
|
||||||
if c.ref.Load() < 0 {
|
if atomic.LoadInt32(&c.ref) < 0 {
|
||||||
panic("Not reusable")
|
panic("Not reusable")
|
||||||
}
|
}
|
||||||
c.closers = append(c.closers, closer)
|
c.closers = append(c.closers, closer)
|
||||||
|