perf(link): optimize concurrent response (#641)

* fix(crypt): bug caused by link cache

* perf(crypt,mega,halalcloud,quark,uc): optimize concurrent response link

* chore: remove unused code

* ftp

* fix bugs; release resources

* add SyncClosers

* local,sftp,smb

* refactor, optimize, enhance

* Update internal/stream/util.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>

* chore

* chore

* optimize, fix bugs

* .

---------

Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
j2rong4cn
2025-07-12 17:57:54 +08:00
committed by GitHub
parent e5fbe72581
commit cc01b410a4
83 changed files with 796 additions and 751 deletions

View File

@@ -1,20 +1,26 @@
package driver
type Config struct {
Name string `json:"name"`
LocalSort bool `json:"local_sort"`
OnlyLocal bool `json:"only_local"`
OnlyProxy bool `json:"only_proxy"`
NoCache bool `json:"no_cache"`
NoUpload bool `json:"no_upload"`
NeedMs bool `json:"need_ms"` // if need get message from user, such as validate code
DefaultRoot string `json:"default_root"`
CheckStatus bool `json:"-"`
Alert string `json:"alert"` //info,success,warning,danger
NoOverwriteUpload bool `json:"-"` // whether to support overwrite upload
ProxyRangeOption bool `json:"-"`
Name string `json:"name"`
LocalSort bool `json:"local_sort"`
// if the driver returns Link with MFile, this should be set to true
OnlyLinkMFile bool `json:"only_local"`
OnlyProxy bool `json:"only_proxy"`
NoCache bool `json:"no_cache"`
NoUpload bool `json:"no_upload"`
// if need get message from user, such as validate code
NeedMs bool `json:"need_ms"`
DefaultRoot string `json:"default_root"`
CheckStatus bool `json:"-"`
//info,success,warning,danger
Alert string `json:"alert"`
// whether to support overwrite upload
NoOverwriteUpload bool `json:"-"`
ProxyRangeOption bool `json:"-"`
// if the driver returns Link without URL, this should be set to true
NoLinkURL bool `json:"-"`
}
func (c Config) MustProxy() bool {
return c.OnlyProxy || c.OnlyLocal
return c.OnlyProxy || c.OnlyLinkMFile || c.NoLinkURL
}
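A note on the rename: OnlyLocal becomes OnlyLinkMFile (its JSON tag stays "only_local" for compatibility), and the new NoLinkURL flag marks drivers whose Link carries neither a URL nor an MFile; both now force proxying. A minimal sketch, assuming a hypothetical driver name:

var exampleConfig = driver.Config{
	Name:          "ExampleLocalFS", // hypothetical driver name
	OnlyLinkMFile: true,             // Link exposes an in-process MFile instead of a URL
}
// exampleConfig.MustProxy() is true: downloads must go through the server.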

View File

@@ -3,7 +3,6 @@ package fs
import (
"context"
"fmt"
"net/http"
stdpath "path"
"time"
@@ -86,19 +85,17 @@ func _copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool
}
if !srcObj.IsDir() {
// copy file directly
link, _, err := op.Link(ctx, srcStorage, srcObjActualPath, model.LinkArgs{
Header: http.Header{},
})
link, _, err := op.Link(ctx, srcStorage, srcObjActualPath, model.LinkArgs{})
if err != nil {
return nil, errors.WithMessagef(err, "failed get [%s] link", srcObjPath)
}
fs := stream.FileStream{
// any link provided is seekable
ss, err := stream.NewSeekableStream(&stream.FileStream{
Obj: srcObj,
Ctx: ctx,
}
// any link provided is seekable
ss, err := stream.NewSeekableStream(fs, link)
}, link)
if err != nil {
_ = link.Close()
return nil, errors.WithMessagef(err, "failed get [%s] stream", srcObjPath)
}
return nil, op.Put(ctx, dstStorage, dstDirActualPath, ss, nil, false)
@@ -165,19 +162,17 @@ func copyFileBetween2Storages(tsk *CopyTask, srcStorage, dstStorage driver.Drive
return errors.WithMessagef(err, "failed get src [%s] file", srcFilePath)
}
tsk.SetTotalBytes(srcFile.GetSize())
link, _, err := op.Link(tsk.Ctx(), srcStorage, srcFilePath, model.LinkArgs{
Header: http.Header{},
})
link, _, err := op.Link(tsk.Ctx(), srcStorage, srcFilePath, model.LinkArgs{})
if err != nil {
return errors.WithMessagef(err, "failed get [%s] link", srcFilePath)
}
fs := stream.FileStream{
// any link provided is seekable
ss, err := stream.NewSeekableStream(&stream.FileStream{
Obj: srcFile,
Ctx: tsk.Ctx(),
}
// any link provided is seekable
ss, err := stream.NewSeekableStream(fs, link)
}, link)
if err != nil {
_ = link.Close()
return errors.WithMessagef(err, "failed get [%s] stream", srcFilePath)
}
return op.Put(tsk.Ctx(), dstStorage, dstDirPath, ss, tsk.SetProgress, true)

View File

@@ -3,7 +3,6 @@ package fs
import (
"context"
"fmt"
"net/http"
stdpath "path"
"sync"
"time"
@@ -346,23 +345,18 @@ func (t *MoveTask) copyFile(srcStorage, dstStorage driver.Driver, srcFilePath, d
return errors.WithMessagef(err, "failed get src [%s] file", srcFilePath)
}
link, _, err := op.Link(t.Ctx(), srcStorage, srcFilePath, model.LinkArgs{
Header: http.Header{},
})
link, _, err := op.Link(t.Ctx(), srcStorage, srcFilePath, model.LinkArgs{})
if err != nil {
return errors.WithMessagef(err, "failed get [%s] link", srcFilePath)
}
fs := stream.FileStream{
ss, err := stream.NewSeekableStream(&stream.FileStream{
Obj: srcFile,
Ctx: t.Ctx(),
}
ss, err := stream.NewSeekableStream(fs, link)
}, link)
if err != nil {
_ = link.Close()
return errors.WithMessagef(err, "failed get [%s] stream", srcFilePath)
}
return op.Put(t.Ctx(), dstStorage, dstDirPath, ss, nil, true)
}

View File

@@ -10,8 +10,8 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/task"
"github.com/pkg/errors"
"github.com/OpenListTeam/tache"
"github.com/pkg/errors"
)
type UploadTask struct {
@@ -73,9 +73,11 @@ func putAsTask(ctx context.Context, dstDirPath string, file model.FileStreamer)
func putDirectly(ctx context.Context, dstDirPath string, file model.FileStreamer, lazyCache ...bool) error {
storage, dstDirActualPath, err := op.GetStorageAndActualPath(dstDirPath)
if err != nil {
_ = file.Close()
return errors.WithMessage(err, "failed get storage")
}
if storage.Config().NoUpload {
_ = file.Close()
return errors.WithStack(errs.UploadNotSupported)
}
return op.Put(ctx, storage, dstDirActualPath, file, nil, lazyCache...)

View File

@@ -2,6 +2,7 @@ package model
import (
"context"
"errors"
"io"
"net/http"
"time"
@@ -24,16 +25,25 @@ type LinkArgs struct {
}
type Link struct {
URL string `json:"url"` // most common way
Header http.Header `json:"header"` // needed header (for url)
RangeReadCloser RangeReadCloserIF `json:"-"` // recommended way if can't use URL
MFile io.ReadSeeker `json:"-"` // best for local,smb... file system, which exposes MFile
URL string `json:"url"` // most common way
Header http.Header `json:"header"` // needed header (for url)
RangeReader RangeReaderIF `json:"-"` // recommended way if can't use URL
MFile File `json:"-"` // best for local,smb... file system, which exposes MFile
Expiration *time.Duration // local cache expire Duration
//for accelerating request, use multi-thread downloading
Concurrency int `json:"concurrency"`
PartSize int `json:"part_size"`
utils.SyncClosers `json:"-"`
}
func (l *Link) Close() error {
if clr, ok := l.MFile.(io.Closer); ok {
return errors.Join(clr.Close(), l.SyncClosers.Close())
}
return l.SyncClosers.Close()
}
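Link now owns its resources through the embedded utils.SyncClosers, so callers release it when done. A minimal caller-side sketch, assuming the op.Link call shape shown later in this diff:

link, _, err := op.Link(ctx, storage, path, model.LinkArgs{})
if err != nil {
	return err
}
// closes MFile (when it implements io.Closer) and all attached SyncClosers
defer link.Close()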
type OtherArgs struct {
@@ -74,23 +84,24 @@ type ArchiveDecompressArgs struct {
PutIntoNewDir bool
}
type RangeReadCloserIF interface {
type RangeReaderIF interface {
RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error)
}
type RangeReadCloserIF interface {
RangeReaderIF
utils.ClosersIF
}
var _ RangeReadCloserIF = (*RangeReadCloser)(nil)
type RangeReadCloser struct {
RangeReader RangeReaderFunc
RangeReader RangeReaderIF
utils.Closers
}
func (r *RangeReadCloser) RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
rc, err := r.RangeReader(ctx, httpRange)
r.Closers.Add(rc)
rc, err := r.RangeReader.RangeRead(ctx, httpRange)
r.Add(rc)
return rc, err
}
// type WriterFunc func(w io.Writer) error
type RangeReaderFunc func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error)

View File

@@ -1,6 +1,9 @@
package model
import "io"
import (
"errors"
"io"
)
// File is basic file level accessing interface
type File interface {
@@ -8,3 +11,22 @@ type File interface {
io.ReaderAt
io.Seeker
}
type FileCloser struct {
File
io.Closer
}
func (f *FileCloser) Close() error {
var errs []error
if clr, ok := f.File.(io.Closer); ok {
errs = append(errs, clr.Close())
}
if f.Closer != nil {
errs = append(errs, f.Closer.Close())
}
return errors.Join(errs...)
}
type FileRangeReader struct {
RangeReaderIF
}
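FileCloser lets a sub-range of a model.File travel as an io.ReadCloser without tearing down the backing file unless it is itself a Closer. A short sketch mirroring GetRangeReaderFromMFile at the end of this diff:

// serve bytes [start, start+length) of file as an io.ReadCloser
rc := &model.FileCloser{File: io.NewSectionReader(file, start, length)}
// no-op here: the section reader is not an io.Closer and no extra Closer was attached
defer rc.Close()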

View File

@@ -37,7 +37,7 @@ type Obj interface {
// FileStreamer ->check FileStream for more comments
type FileStreamer interface {
io.Reader
io.Closer
utils.ClosersIF
Obj
GetMimetype() string
//SetReader(io.Reader)

View File

@@ -12,6 +12,7 @@ import (
"sync"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
@@ -70,7 +71,7 @@ func (d Downloader) Download(ctx context.Context, p *HttpRequestParams) (readClo
var finalP HttpRequestParams
awsutil.Copy(&finalP, p)
if finalP.Range.Length == -1 {
if finalP.Range.Length < 0 || finalP.Range.Start+finalP.Range.Length > finalP.Size {
finalP.Range.Length = finalP.Size - finalP.Range.Start
}
impl := downloader{params: &finalP, cfg: d, ctx: ctx}
@@ -120,7 +121,7 @@ type ConcurrencyLimit struct {
Limit int // must be greater than 0
}
var ErrExceedMaxConcurrency = errors.New("ExceedMaxConcurrency")
var ErrExceedMaxConcurrency = ErrorHttpStatusCode(http.StatusTooManyRequests)
func (l *ConcurrencyLimit) sub() error {
l._m.Lock()
@@ -181,6 +182,7 @@ func (d *downloader) download() (io.ReadCloser, error) {
resp.Body = utils.NewReadCloser(resp.Body, func() error {
d.m.Lock()
defer d.m.Unlock()
var err error
if closeFunc != nil {
d.concurrencyFinish()
err = closeFunc()
@@ -199,7 +201,7 @@ func (d *downloader) download() (io.ReadCloser, error) {
d.pos = d.params.Range.Start
d.maxPos = d.params.Range.Start + d.params.Range.Length
d.concurrency = d.cfg.Concurrency
d.sendChunkTask(true)
_ = d.sendChunkTask(true)
var rc io.ReadCloser = NewMultiReadCloser(d.bufs[0], d.interrupt, d.finishBuf)
@@ -303,7 +305,7 @@ func (d *downloader) finishBuf(id int) (isLast bool, nextBuf *Buf) {
return true, nil
}
d.sendChunkTask(false)
_ = d.sendChunkTask(false)
d.readingID = id
return false, d.getBuf(id)
@@ -398,14 +400,15 @@ var errInfiniteRetry = errors.New("infinite retry")
func (d *downloader) tryDownloadChunk(params *HttpRequestParams, ch *chunk) (int64, error) {
resp, err := d.cfg.HttpClient(d.ctx, params)
if err != nil {
if resp == nil {
statusCode, ok := errors.Unwrap(err).(ErrorHttpStatusCode)
if !ok {
return 0, err
}
if resp.StatusCode == http.StatusRequestedRangeNotSatisfiable {
if statusCode == http.StatusRequestedRangeNotSatisfiable {
return 0, err
}
if ch.id == 0 { // first task: retries are limited; once exhausted, the request ends
switch resp.StatusCode {
switch statusCode {
default:
return 0, err
case http.StatusTooManyRequests:
@@ -414,7 +417,7 @@ func (d *downloader) tryDownloadChunk(params *HttpRequestParams, ch *chunk) (int
case http.StatusGatewayTimeout:
}
<-time.After(time.Millisecond * 200)
return 0, &errNeedRetry{err: fmt.Errorf("http request failure,status: %d", resp.StatusCode)}
return 0, &errNeedRetry{err: err}
}
// reaching here means the first chunk's download connection succeeded
@@ -450,7 +453,7 @@ func (d *downloader) tryDownloadChunk(params *HttpRequestParams, ch *chunk) (int
return 0, err
}
}
d.sendChunkTask(true)
_ = d.sendChunkTask(true)
n, err := utils.CopyWithBuffer(ch.buf, resp.Body)
if err != nil {
@@ -552,12 +555,26 @@ type chunk struct {
func DefaultHttpRequestFunc(ctx context.Context, params *HttpRequestParams) (*http.Response, error) {
header := http_range.ApplyRangeToHttpHeader(params.Range, params.HeaderRef)
res, err := RequestHttp(ctx, "GET", header, params.URL)
if err != nil {
return res, err
}
return res, nil
}
return RequestHttp(ctx, "GET", header, params.URL)
}
func GetRangeReaderHttpRequestFunc(rangeReader model.RangeReaderIF) HttpRequestFunc {
return func(ctx context.Context, params *HttpRequestParams) (*http.Response, error) {
rc, err := rangeReader.RangeRead(ctx, params.Range)
if err != nil {
return nil, err
}
return &http.Response{
StatusCode: http.StatusPartialContent,
Status: http.StatusText(http.StatusPartialContent),
Body: rc,
Header: http.Header{
"Content-Range": {params.Range.ContentRange(params.Size)},
},
ContentLength: params.Range.Length,
}, nil
}
}
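GetRangeReaderHttpRequestFunc lets the concurrent downloader treat a driver's RangeReader as a remote server that always answers 206 Partial Content. It is wired up in stream.GetRangeReaderFromLink further down in this diff:

down := net.NewDownloader(func(d *net.Downloader) {
	d.Concurrency = link.Concurrency
	d.PartSize = link.PartSize
})
// chunk requests are served by the driver's RangeReader instead of HTTP GETs
down.HttpClient = net.GetRangeReaderHttpRequestFunc(link.RangeReader)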
type HttpRequestParams struct {

View File

@@ -114,14 +114,14 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
// use the request's Context
// otherwise, when sendContent yields no data, CopyBuffer would keep blocking even after the request disconnects
ctx := context.WithValue(r.Context(), "request_header", r.Header)
ctx := r.Context()
switch {
case len(ranges) == 0:
reader, err := RangeReadCloser.RangeRead(ctx, http_range.Range{Length: -1})
if err != nil {
code = http.StatusRequestedRangeNotSatisfiable
if errors.Is(err, ErrExceedMaxConcurrency) {
code = http.StatusTooManyRequests
if statusCode, ok := errors.Unwrap(err).(ErrorHttpStatusCode); ok {
code = int(statusCode)
}
http.Error(w, err.Error(), code)
return nil
@@ -143,8 +143,8 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
sendContent, err = RangeReadCloser.RangeRead(ctx, ra)
if err != nil {
code = http.StatusRequestedRangeNotSatisfiable
if errors.Is(err, ErrExceedMaxConcurrency) {
code = http.StatusTooManyRequests
if statusCode, ok := errors.Unwrap(err).(ErrorHttpStatusCode); ok {
code = int(statusCode)
}
http.Error(w, err.Error(), code)
return nil
@@ -205,8 +205,8 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
log.Warnf("Maybe size incorrect or reader not giving correct/full data, or connection closed before finish. written bytes: %d ,sendSize:%d, ", written, sendSize)
}
code = http.StatusInternalServerError
if errors.Is(err, ErrExceedMaxConcurrency) {
code = http.StatusTooManyRequests
if statusCode, ok := errors.Unwrap(err).(ErrorHttpStatusCode); ok {
code = int(statusCode)
}
w.WriteHeader(code)
return err
@@ -259,11 +259,17 @@ func RequestHttp(ctx context.Context, httpMethod string, headerOverride http.Hea
_ = res.Body.Close()
msg := string(all)
log.Debugln(msg)
return res, fmt.Errorf("http request [%s] failure,status: %d response:%s", URL, res.StatusCode, msg)
return nil, fmt.Errorf("http request [%s] failure,status: %w response:%s", URL, ErrorHttpStatusCode(res.StatusCode), msg)
}
return res, nil
}
type ErrorHttpStatusCode int
func (e ErrorHttpStatusCode) Error() string {
return fmt.Sprintf("%d|%s", e, http.StatusText(int(e)))
}
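Since RequestHttp now wraps the status code with %w, callers map a failure back to an HTTP code by unwrapping, which is exactly what the ServeHTTP branches above do:

if statusCode, ok := errors.Unwrap(err).(ErrorHttpStatusCode); ok {
	code = int(statusCode) // e.g. 429 for ErrExceedMaxConcurrency
}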
var once sync.Once
var httpClient *http.Client

View File

@@ -350,3 +350,5 @@ func GetRangedHttpReader(readCloser io.ReadCloser, offset, length int64) (io.Rea
// return an io.ReadCloser that is limited to `length` bytes.
return &LimitedReadCloser{readCloser, length_int}, nil
}
type RequestHeaderKey struct{}
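RequestHeaderKey replaces the old string context key "request_header". A hedged sketch of the assumed round trip (the handler side that stores the header is not shown in this diff):

// producer (handler side, assumed): stash the inbound request headers
ctx := context.WithValue(r.Context(), net.RequestHeaderKey{}, r.Header)

// consumer (see stream.GetRangeReaderFromLink below): forward them to the source
requestHeader, _ := ctx.Value(net.RequestHeaderKey{}).(http.Header)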

View File

@@ -3,7 +3,6 @@ package tool
import (
"context"
"fmt"
"net/http"
"os"
stdpath "path"
"path/filepath"
@@ -43,11 +42,11 @@ func (t *TransferTask) Run() error {
defer func() { t.SetEndTime(time.Now()) }()
if t.SrcStorage == nil {
if t.DeletePolicy == UploadDownloadStream {
rrc, err := stream.GetRangeReadCloserFromLink(t.GetTotalBytes(), &model.Link{URL: t.Url})
rr, err := stream.GetRangeReaderFromLink(t.GetTotalBytes(), &model.Link{URL: t.Url})
if err != nil {
return err
}
r, err := rrc.RangeRead(t.Ctx(), http_range.Range{Length: t.GetTotalBytes()})
r, err := rr.RangeRead(t.Ctx(), http_range.Range{Length: t.GetTotalBytes()})
if err != nil {
return err
}
@@ -63,9 +62,8 @@ func (t *TransferTask) Run() error {
},
Reader: r,
Mimetype: mimetype,
Closers: utils.NewClosers(rrc),
Closers: utils.NewClosers(r),
}
defer s.Close()
return op.Put(t.Ctx(), t.DstStorage, t.DstDirPath, s, t.SetProgress)
}
return transferStdPath(t)
@@ -279,19 +277,17 @@ func transferObjFile(t *TransferTask) error {
if err != nil {
return errors.WithMessagef(err, "failed get src [%s] file", t.SrcObjPath)
}
link, _, err := op.Link(t.Ctx(), t.SrcStorage, t.SrcObjPath, model.LinkArgs{
Header: http.Header{},
})
link, _, err := op.Link(t.Ctx(), t.SrcStorage, t.SrcObjPath, model.LinkArgs{})
if err != nil {
return errors.WithMessagef(err, "failed get [%s] link", t.SrcObjPath)
}
fs := stream.FileStream{
// any link provided is seekable
ss, err := stream.NewSeekableStream(&stream.FileStream{
Obj: srcFile,
Ctx: t.Ctx(),
}
// any link provided is seekable
ss, err := stream.NewSeekableStream(fs, link)
}, link)
if err != nil {
_ = link.Close()
return errors.WithMessagef(err, "failed get [%s] stream", t.SrcObjPath)
}
t.SetTotalBytes(srcFile.GetSize())

View File

@@ -31,12 +31,6 @@ func GetArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg
}
path = utils.FixAndCleanPath(path)
key := Key(storage, path)
if !args.Refresh {
if meta, ok := archiveMetaCache.Get(key); ok {
log.Debugf("use cache when get %s archive meta", path)
return meta, nil
}
}
fn := func() (*model.ArchiveMetaProvider, error) {
_, m, err := getArchiveMeta(ctx, storage, path, args)
if err != nil {
@@ -47,10 +41,16 @@
}
return m, nil
}
if storage.Config().OnlyLocal {
if storage.Config().OnlyLinkMFile {
meta, err := fn()
return meta, err
}
if !args.Refresh {
if meta, ok := archiveMetaCache.Get(key); ok {
log.Debugf("use cache when get %s archive meta", path)
return meta, nil
}
}
meta, err, _ := archiveMetaG.Do(key, fn)
return meta, err
}
@@ -62,12 +62,7 @@ func GetArchiveToolAndStream(ctx context.Context, storage driver.Driver, path st
}
baseName, ext, found := strings.Cut(obj.GetName(), ".")
if !found {
if clr, ok := l.MFile.(io.Closer); ok {
_ = clr.Close()
}
if l.RangeReadCloser != nil {
_ = l.RangeReadCloser.Close()
}
_ = l.Close()
return nil, nil, nil, errors.Errorf("failed get archive tool: the obj does not have an extension.")
}
partExt, t, err := tool.GetArchiveTool("." + ext)
@@ -75,23 +70,13 @@ func GetArchiveToolAndStream(ctx context.Context, storage driver.Driver, path st
var e error
partExt, t, e = tool.GetArchiveTool(stdpath.Ext(obj.GetName()))
if e != nil {
if clr, ok := l.MFile.(io.Closer); ok {
_ = clr.Close()
}
if l.RangeReadCloser != nil {
_ = l.RangeReadCloser.Close()
}
_ = l.Close()
return nil, nil, nil, errors.WithMessagef(stderrors.Join(err, e), "failed get archive tool: %s", ext)
}
}
ss, err := stream.NewSeekableStream(stream.FileStream{Ctx: ctx, Obj: obj}, l)
ss, err := stream.NewSeekableStream(&stream.FileStream{Ctx: ctx, Obj: obj}, l)
if err != nil {
if clr, ok := l.MFile.(io.Closer); ok {
_ = clr.Close()
}
if l.RangeReadCloser != nil {
_ = l.RangeReadCloser.Close()
}
_ = l.Close()
return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] stream", path)
}
ret := []*stream.SeekableStream{ss}
@@ -107,14 +92,9 @@ func GetArchiveToolAndStream(ctx context.Context, storage driver.Driver, path st
if err != nil {
break
}
ss, err = stream.NewSeekableStream(stream.FileStream{Ctx: ctx, Obj: o}, l)
ss, err = stream.NewSeekableStream(&stream.FileStream{Ctx: ctx, Obj: o}, l)
if err != nil {
if clr, ok := l.MFile.(io.Closer); ok {
_ = clr.Close()
}
if l.RangeReadCloser != nil {
_ = l.RangeReadCloser.Close()
}
_ = l.Close()
for _, s := range ret {
_ = s.Close()
}
@@ -375,12 +355,12 @@ func ArchiveGet(ctx context.Context, storage driver.Driver, path string, args mo
}
type extractLink struct {
Link *model.Link
Obj model.Obj
*model.Link
Obj model.Obj
}
var extractCache = cache.NewMemCache(cache.WithShards[*extractLink](16))
var extractG singleflight.Group[*extractLink]
var extractG = singleflight.Group[*extractLink]{Remember: true}
func DriverExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (*model.Link, model.Obj, error) {
if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
@@ -389,9 +369,9 @@ func DriverExtract(ctx context.Context, storage driver.Driver, path string, args
key := stdpath.Join(Key(storage, path), args.InnerPath)
if link, ok := extractCache.Get(key); ok {
return link.Link, link.Obj, nil
} else if link, ok := extractCache.Get(key + ":" + args.IP); ok {
return link.Link, link.Obj, nil
}
var forget utils.CloseFunc
fn := func() (*extractLink, error) {
link, err := driverExtract(ctx, storage, path, args)
if err != nil {
@@ -400,16 +380,33 @@ func DriverExtract(ctx context.Context, storage driver.Driver, path string, args
if link.Link.Expiration != nil {
extractCache.Set(key, link, cache.WithEx[*extractLink](*link.Link.Expiration))
}
link.Add(forget)
return link, nil
}
if storage.Config().OnlyLocal {
if storage.Config().OnlyLinkMFile {
link, err := fn()
if err != nil {
return nil, nil, err
}
return link.Link, link.Obj, nil
}
forget = func() error {
if forget != nil {
forget = nil
extractG.Forget(key)
}
return nil
}
link, err, _ := extractG.Do(key, fn)
if err == nil && !link.AcquireReference() {
link, err, _ = extractG.Do(key, fn)
if err == nil {
link.AcquireReference()
}
}
if err != nil {
return nil, nil, err
}

View File

@@ -81,7 +81,15 @@ func getMainItems(config driver.Config) []driver.Item {
Help: "The cache expiration time for this storage",
})
}
if !config.OnlyProxy && !config.OnlyLocal {
if config.MustProxy() {
items = append(items, driver.Item{
Name: "webdav_policy",
Type: conf.TypeSelect,
Default: "native_proxy",
Options: "use_proxy_url,native_proxy",
Required: true,
})
} else {
items = append(items, []driver.Item{{
Name: "web_proxy",
Type: conf.TypeBool,
@@ -104,14 +112,6 @@ func getMainItems(config driver.Config) []driver.Item {
}
items = append(items, item)
}
} else {
items = append(items, driver.Item{
Name: "webdav_policy",
Type: conf.TypeSelect,
Default: "native_proxy",
Options: "use_proxy_url,native_proxy",
Required: true,
})
}
items = append(items, driver.Item{
Name: "down_proxy_url",

View File

@@ -244,7 +244,7 @@ func GetUnwrap(ctx context.Context, storage driver.Driver, path string) (model.O
}
var linkCache = cache.NewMemCache(cache.WithShards[*model.Link](16))
var linkG singleflight.Group[*model.Link]
var linkG = singleflight.Group[*model.Link]{Remember: true}
// Link get link, if is an url. should have an expiry time
func Link(ctx context.Context, storage driver.Driver, path string, args model.LinkArgs) (*model.Link, model.Obj, error) {
@@ -262,6 +262,8 @@ func Link(ctx context.Context, storage driver.Driver, path string, args model.Li
if link, ok := linkCache.Get(key); ok {
return link, file, nil
}
var forget utils.CloseFunc
fn := func() (*model.Link, error) {
link, err := storage.Link(ctx, file, args)
if err != nil {
@@ -270,15 +272,29 @@ func Link(ctx context.Context, storage driver.Driver, path string, args model.Li
if link.Expiration != nil {
linkCache.Set(key, link, cache.WithEx[*model.Link](*link.Expiration))
}
link.Add(forget)
return link, nil
}
if storage.Config().OnlyLocal {
if storage.Config().OnlyLinkMFile {
link, err := fn()
return link, file, err
}
forget = func() error {
if forget != nil {
forget = nil
linkG.Forget(key)
}
return nil
}
link, err, _ := linkG.Do(key, fn)
if err == nil && !link.AcquireReference() {
link, err, _ = linkG.Do(key, fn)
if err == nil {
link.AcquireReference()
}
}
return link, file, err
}
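Together, Remember-mode singleflight, the forget callback, and AcquireReference implement a reference-counted shared link: concurrent requests receive the same *model.Link, each consumer holds a reference, and the last Close runs forget so the key is rebuilt on the next request. A self-contained sketch of the assumed utils.SyncClosers semantics (not the actual implementation):

import (
	"errors"
	"io"
	"sync/atomic"
)

type refClosers struct {
	refs    atomic.Int32 // the creator holds the first reference
	closers []io.Closer
}

func newRefClosers(cs ...io.Closer) *refClosers {
	r := &refClosers{closers: cs}
	r.refs.Store(1)
	return r
}

// AcquireReference fails once the resource is fully closed, which is why
// Link above retries the singleflight lookup when it returns false.
func (r *refClosers) AcquireReference() bool {
	for {
		n := r.refs.Load()
		if n <= 0 {
			return false
		}
		if r.refs.CompareAndSwap(n, n+1) {
			return true
		}
	}
}

// Close releases one reference; the final release closes everything,
// including the forget callback registered via Add.
func (r *refClosers) Close() error {
	if r.refs.Add(-1) > 0 {
		return nil
	}
	var errs []error
	for _, c := range r.closers {
		if c != nil {
			errs = append(errs, c.Close())
		}
	}
	return errors.Join(errs...)
}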
@@ -507,14 +523,15 @@ func Remove(ctx context.Context, storage driver.Driver, path string) error {
}
func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file model.FileStreamer, up driver.UpdateProgress, lazyCache ...bool) error {
if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
return errors.Errorf("storage not init: %s", storage.GetStorage().Status)
}
close := file.Close
defer func() {
if err := file.Close(); err != nil {
if err := close(); err != nil {
log.Errorf("failed to close file streamer, %v", err)
}
}()
if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
return errors.Errorf("storage not init: %s", storage.GetStorage().Status)
}
// UrlTree PUT
if storage.GetStorage().Driver == "UrlTree" {
var link string

View File

@@ -142,19 +142,19 @@ func (r *RateLimitFile) Close() error {
return nil
}
type RateLimitRangeReadCloser struct {
model.RangeReadCloserIF
Limiter Limiter
}
type RateLimitRangeReaderFunc RangeReaderFunc
func (rrc *RateLimitRangeReadCloser) RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
rc, err := rrc.RangeReadCloserIF.RangeRead(ctx, httpRange)
func (f RateLimitRangeReaderFunc) RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
rc, err := f(ctx, httpRange)
if err != nil {
return nil, err
}
return &RateLimitReader{
Reader: rc,
Limiter: rrc.Limiter,
Ctx: ctx,
}, nil
if ServerDownloadLimit != nil {
rc = &RateLimitReader{
Ctx: ctx,
Reader: rc,
Limiter: ServerDownloadLimit,
}
}
return rc, nil
}

View File

@@ -110,8 +110,7 @@ const InMemoryBufMaxSizeBytes = InMemoryBufMaxSize * 1024 * 1024
// RangeRead have to cache all data first since only Reader is provided.
// also support a peeking RangeRead at very start, but won't buffer more than 10MB data in memory
func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
if httpRange.Length == -1 {
// see internal/net/request.go
if httpRange.Length < 0 || httpRange.Start+httpRange.Length > f.GetSize() {
httpRange.Length = f.GetSize() - httpRange.Start
}
var cache io.ReaderAt = f.GetFile()
@@ -159,47 +158,40 @@ var _ model.FileStreamer = (*FileStream)(nil)
// additional resources that need to be closed, they should be added to the Closer property of
// the SeekableStream object and be closed together when the SeekableStream object is closed.
type SeekableStream struct {
FileStream
*FileStream
// should have one of belows to support rangeRead
rangeReadCloser model.RangeReadCloserIF
}
func NewSeekableStream(fs FileStream, link *model.Link) (*SeekableStream, error) {
func NewSeekableStream(fs *FileStream, link *model.Link) (*SeekableStream, error) {
if len(fs.Mimetype) == 0 {
fs.Mimetype = utils.GetMimeType(fs.Obj.GetName())
}
ss := &SeekableStream{FileStream: fs}
if ss.Reader != nil {
ss.TryAdd(ss.Reader)
return ss, nil
if fs.Reader != nil {
fs.Add(link)
return &SeekableStream{FileStream: fs}, nil
}
if link != nil {
if link.MFile != nil {
ss.Closers.TryAdd(link.MFile)
ss.Reader = link.MFile
return ss, nil
rr, err := GetRangeReaderFromLink(fs.GetSize(), link)
if err != nil {
return nil, err
}
if link.RangeReadCloser != nil {
ss.rangeReadCloser = &RateLimitRangeReadCloser{
RangeReadCloserIF: link.RangeReadCloser,
Limiter: ServerDownloadLimit,
}
ss.Add(ss.rangeReadCloser)
return ss, nil
}
if len(link.URL) > 0 {
rrc, err := GetRangeReadCloserFromLink(ss.GetSize(), link)
if _, ok := rr.(*model.FileRangeReader); ok {
fs.Reader, err = rr.RangeRead(fs.Ctx, http_range.Range{Length: -1})
if err != nil {
return nil, err
}
rrc = &RateLimitRangeReadCloser{
RangeReadCloserIF: rrc,
Limiter: ServerDownloadLimit,
}
ss.rangeReadCloser = rrc
ss.Add(rrc)
return ss, nil
fs.Add(link)
return &SeekableStream{FileStream: fs}, nil
}
rrc := &model.RangeReadCloser{
RangeReader: rr,
}
fs.Add(link)
fs.Add(rrc)
return &SeekableStream{FileStream: fs, rangeReadCloser: rrc}, nil
}
return nil, fmt.Errorf("illegal seekableStream")
}
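Caller-side pattern for the new signature, used by the fs helpers earlier in this diff: FileStream is passed by pointer, the Link is handed over for lifetime management, and the caller closes the link itself only on failure:

ss, err := stream.NewSeekableStream(&stream.FileStream{
	Obj: srcObj,
	Ctx: ctx,
}, link)
if err != nil {
	_ = link.Close()
	return err
}
defer ss.Close() // also releases the link via the attached closers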
@@ -211,9 +203,6 @@ func NewSeekableStream(fs FileStream, link *model.Link) (*SeekableStream, error)
// RangeRead is not thread-safe, pls use it in single thread only.
func (ss *SeekableStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
if ss.tmpFile == nil && ss.rangeReadCloser != nil {
if httpRange.Length == -1 {
httpRange.Length = ss.GetSize() - httpRange.Start
}
rc, err := ss.rangeReadCloser.RangeRead(ss.Ctx, httpRange)
if err != nil {
return nil, err
@@ -229,10 +218,6 @@
// only provide Reader as full stream when it's demanded. in rapid-upload, we can skip this to save memory
func (ss *SeekableStream) Read(p []byte) (n int, err error) {
//f.mu.Lock()
//f.peekedOnce = true
//defer f.mu.Unlock()
if ss.Reader == nil {
if ss.rangeReadCloser == nil {
return 0, fmt.Errorf("illegal seekableStream")
@@ -241,7 +226,7 @@ func (ss *SeekableStream) Read(p []byte) (n int, err error) {
if err != nil {
return 0, nil
}
ss.Reader = io.NopCloser(rc)
ss.Reader = rc
}
return ss.Reader.Read(p)
}
@@ -496,7 +481,7 @@ func (r *RangeReadReadAtSeeker) Seek(offset int64, whence int) (int64, error) {
return r.masterOff, errors.New("invalid seek: negative position")
}
if offset > r.ss.GetSize() {
return r.masterOff, io.EOF
offset = r.ss.GetSize()
}
r.masterOff = offset
return offset, nil

View File

@@ -3,6 +3,7 @@ package stream
import (
"context"
"encoding/hex"
"errors"
"fmt"
"io"
"net/http"
@@ -14,57 +15,93 @@ import (
log "github.com/sirupsen/logrus"
)
func GetRangeReadCloserFromLink(size int64, link *model.Link) (model.RangeReadCloserIF, error) {
if len(link.URL) == 0 {
return nil, fmt.Errorf("can't create RangeReadCloser since URL is empty in link")
}
rangeReaderFunc := func(ctx context.Context, r http_range.Range) (io.ReadCloser, error) {
if link.Concurrency > 0 || link.PartSize > 0 {
header := net.ProcessHeader(nil, link.Header)
down := net.NewDownloader(func(d *net.Downloader) {
d.Concurrency = link.Concurrency
d.PartSize = link.PartSize
})
req := &net.HttpRequestParams{
URL: link.URL,
Range: r,
Size: size,
HeaderRef: header,
}
rc, err := down.Download(ctx, req)
return rc, err
type RangeReaderFunc func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error)
}
response, err := RequestRangedHttp(ctx, link, r.Start, r.Length)
if err != nil {
if response == nil {
return nil, fmt.Errorf("http request failure, err:%s", err)
func (f RangeReaderFunc) RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
return f(ctx, httpRange)
}
func GetRangeReaderFromLink(size int64, link *model.Link) (model.RangeReaderIF, error) {
if link.MFile != nil {
return &model.FileRangeReader{RangeReaderIF: GetRangeReaderFromMFile(size, link.MFile)}, nil
}
if link.Concurrency > 0 || link.PartSize > 0 {
down := net.NewDownloader(func(d *net.Downloader) {
d.Concurrency = link.Concurrency
d.PartSize = link.PartSize
})
var rangeReader RangeReaderFunc = func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
var req *net.HttpRequestParams
if link.RangeReader != nil {
req = &net.HttpRequestParams{
Range: httpRange,
Size: size,
}
} else {
requestHeader, _ := ctx.Value(net.RequestHeaderKey{}).(http.Header)
header := net.ProcessHeader(requestHeader, link.Header)
req = &net.HttpRequestParams{
Range: httpRange,
Size: size,
URL: link.URL,
HeaderRef: header,
}
}
return nil, err
return down.Download(ctx, req)
}
if r.Start == 0 && (r.Length == -1 || r.Length == size) || response.StatusCode == http.StatusPartialContent ||
checkContentRange(&response.Header, r.Start) {
if link.RangeReader != nil {
down.HttpClient = net.GetRangeReaderHttpRequestFunc(link.RangeReader)
return rangeReader, nil
}
return RateLimitRangeReaderFunc(rangeReader), nil
}
if link.RangeReader != nil {
return link.RangeReader, nil
}
if len(link.URL) == 0 {
return nil, errors.New("invalid link: must have at least one of MFile, URL, or RangeReader")
}
rangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
if httpRange.Length < 0 || httpRange.Start+httpRange.Length > size {
httpRange.Length = size - httpRange.Start
}
requestHeader, _ := ctx.Value(net.RequestHeaderKey{}).(http.Header)
header := net.ProcessHeader(requestHeader, link.Header)
header = http_range.ApplyRangeToHttpHeader(httpRange, header)
response, err := net.RequestHttp(ctx, "GET", header, link.URL)
if err != nil {
if _, ok := errors.Unwrap(err).(net.ErrorHttpStatusCode); ok {
return nil, err
}
return nil, fmt.Errorf("http request failure, err:%w", err)
}
if httpRange.Start == 0 && (httpRange.Length == -1 || httpRange.Length == size) || response.StatusCode == http.StatusPartialContent ||
checkContentRange(&response.Header, httpRange.Start) {
return response.Body, nil
} else if response.StatusCode == http.StatusOK {
log.Warnf("remote http server not supporting range request, expect low perfromace!")
readCloser, err := net.GetRangedHttpReader(response.Body, r.Start, r.Length)
readCloser, err := net.GetRangedHttpReader(response.Body, httpRange.Start, httpRange.Length)
if err != nil {
return nil, err
}
return readCloser, nil
}
return response.Body, nil
}
resultRangeReadCloser := model.RangeReadCloser{RangeReader: rangeReaderFunc}
return &resultRangeReadCloser, nil
return RateLimitRangeReaderFunc(rangeReader), nil
}
func RequestRangedHttp(ctx context.Context, link *model.Link, offset, length int64) (*http.Response, error) {
header := net.ProcessHeader(nil, link.Header)
header = http_range.ApplyRangeToHttpHeader(http_range.Range{Start: offset, Length: length}, header)
return net.RequestHttp(ctx, "GET", header, link.URL)
func GetRangeReaderFromMFile(size int64, file model.File) RangeReaderFunc {
return func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
length := httpRange.Length
if length < 0 || httpRange.Start+length > size {
length = size - httpRange.Start
}
return &model.FileCloser{File: io.NewSectionReader(file, httpRange.Start, length)}, nil
}
}
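Usage sketch: an MFile-backed link serves any sub-range through a section reader, with no network involved; a Length of -1 is clamped to the remainder of the file:

rr := stream.GetRangeReaderFromMFile(size, file)
rc, err := rr.RangeRead(ctx, http_range.Range{Start: 0, Length: -1})
if err != nil {
	return err
}
defer rc.Close()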
// 139 cloud does not properly return 206 http status code, add a hack here