优化,修复 bug

This commit is contained in:
j2rong4cn
2025-07-11 23:49:58 +08:00
parent 6d3329b595
commit 713b6f6919
8 changed files with 38 additions and 43 deletions

View File

@ -264,26 +264,20 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
mu := &sync.Mutex{}
var fileHeader []byte
rangeReaderFunc := func(ctx context.Context, offset, length int64) (io.ReadCloser, error) {
underlyingLength := length
var cacheFileHeader, readFileHeader bool
if offset == 0 && length > 0 {
cacheFileHeader = length > fileHeaderSize
readFileHeader = length <= fileHeaderSize
}
if readFileHeader || cacheFileHeader {
rangeReaderFunc := func(ctx context.Context, offset, limit int64) (io.ReadCloser, error) {
length := limit
if offset == 0 && limit > 0 {
mu.Lock()
if readFileHeader {
if limit <= fileHeaderSize {
defer mu.Unlock()
if fileHeader != nil {
return io.NopCloser(bytes.NewReader(fileHeader[:length])), nil
return io.NopCloser(bytes.NewReader(fileHeader[:limit])), nil
}
length = fileHeaderSize
} else if fileHeader != nil {
mu.Unlock()
cacheFileHeader = false
} else {
} else if fileHeader == nil {
defer mu.Unlock()
} else {
mu.Unlock()
}
}
@ -292,16 +286,16 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
return nil, err
}
if readFileHeader || cacheFileHeader {
if offset == 0 && limit > 0 {
fileHeader = make([]byte, fileHeaderSize)
n, _ := io.ReadFull(remoteReader, fileHeader)
if n != fileHeaderSize {
fileHeader = nil
return nil, fmt.Errorf("can't read data, expected=%d, got=%d", fileHeaderSize, n)
}
if readFileHeader {
if limit <= fileHeaderSize {
remoteReader.Close()
return io.NopCloser(bytes.NewReader(fileHeader[:underlyingLength])), nil
return io.NopCloser(bytes.NewReader(fileHeader[:limit])), nil
} else {
remoteReader = utils.ReadCloser{
Reader: io.MultiReader(bytes.NewReader(fileHeader), remoteReader),

View File

@ -5,7 +5,6 @@ import (
"context"
"errors"
"fmt"
"io"
"io/fs"
"net/http"
"os"
@ -255,9 +254,7 @@ func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
link.MFile = open
}
if link.MFile != nil && !d.Config().OnlyLinkMFile {
if clr, ok := link.MFile.(io.Closer); ok {
link.Add(clr)
}
link.AddIfCloser(link.MFile)
link.RangeReader = &model.FileRangeReader{
RangeReaderIF: stream.GetRangeReaderFromMFile(file.GetSize(), link.MFile),
}

View File

@ -17,14 +17,14 @@ type FileCloser struct {
}
func (f *FileCloser) Close() error {
var err error
var errs []error
if clr, ok := f.File.(io.Closer); ok {
err = errors.Join(err, clr.Close())
errs = append(errs, clr.Close())
}
if f.Closer != nil {
err = errors.Join(err, f.Closer.Close())
errs = append(errs, f.Closer.Close())
}
return err
return errors.Join(errs...)
}
type FileRangeReader struct {

View File

@ -121,7 +121,7 @@ type ConcurrencyLimit struct {
Limit int // 需要大于0
}
var ErrExceedMaxConcurrency = errors.New("ExceedMaxConcurrency")
var ErrExceedMaxConcurrency = ErrorHttpStatusCode(http.StatusTooManyRequests)
func (l *ConcurrencyLimit) sub() error {
l._m.Lock()
@ -201,7 +201,7 @@ func (d *downloader) download() (io.ReadCloser, error) {
d.pos = d.params.Range.Start
d.maxPos = d.params.Range.Start + d.params.Range.Length
d.concurrency = d.cfg.Concurrency
d.sendChunkTask(true)
_ = d.sendChunkTask(true)
var rc io.ReadCloser = NewMultiReadCloser(d.bufs[0], d.interrupt, d.finishBuf)
@ -305,7 +305,7 @@ func (d *downloader) finishBuf(id int) (isLast bool, nextBuf *Buf) {
return true, nil
}
d.sendChunkTask(false)
_ = d.sendChunkTask(false)
d.readingID = id
return false, d.getBuf(id)
@ -453,7 +453,7 @@ func (d *downloader) tryDownloadChunk(params *HttpRequestParams, ch *chunk) (int
return 0, err
}
}
d.sendChunkTask(true)
_ = d.sendChunkTask(true)
n, err := utils.CopyWithBuffer(ch.buf, resp.Body)
if err != nil {

View File

@ -120,9 +120,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
reader, err := RangeReadCloser.RangeRead(ctx, http_range.Range{Length: -1})
if err != nil {
code = http.StatusRequestedRangeNotSatisfiable
if errors.Is(err, ErrExceedMaxConcurrency) {
code = http.StatusTooManyRequests
} else if statusCode, ok := errors.Unwrap(err).(ErrorHttpStatusCode); ok {
if statusCode, ok := errors.Unwrap(err).(ErrorHttpStatusCode); ok {
code = int(statusCode)
}
http.Error(w, err.Error(), code)
@ -145,9 +143,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
sendContent, err = RangeReadCloser.RangeRead(ctx, ra)
if err != nil {
code = http.StatusRequestedRangeNotSatisfiable
if errors.Is(err, ErrExceedMaxConcurrency) {
code = http.StatusTooManyRequests
} else if statusCode, ok := errors.Unwrap(err).(ErrorHttpStatusCode); ok {
if statusCode, ok := errors.Unwrap(err).(ErrorHttpStatusCode); ok {
code = int(statusCode)
}
http.Error(w, err.Error(), code)
@ -209,9 +205,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
log.Warnf("Maybe size incorrect or reader not giving correct/full data, or connection closed before finish. written bytes: %d ,sendSize:%d, ", written, sendSize)
}
code = http.StatusInternalServerError
if errors.Is(err, ErrExceedMaxConcurrency) {
code = http.StatusTooManyRequests
} else if statusCode, ok := errors.Unwrap(err).(ErrorHttpStatusCode); ok {
if statusCode, ok := errors.Unwrap(err).(ErrorHttpStatusCode); ok {
code = int(statusCode)
}
w.WriteHeader(code)
@ -265,7 +259,7 @@ func RequestHttp(ctx context.Context, httpMethod string, headerOverride http.Hea
_ = res.Body.Close()
msg := string(all)
log.Debugln(msg)
return nil, fmt.Errorf("http request [%s] failure,status: %d response:%s", URL, ErrorHttpStatusCode(res.StatusCode), msg)
return nil, fmt.Errorf("http request [%s] failure,status: %w response:%s", URL, ErrorHttpStatusCode(res.StatusCode), msg)
}
return res, nil
}

View File

@ -73,10 +73,10 @@ func GetRangeReaderFromLink(size int64, link *model.Link) (model.RangeReaderIF,
response, err := net.RequestHttp(ctx, "GET", header, link.URL)
if err != nil {
if _, ok := errors.Unwrap(err).(net.ErrorHttpStatusCode); !ok {
return nil, fmt.Errorf("http request failure, err:%s", err)
if _, ok := errors.Unwrap(err).(net.ErrorHttpStatusCode); ok {
return nil, err
}
return nil, err
return nil, fmt.Errorf("http request failure, err:%w", err)
}
if httpRange.Start == 0 && (httpRange.Length == -1 || httpRange.Length == size) || response.StatusCode == http.StatusPartialContent ||
checkContentRange(&response.Header, httpRange.Start) {

View File

@ -2,6 +2,7 @@ package handles
import (
"bytes"
"errors"
"fmt"
stdpath "path"
"strconv"
@ -11,6 +12,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/fs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/net"
"github.com/OpenListTeam/OpenList/v4/internal/setting"
"github.com/OpenListTeam/OpenList/v4/internal/sign"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
@ -154,7 +156,11 @@ func proxy(c *gin.Context, link *model.Link, file model.Obj, proxyRange bool) {
if Writer.IsWritten() {
log.Errorf("%s %s local proxy error: %+v", c.Request.Method, c.Request.URL.Path, err)
} else {
common.ErrorResp(c, err, 500, true)
if statusCode, ok := errors.Unwrap(err).(net.ErrorHttpStatusCode); ok {
common.ErrorResp(c, err, int(statusCode), true)
} else {
common.ErrorResp(c, err, 500, true)
}
}
}

View File

@ -17,6 +17,7 @@ import (
"strings"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/net"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
@ -252,6 +253,9 @@ func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (sta
}
err = common.Proxy(w, r, link, fi)
if err != nil {
if statusCode, ok := errors.Unwrap(err).(net.ErrorHttpStatusCode); ok {
return int(statusCode), err
}
return http.StatusInternalServerError, fmt.Errorf("webdav proxy error: %+v", err)
}
} else if storage.GetStorage().WebdavProxy() && downProxyUrl != "" {