fix(net): ensure accurate content-length in response (#749)

* fix(fs): ensure accurate content-length in http2 requests

Chrome browsers were unable to preview thumbnails, reporting an
'ERR_HTTP_2_PROTOCOL_ERROR'. This was caused by an incorrect
content-length header in the server's response for thumbnail images.

This commit corrects the content-length calculation, allowing
Chrome and other compliant clients to render thumbnails correctly
(see the size-fallback sketch after the commit metadata below).

* fix(net): ensure accurate content-length in response

* Fill in missing pieces

* .

---------

Co-authored-by: zhiqiang.huang <zhiqiang.tech@gmail.com>
Co-authored-by: j2rong4cn <j2rong@qq.com>
Authored by pnparadise on 2025-07-19 20:36:27 +08:00
committed by GitHub
parent 648079ae24
commit 86324d2d6b
15 changed files with 105 additions and 51 deletions
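
The recurring pattern in the file diffs below is a size fallback: every serving
path now prefers the link's new ContentLength field (populated by drivers that
know the real payload size, such as thumbnails or transcoded video) and only
falls back to the object's size when ContentLength is unset. A minimal sketch of
that logic, using simplified stand-in types rather than the project's actual
model.Link and model.Obj:

package main

import "fmt"

// Hypothetical stand-ins for the project's model.Link and model.Obj,
// reduced to the fields relevant here.
type Link struct {
	ContentLength int64 // size of the payload actually served (e.g. a generated thumbnail); 0 when unknown
}

type Obj struct {
	Size int64 // size of the original file on the storage
}

// effectiveSize mirrors the fallback used throughout this commit:
// prefer the link's own ContentLength, fall back to the object's size.
func effectiveSize(link *Link, file Obj) int64 {
	if link != nil && link.ContentLength > 0 {
		return link.ContentLength
	}
	return file.Size
}

func main() {
	file := Obj{Size: 10 << 20}               // 10 MiB source file
	thumb := &Link{ContentLength: 4096}       // 4 KiB thumbnail payload
	fmt.Println(effectiveSize(thumb, file))   // 4096: Content-Length matches the bytes sent
	fmt.Println(effectiveSize(&Link{}, file)) // 10485760: no ContentLength, fall back to file size
}

Previously the headers and range handling were derived from file.GetSize(), which
for a generated thumbnail reports the original file's size, so the advertised
Content-Length did not match the bytes actually written.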

View File

@@ -122,10 +122,11 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
var resultLink *model.Link
if link != nil {
resultLink = &model.Link{
URL: link.URL,
Header: link.Header,
RangeReader: link.RangeReader,
SyncClosers: utils.NewSyncClosers(link),
URL: link.URL,
Header: link.Header,
RangeReader: link.RangeReader,
SyncClosers: utils.NewSyncClosers(link),
ContentLength: link.ContentLength,
}
if link.MFile != nil {
resultLink.RangeReader = &model.FileRangeReader{

View File

@@ -256,7 +256,11 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
return nil, err
}
rrf, err := stream.GetRangeReaderFromLink(remoteFile.GetSize(), remoteLink)
remoteSize := remoteLink.ContentLength
if remoteSize <= 0 {
remoteSize = remoteFile.GetSize()
}
rrf, err := stream.GetRangeReaderFromLink(remoteSize, remoteLink)
if err != nil {
_ = remoteLink.Close()
return nil, fmt.Errorf("the remote storage driver needs to be enhanced to support encryption")
@@ -313,7 +317,8 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
}
return readSeeker, nil
}),
SyncClosers: utils.NewSyncClosers(remoteLink),
SyncClosers: utils.NewSyncClosers(remoteLink),
ContentLength: remoteSize,
}, nil
}

View File

@@ -228,10 +228,17 @@ func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
if err != nil {
return nil, err
}
// Get thumbnail file size for Content-Length
stat, err := open.Stat()
if err != nil {
open.Close()
return nil, err
}
link.ContentLength = int64(stat.Size())
link.MFile = open
} else {
link.MFile = bytes.NewReader(buf.Bytes())
//link.Header.Set("Content-Length", strconv.Itoa(buf.Len()))
link.ContentLength = int64(buf.Len())
}
} else {
open, err := os.Open(fullPath)

View File

@@ -149,7 +149,12 @@ func (d *QuarkOrUC) getTranscodingLink(file model.Obj) (*model.Link, error) {
return nil, err
}
return &model.Link{URL: resp.Data.VideoList[0].VideoInfo.URL}, nil
return &model.Link{
URL: resp.Data.VideoList[0].VideoInfo.URL,
ContentLength: resp.Data.Size,
Concurrency: 3,
PartSize: 10 * utils.MB,
}, nil
}
func (d *QuarkOrUC) upPre(file model.FileStreamer, parentId string) (UpPreResp, error) {

View File

@@ -6,11 +6,12 @@ import (
"crypto/sha256"
"encoding/hex"
"errors"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"net/http"
"strconv"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
@@ -228,9 +229,10 @@ func (d *QuarkUCTV) getTranscodingLink(ctx context.Context, file model.Obj) (*mo
}
return &model.Link{
URL: fileLink.Data.VideoInfo[0].URL,
Concurrency: 3,
PartSize: 10 * utils.MB,
URL: fileLink.Data.VideoInfo[0].URL,
Concurrency: 3,
PartSize: 10 * utils.MB,
ContentLength: fileLink.Data.VideoInfo[0].Size,
}, nil
}

View File

@@ -161,7 +161,6 @@ func copyFileBetween2Storages(tsk *CopyTask, srcStorage, dstStorage driver.Drive
if err != nil {
return errors.WithMessagef(err, "failed get src [%s] file", srcFilePath)
}
tsk.SetTotalBytes(srcFile.GetSize())
link, _, err := op.Link(tsk.Ctx(), srcStorage, srcFilePath, model.LinkArgs{})
if err != nil {
return errors.WithMessagef(err, "failed get [%s] link", srcFilePath)
@@ -175,5 +174,6 @@ func copyFileBetween2Storages(tsk *CopyTask, srcStorage, dstStorage driver.Drive
_ = link.Close()
return errors.WithMessagef(err, "failed get [%s] stream", srcFilePath)
}
tsk.SetTotalBytes(ss.GetSize())
return op.Put(tsk.Ctx(), dstStorage, dstDirPath, ss, tsk.SetProgress, true)
}

View File

@@ -33,8 +33,9 @@ type Link struct {
Expiration *time.Duration // local cache expire Duration
//for accelerating request, use multi-thread downloading
Concurrency int `json:"concurrency"`
PartSize int `json:"part_size"`
Concurrency int `json:"concurrency"`
PartSize int `json:"part_size"`
ContentLength int64 `json:"-"` // transcoded video, thumbnails
utils.SyncClosers `json:"-"`
}

View File

@@ -291,7 +291,7 @@ func transferObjFile(t *TransferTask) error {
_ = link.Close()
return errors.WithMessagef(err, "failed get [%s] stream", t.SrcObjPath)
}
t.SetTotalBytes(srcFile.GetSize())
t.SetTotalBytes(ss.GetSize())
return op.Put(t.Ctx(), t.DstStorage, t.DstDirPath, ss, t.SetProgress)
}

View File

@@ -258,7 +258,8 @@ func Link(ctx context.Context, storage driver.Driver, path string, args model.Li
if file.IsDir() {
return nil, nil, errors.WithStack(errs.NotFile)
}
key := Key(storage, path)
key := stdpath.Join(Key(storage, path), args.Type)
if link, ok := linkCache.Get(key); ok {
return link, file, nil
}
@@ -278,6 +279,9 @@ func Link(ctx context.Context, storage driver.Driver, path string, args model.Li
if storage.Config().OnlyLinkMFile {
link, err := fn()
if err != nil {
return nil, nil, err
}
return link, file, err
}
@@ -295,6 +299,10 @@ func Link(ctx context.Context, storage driver.Driver, path string, args model.Li
link.AcquireReference()
}
}
if err != nil {
return nil, nil, err
}
return link, file, err
}

View File

@@ -161,6 +161,7 @@ type SeekableStream struct {
*FileStream
// should have one of belows to support rangeRead
rangeReadCloser model.RangeReadCloserIF
size int64
}
func NewSeekableStream(fs *FileStream, link *model.Link) (*SeekableStream, error) {
@@ -174,7 +175,11 @@ func NewSeekableStream(fs *FileStream, link *model.Link) (*SeekableStream, error
}
if link != nil {
rr, err := GetRangeReaderFromLink(fs.GetSize(), link)
size := link.ContentLength
if size <= 0 {
size = fs.GetSize()
}
rr, err := GetRangeReaderFromLink(size, link)
if err != nil {
return nil, err
}
@@ -184,18 +189,25 @@ func NewSeekableStream(fs *FileStream, link *model.Link) (*SeekableStream, error
return nil, err
}
fs.Add(link)
return &SeekableStream{FileStream: fs}, nil
return &SeekableStream{FileStream: fs, size: size}, nil
}
rrc := &model.RangeReadCloser{
RangeReader: rr,
}
fs.Add(link)
fs.Add(rrc)
return &SeekableStream{FileStream: fs, rangeReadCloser: rrc}, nil
return &SeekableStream{FileStream: fs, rangeReadCloser: rrc, size: size}, nil
}
return nil, fmt.Errorf("illegal seekableStream")
}
func (ss *SeekableStream) GetSize() int64 {
if ss.size > 0 {
return ss.size
}
return ss.FileStream.GetSize()
}
//func (ss *SeekableStream) Peek(length int) {
//
//}

View File

@@ -18,25 +18,33 @@ import (
func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.Obj) error {
if link.MFile != nil {
attachHeader(w, file, link.Header)
attachHeader(w, file, link)
http.ServeContent(w, r, file.GetName(), file.ModTime(), link.MFile)
return nil
}
if link.Concurrency > 0 || link.PartSize > 0 {
attachHeader(w, file, link.Header)
rrf, _ := stream.GetRangeReaderFromLink(file.GetSize(), link)
attachHeader(w, file, link)
size := link.ContentLength
if size <= 0 {
size = file.GetSize()
}
rrf, _ := stream.GetRangeReaderFromLink(size, link)
if link.RangeReader == nil {
r = r.WithContext(context.WithValue(r.Context(), conf.RequestHeaderKey, r.Header))
}
return net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), &model.RangeReadCloser{
return net.ServeHTTP(w, r, file.GetName(), file.ModTime(), size, &model.RangeReadCloser{
RangeReader: rrf,
})
}
if link.RangeReader != nil {
attachHeader(w, file, link.Header)
return net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), &model.RangeReadCloser{
attachHeader(w, file, link)
size := link.ContentLength
if size <= 0 {
size = file.GetSize()
}
return net.ServeHTTP(w, r, file.GetName(), file.ModTime(), size, &model.RangeReadCloser{
RangeReader: link.RangeReader,
})
}
@@ -61,17 +69,23 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.
})
return err
}
func attachHeader(w http.ResponseWriter, file model.Obj, header http.Header) {
func attachHeader(w http.ResponseWriter, file model.Obj, link *model.Link) {
fileName := file.GetName()
w.Header().Set("Content-Disposition", utils.GenerateContentDisposition(fileName))
w.Header().Set("Content-Type", utils.GetMimeType(fileName))
w.Header().Set("Etag", GetEtag(file))
contentType := header.Get("Content-Type")
size := link.ContentLength
if size <= 0 {
size = file.GetSize()
}
w.Header().Set("Etag", GetEtag(file, size))
contentType := link.Header.Get("Content-Type")
if len(contentType) > 0 {
w.Header().Set("Content-Type", contentType)
} else {
w.Header().Set("Content-Type", utils.GetMimeType(fileName))
}
}
func GetEtag(file model.Obj) string {
func GetEtag(file model.Obj, size int64) string {
hash := ""
for _, v := range file.GetHash().Export() {
if v > hash {
@@ -82,20 +96,23 @@ func GetEtag(file model.Obj) string {
return fmt.Sprintf(`"%s"`, hash)
}
// following nginx's heuristic
return fmt.Sprintf(`"%x-%x"`, file.ModTime().Unix(), file.GetSize())
return fmt.Sprintf(`"%x-%x"`, file.ModTime().Unix(), size)
}
func ProxyRange(ctx context.Context, link *model.Link, size int64) {
if link.MFile != nil {
return
}
if link.RangeReader == nil && !strings.HasPrefix(link.URL, GetApiUrl(ctx)+"/") {
rrf, err := stream.GetRangeReaderFromLink(size, link)
if err != nil {
return
func ProxyRange(ctx context.Context, link *model.Link, size int64) *model.Link {
if link.MFile == nil && link.RangeReader == nil && !strings.HasPrefix(link.URL, GetApiUrl(ctx)+"/") {
if link.ContentLength > 0 {
size = link.ContentLength
}
rrf, err := stream.GetRangeReaderFromLink(size, link)
if err == nil {
return &model.Link{
RangeReader: rrf,
ContentLength: size,
}
}
link.RangeReader = rrf
}
return link
}
type InterceptResponseWriter struct {

View File

@@ -119,7 +119,7 @@ func proxy(c *gin.Context, link *model.Link, file model.Obj, proxyRange bool) {
}
}
if proxyRange {
common.ProxyRange(c, link, file.GetSize())
link = common.ProxyRange(c, link, file.GetSize())
}
Writer := &common.WrittenResponseWriter{ResponseWriter: c.Writer}

View File

@@ -171,7 +171,10 @@ func (b *s3Backend) GetObject(ctx context.Context, bucketName, objectName string
}
}()
size := file.GetSize()
size := link.ContentLength
if size <= 0 {
size = file.GetSize()
}
rnge, err := rangeRequest.Range(size)
if err != nil {
return nil, err

View File

@@ -476,10 +476,7 @@ func findETag(ctx context.Context, ls LockSystem, name string, fi model.Obj) (st
return etag, err
}
}
// The Apache http 2.4 web server by default concatenates the
// modification time and size of a file. We replicate the heuristic
// with nanosecond granularity.
return common.GetEtag(fi), nil
return common.GetEtag(fi, fi.GetSize()), nil
}
func findSupportedLock(ctx context.Context, ls LockSystem, name string, fi model.Obj) (string, error) {

View File

@@ -233,10 +233,6 @@ func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (sta
if err != nil {
return http.StatusNotFound, err
}
if r.Method == http.MethodHead {
w.Header().Set("Content-Length", fmt.Sprintf("%d", fi.GetSize()))
return http.StatusOK, nil
}
if fi.IsDir() {
return http.StatusMethodNotAllowed, nil
}
@@ -250,7 +246,7 @@ func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (sta
}
defer link.Close()
if storage.GetStorage().ProxyRange {
common.ProxyRange(ctx, link, fi.GetSize())
link = common.ProxyRange(ctx, link, fi.GetSize())
}
err = common.Proxy(w, r, link, fi)
if err != nil {