mirror of
https://github.com/OpenListTeam/OpenList.git
synced 2025-09-20 04:36:09 +08:00
Compare commits
1 Commits
fix-user
...
renovate/g
Author | SHA1 | Date | |
---|---|---|---|
18c0f551fe |
38
.github/workflows/sync_repo.yml
vendored
38
.github/workflows/sync_repo.yml
vendored
@ -1,38 +0,0 @@
|
||||
name: Sync to Gitee
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
sync:
|
||||
runs-on: ubuntu-latest
|
||||
name: Sync GitHub to Gitee
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Setup SSH
|
||||
run: |
|
||||
mkdir -p ~/.ssh
|
||||
echo "${{ secrets.GITEE_SSH_PRIVATE_KEY }}" > ~/.ssh/id_rsa
|
||||
chmod 600 ~/.ssh/id_rsa
|
||||
ssh-keyscan gitee.com >> ~/.ssh/known_hosts
|
||||
|
||||
- name: Create single commit and push
|
||||
run: |
|
||||
git config user.name "GitHub Actions"
|
||||
git config user.email "actions@github.com"
|
||||
|
||||
# Create a new branch
|
||||
git checkout --orphan new-main
|
||||
git add .
|
||||
git commit -m "Sync from GitHub: $(date)"
|
||||
|
||||
# Add Gitee remote and force push
|
||||
git remote add gitee ${{ vars.GITEE_REPO_URL }}
|
||||
git push --force gitee new-main:main
|
20
Dockerfile
20
Dockerfile
@ -1,6 +1,3 @@
|
||||
### Default image is base. You can add other support by modifying BASE_IMAGE_TAG. The following parameters are supported: base (default), aria2, ffmpeg, aio
|
||||
ARG BASE_IMAGE_TAG=base
|
||||
|
||||
FROM alpine:edge AS builder
|
||||
LABEL stage=go-builder
|
||||
WORKDIR /app/
|
||||
@ -10,24 +7,21 @@ RUN go mod download
|
||||
COPY ./ ./
|
||||
RUN bash build.sh release docker
|
||||
|
||||
### Default image is base. You can add other support by modifying BASE_IMAGE_TAG. The following parameters are supported: base (default), aria2, ffmpeg, aio
|
||||
ARG BASE_IMAGE_TAG=base
|
||||
FROM openlistteam/openlist-base-image:${BASE_IMAGE_TAG}
|
||||
LABEL MAINTAINER="OpenList"
|
||||
|
||||
ARG INSTALL_FFMPEG=false
|
||||
ARG INSTALL_ARIA2=false
|
||||
LABEL MAINTAINER="OpenList"
|
||||
|
||||
WORKDIR /opt/openlist/
|
||||
|
||||
RUN addgroup -g 1001 openlist && \
|
||||
adduser -D -u 1001 -G openlist openlist && \
|
||||
mkdir -p /opt/openlist/data
|
||||
|
||||
COPY --from=builder --chmod=755 --chown=1001:1001 /app/bin/openlist ./
|
||||
COPY --chmod=755 --chown=1001:1001 entrypoint.sh /entrypoint.sh
|
||||
|
||||
USER openlist
|
||||
COPY --chmod=755 --from=builder /app/bin/openlist ./
|
||||
COPY --chmod=755 entrypoint.sh /entrypoint.sh
|
||||
RUN /entrypoint.sh version
|
||||
|
||||
ENV UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
|
||||
ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
|
||||
VOLUME /opt/openlist/data/
|
||||
EXPOSE 5244 5245
|
||||
CMD [ "/entrypoint.sh" ]
|
||||
|
@ -1,23 +1,18 @@
|
||||
ARG BASE_IMAGE_TAG=base
|
||||
FROM ghcr.io/openlistteam/openlist-base-image:${BASE_IMAGE_TAG}
|
||||
LABEL MAINTAINER="OpenList"
|
||||
|
||||
ARG TARGETPLATFORM
|
||||
ARG INSTALL_FFMPEG=false
|
||||
ARG INSTALL_ARIA2=false
|
||||
LABEL MAINTAINER="OpenList"
|
||||
|
||||
WORKDIR /opt/openlist/
|
||||
|
||||
RUN addgroup -g 1001 openlist && \
|
||||
adduser -D -u 1001 -G openlist openlist && \
|
||||
mkdir -p /opt/openlist/data
|
||||
|
||||
COPY --chmod=755 --chown=1001:1001 /build/${TARGETPLATFORM}/openlist ./
|
||||
COPY --chmod=755 --chown=1001:1001 entrypoint.sh /entrypoint.sh
|
||||
|
||||
USER openlist
|
||||
COPY --chmod=755 /build/${TARGETPLATFORM}/openlist ./
|
||||
COPY --chmod=755 entrypoint.sh /entrypoint.sh
|
||||
RUN /entrypoint.sh version
|
||||
|
||||
ENV UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
|
||||
ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
|
||||
VOLUME /opt/openlist/data/
|
||||
EXPOSE 5244 5245
|
||||
CMD [ "/entrypoint.sh" ]
|
||||
CMD [ "/entrypoint.sh" ]
|
@ -6,9 +6,10 @@ services:
|
||||
ports:
|
||||
- '5244:5244'
|
||||
- '5245:5245'
|
||||
user: '0:0'
|
||||
environment:
|
||||
- PUID=0
|
||||
- PGID=0
|
||||
- UMASK=022
|
||||
- TZ=Asia/Shanghai
|
||||
- TZ=UTC
|
||||
container_name: openlist
|
||||
image: 'openlistteam/openlist:latest'
|
||||
|
@ -186,7 +186,9 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
|
||||
preHash = strings.ToUpper(preHash)
|
||||
fullHash := stream.GetHash().GetHash(utils.SHA1)
|
||||
if len(fullHash) != utils.SHA1.Width {
|
||||
_, fullHash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA1)
|
||||
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
|
||||
up = model.UpdateProgressWithRange(up, 50, 100)
|
||||
_, fullHash, err = streamPkg.CacheFullInTempFileAndHash(stream, cacheFileProgress, utils.SHA1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -321,7 +321,7 @@ func (d *Pan115) UploadByMultipart(ctx context.Context, params *driver115.Upload
|
||||
err error
|
||||
)
|
||||
|
||||
tmpF, err := s.CacheFullAndWriter(&up, nil)
|
||||
tmpF, err := s.CacheFullInTempFile()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -239,7 +239,9 @@ func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
|
||||
}
|
||||
sha1 := file.GetHash().GetHash(utils.SHA1)
|
||||
if len(sha1) != utils.SHA1.Width {
|
||||
_, sha1, err = stream.CacheFullAndHash(file, &up, utils.SHA1)
|
||||
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
|
||||
up = model.UpdateProgressWithRange(up, 50, 100)
|
||||
_, sha1, err = stream.CacheFullInTempFileAndHash(file, cacheFileProgress, utils.SHA1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -86,14 +86,13 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
|
||||
|
||||
fileSize := stream.GetSize()
|
||||
chunkSize := calPartSize(fileSize)
|
||||
ss, err := streamPkg.NewStreamSectionReader(stream, int(chunkSize), &up)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
partNum := (stream.GetSize() + chunkSize - 1) / chunkSize
|
||||
parts := make([]oss.UploadPart, partNum)
|
||||
offset := int64(0)
|
||||
ss, err := streamPkg.NewStreamSectionReader(stream, int(chunkSize))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for i := int64(1); i <= partNum; i++ {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
@ -120,7 +119,7 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
|
||||
retry.Attempts(3),
|
||||
retry.DelayType(retry.BackOffDelay),
|
||||
retry.Delay(time.Second))
|
||||
ss.FreeSectionReader(rd)
|
||||
ss.RecycleSectionReader(rd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -182,7 +182,9 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, file model.FileStrea
|
||||
etag := file.GetHash().GetHash(utils.MD5)
|
||||
var err error
|
||||
if len(etag) < utils.MD5.Width {
|
||||
_, etag, err = stream.CacheFullAndHash(file, &up, utils.MD5)
|
||||
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
|
||||
up = model.UpdateProgressWithRange(up, 50, 100)
|
||||
_, etag, err = stream.CacheFullInTempFileAndHash(file, cacheFileProgress, utils.MD5)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -81,12 +81,6 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
|
||||
if size > chunkSize {
|
||||
chunkCount = int((size + chunkSize - 1) / chunkSize)
|
||||
}
|
||||
|
||||
ss, err := stream.NewStreamSectionReader(file, int(chunkSize), &up)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
lastChunkSize := size % chunkSize
|
||||
if lastChunkSize == 0 {
|
||||
lastChunkSize = chunkSize
|
||||
@ -98,6 +92,10 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
|
||||
batchSize = 10
|
||||
getS3UploadUrl = d.getS3PreSignedUrls
|
||||
}
|
||||
ss, err := stream.NewStreamSectionReader(file, int(chunkSize))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
thread := min(int(chunkCount), d.UploadThread)
|
||||
threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
|
||||
@ -182,7 +180,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
|
||||
return nil
|
||||
},
|
||||
After: func(err error) {
|
||||
ss.FreeSectionReader(reader)
|
||||
ss.RecycleSectionReader(reader)
|
||||
},
|
||||
})
|
||||
}
|
||||
|
@ -69,45 +69,13 @@ func (d *Open123) List(ctx context.Context, dir model.Obj, args model.ListArgs)
|
||||
func (d *Open123) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
||||
fileId, _ := strconv.ParseInt(file.GetID(), 10, 64)
|
||||
|
||||
if d.DirectLink {
|
||||
res, err := d.getDirectLink(fileId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if d.DirectLinkPrivateKey == "" {
|
||||
duration := 365 * 24 * time.Hour // 缓存1年
|
||||
return &model.Link{
|
||||
URL: res.Data.URL,
|
||||
Expiration: &duration,
|
||||
}, nil
|
||||
}
|
||||
|
||||
u, err := d.getUserInfo()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
duration := time.Duration(d.DirectLinkValidDuration) * time.Minute
|
||||
|
||||
newURL, err := d.SignURL(res.Data.URL, d.DirectLinkPrivateKey,
|
||||
u.Data.UID, duration)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &model.Link{
|
||||
URL: newURL,
|
||||
Expiration: &duration,
|
||||
}, nil
|
||||
}
|
||||
|
||||
res, err := d.getDownloadInfo(fileId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &model.Link{URL: res.Data.DownloadUrl}, nil
|
||||
link := model.Link{URL: res.Data.DownloadUrl}
|
||||
return &link, nil
|
||||
}
|
||||
|
||||
func (d *Open123) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
|
||||
@ -164,7 +132,9 @@ func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
|
||||
// etag 文件md5
|
||||
etag := file.GetHash().GetHash(utils.MD5)
|
||||
if len(etag) < utils.MD5.Width {
|
||||
_, etag, err = stream.CacheFullAndHash(file, &up, utils.MD5)
|
||||
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
|
||||
up = model.UpdateProgressWithRange(up, 50, 100)
|
||||
_, etag, err = stream.CacheFullInTempFileAndHash(file, cacheFileProgress, utils.MD5)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -23,11 +23,6 @@ type Addition struct {
|
||||
// 上传线程数
|
||||
UploadThread int `json:"UploadThread" type:"number" default:"3" help:"the threads of upload"`
|
||||
|
||||
// 使用直链
|
||||
DirectLink bool `json:"DirectLink" type:"boolean" default:"false" required:"false" help:"use direct link when download file"`
|
||||
DirectLinkPrivateKey string `json:"DirectLinkPrivateKey" required:"false" help:"private key for direct link, if URL authentication is enabled"`
|
||||
DirectLinkValidDuration int64 `json:"DirectLinkValidDuration" type:"number" default:"30" required:"false" help:"minutes, if URL authentication is enabled"`
|
||||
|
||||
driver.RootID
|
||||
}
|
||||
|
||||
|
@ -127,7 +127,7 @@ type RefreshTokenResp struct {
|
||||
type UserInfoResp struct {
|
||||
BaseResp
|
||||
Data struct {
|
||||
UID uint64 `json:"uid"`
|
||||
UID int64 `json:"uid"`
|
||||
Username string `json:"username"`
|
||||
DisplayName string `json:"displayName"`
|
||||
HeadImage string `json:"headImage"`
|
||||
@ -158,13 +158,6 @@ type DownloadInfoResp struct {
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
type DirectLinkResp struct {
|
||||
BaseResp
|
||||
Data struct {
|
||||
URL string `json:"url"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
// 创建文件V2返回
|
||||
type UploadCreateResp struct {
|
||||
BaseResp
|
||||
|
@ -46,12 +46,6 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
|
||||
uploadDomain := createResp.Data.Servers[0]
|
||||
size := file.GetSize()
|
||||
chunkSize := createResp.Data.SliceSize
|
||||
|
||||
ss, err := stream.NewStreamSectionReader(file, int(chunkSize), &up)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
uploadNums := (size + chunkSize - 1) / chunkSize
|
||||
thread := min(int(uploadNums), d.UploadThread)
|
||||
threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
|
||||
@ -59,6 +53,10 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
|
||||
retry.Delay(time.Second),
|
||||
retry.DelayType(retry.BackOffDelay))
|
||||
|
||||
ss, err := stream.NewStreamSectionReader(file, int(chunkSize))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for partIndex := range uploadNums {
|
||||
if utils.IsCanceled(uploadCtx) {
|
||||
break
|
||||
@ -70,8 +68,6 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
|
||||
var reader *stream.SectionReader
|
||||
var rateLimitedRd io.Reader
|
||||
sliceMD5 := ""
|
||||
// 表单
|
||||
b := bytes.NewBuffer(make([]byte, 0, 2048))
|
||||
threadG.GoWithLifecycle(errgroup.Lifecycle{
|
||||
Before: func(ctx context.Context) error {
|
||||
if reader == nil {
|
||||
@ -86,6 +82,7 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
@ -93,8 +90,9 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
|
||||
// 重置分片reader位置,因为HashReader、上一次失败已经读取到分片EOF
|
||||
reader.Seek(0, io.SeekStart)
|
||||
|
||||
b.Reset()
|
||||
w := multipart.NewWriter(b)
|
||||
// 创建表单数据
|
||||
var b bytes.Buffer
|
||||
w := multipart.NewWriter(&b)
|
||||
// 添加表单字段
|
||||
err = w.WriteField("preuploadID", createResp.Data.PreuploadID)
|
||||
if err != nil {
|
||||
@ -109,20 +107,21 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
|
||||
return err
|
||||
}
|
||||
// 写入文件内容
|
||||
_, err = w.CreateFormFile("slice", fmt.Sprintf("%s.part%d", file.GetName(), partNumber))
|
||||
fw, err := w.CreateFormFile("slice", fmt.Sprintf("%s.part%d", file.GetName(), partNumber))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = utils.CopyWithBuffer(fw, rateLimitedRd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
headSize := b.Len()
|
||||
err = w.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
head := bytes.NewReader(b.Bytes()[:headSize])
|
||||
tail := bytes.NewReader(b.Bytes()[headSize:])
|
||||
rateLimitedRd = driver.NewLimitedUploadStream(ctx, io.MultiReader(head, reader, tail))
|
||||
|
||||
// 创建请求并设置header
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadDomain+"/upload/v2/file/slice", rateLimitedRd)
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadDomain+"/upload/v2/file/slice", &b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -158,7 +157,7 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
|
||||
return nil
|
||||
},
|
||||
After: func(err error) {
|
||||
ss.FreeSectionReader(reader)
|
||||
ss.RecycleSectionReader(reader)
|
||||
},
|
||||
})
|
||||
}
|
||||
|
@ -1,20 +1,15 @@
|
||||
package _123_open
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/drivers/base"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||
"github.com/go-resty/resty/v2"
|
||||
"github.com/google/uuid"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
@ -25,8 +20,7 @@ var ( //不同情况下获取的AccessTokenQPS限制不同 如下模块化易于
|
||||
RefreshToken = InitApiInfo(Api+"/api/v1/oauth2/access_token", 1)
|
||||
UserInfo = InitApiInfo(Api+"/api/v1/user/info", 1)
|
||||
FileList = InitApiInfo(Api+"/api/v2/file/list", 3)
|
||||
DownloadInfo = InitApiInfo(Api+"/api/v1/file/download_info", 5)
|
||||
DirectLink = InitApiInfo(Api+"/api/v1/direct-link/url", 5)
|
||||
DownloadInfo = InitApiInfo(Api+"/api/v1/file/download_info", 0)
|
||||
Mkdir = InitApiInfo(Api+"/upload/v1/file/mkdir", 2)
|
||||
Move = InitApiInfo(Api+"/api/v1/file/move", 1)
|
||||
Rename = InitApiInfo(Api+"/api/v1/file/name", 1)
|
||||
@ -118,33 +112,6 @@ func (d *Open123) flushAccessToken() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Open123) SignURL(originURL, privateKey string, uid uint64, validDuration time.Duration) (newURL string, err error) {
|
||||
// 生成Unix时间戳
|
||||
ts := time.Now().Add(validDuration).Unix()
|
||||
|
||||
// 生成随机数(建议使用UUID,不能包含中划线(-))
|
||||
rand := strings.ReplaceAll(uuid.New().String(), "-", "")
|
||||
|
||||
// 解析URL
|
||||
objURL, err := url.Parse(originURL)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// 待签名字符串,格式:path-timestamp-rand-uid-privateKey
|
||||
unsignedStr := fmt.Sprintf("%s-%d-%s-%d-%s", objURL.Path, ts, rand, uid, privateKey)
|
||||
md5Hash := md5.Sum([]byte(unsignedStr))
|
||||
// 生成鉴权参数,格式:timestamp-rand-uid-md5hash
|
||||
authKey := fmt.Sprintf("%d-%s-%d-%x", ts, rand, uid, md5Hash)
|
||||
|
||||
// 添加鉴权参数到URL查询参数
|
||||
v := objURL.Query()
|
||||
v.Add("auth_key", authKey)
|
||||
objURL.RawQuery = v.Encode()
|
||||
|
||||
return objURL.String(), nil
|
||||
}
|
||||
|
||||
func (d *Open123) getUserInfo() (*UserInfoResp, error) {
|
||||
var resp UserInfoResp
|
||||
|
||||
@ -192,21 +159,6 @@ func (d *Open123) getDownloadInfo(fileId int64) (*DownloadInfoResp, error) {
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (d *Open123) getDirectLink(fileId int64) (*DirectLinkResp, error) {
|
||||
var resp DirectLinkResp
|
||||
|
||||
_, err := d.Request(DirectLink, http.MethodGet, func(req *resty.Request) {
|
||||
req.SetQueryParams(map[string]string{
|
||||
"fileId": strconv.FormatInt(fileId, 10),
|
||||
})
|
||||
}, &resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (d *Open123) mkdir(parentID int64, name string) error {
|
||||
_, err := d.Request(Mkdir, http.MethodPost, func(req *resty.Request) {
|
||||
req.SetBody(base.Json{
|
||||
|
@ -522,7 +522,9 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
|
||||
var err error
|
||||
fullHash := stream.GetHash().GetHash(utils.SHA256)
|
||||
if len(fullHash) != utils.SHA256.Width {
|
||||
_, fullHash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA256)
|
||||
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
|
||||
up = model.UpdateProgressWithRange(up, 50, 100)
|
||||
_, fullHash, err = streamPkg.CacheFullInTempFileAndHash(stream, cacheFileProgress, utils.SHA256)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -5,19 +5,17 @@ import (
|
||||
"encoding/base64"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"github.com/skip2/go-qrcode"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/skip2/go-qrcode"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/drivers/base"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/stream"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
|
||||
"github.com/go-resty/resty/v2"
|
||||
@ -313,14 +311,11 @@ func (y *Cloud189TV) RapidUpload(ctx context.Context, dstDir model.Obj, stream m
|
||||
|
||||
// 旧版本上传,家庭云不支持覆盖
|
||||
func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
|
||||
fileMd5 := file.GetHash().GetHash(utils.MD5)
|
||||
var tempFile = file.GetFile()
|
||||
var err error
|
||||
if len(fileMd5) != utils.MD5.Width {
|
||||
tempFile, fileMd5, err = stream.CacheFullAndHash(file, &up, utils.MD5)
|
||||
} else if tempFile == nil {
|
||||
tempFile, err = file.CacheFullAndWriter(&up, nil)
|
||||
tempFile, err := file.CacheFullInTempFile()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fileMd5, err := utils.HashFile(utils.MD5, tempFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -350,7 +345,7 @@ func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model
|
||||
header["Edrive-UploadFileId"] = fmt.Sprint(status.UploadFileId)
|
||||
}
|
||||
|
||||
_, err := y.put(ctx, status.FileUploadUrl, header, true, tempFile, isFamily)
|
||||
_, err := y.put(ctx, status.FileUploadUrl, header, true, io.NopCloser(tempFile), isFamily)
|
||||
if err, ok := err.(*RespErr); ok && err.Code != "InputStreamReadError" {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -500,8 +500,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ss, err := stream.NewStreamSectionReader(file, int(sliceSize), &up)
|
||||
ss, err := stream.NewStreamSectionReader(file, int(sliceSize))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -582,7 +581,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
|
||||
return nil
|
||||
},
|
||||
After: func(err error) {
|
||||
ss.FreeSectionReader(reader)
|
||||
ss.RecycleSectionReader(reader)
|
||||
},
|
||||
},
|
||||
)
|
||||
@ -858,7 +857,9 @@ func (y *Cloud189PC) GetMultiUploadUrls(ctx context.Context, isFamily bool, uplo
|
||||
|
||||
// 旧版本上传,家庭云不支持覆盖
|
||||
func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
|
||||
tempFile, fileMd5, err := stream.CacheFullAndHash(file, &up, utils.MD5)
|
||||
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
|
||||
up = model.UpdateProgressWithRange(up, 50, 100)
|
||||
tempFile, fileMd5, err := stream.CacheFullInTempFileAndHash(file, cacheFileProgress, utils.MD5)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -5,7 +5,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
stdpath "path"
|
||||
"strings"
|
||||
|
||||
@ -13,7 +12,6 @@ import (
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/fs"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/sign"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/stream"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
@ -162,18 +160,25 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
|
||||
sign.Sign(reqPath)),
|
||||
}, nil
|
||||
}
|
||||
|
||||
resultLink := *link
|
||||
resultLink.SyncClosers = utils.NewSyncClosers(link)
|
||||
if args.Redirect {
|
||||
return &resultLink, nil
|
||||
return link, nil
|
||||
}
|
||||
|
||||
resultLink := &model.Link{
|
||||
URL: link.URL,
|
||||
Header: link.Header,
|
||||
RangeReader: link.RangeReader,
|
||||
MFile: link.MFile,
|
||||
Concurrency: link.Concurrency,
|
||||
PartSize: link.PartSize,
|
||||
ContentLength: link.ContentLength,
|
||||
SyncClosers: utils.NewSyncClosers(link),
|
||||
}
|
||||
if resultLink.ContentLength == 0 {
|
||||
resultLink.ContentLength = fi.GetSize()
|
||||
}
|
||||
if resultLink.MFile != nil {
|
||||
return &resultLink, nil
|
||||
return resultLink, nil
|
||||
}
|
||||
if d.DownloadConcurrency > 0 {
|
||||
resultLink.Concurrency = d.DownloadConcurrency
|
||||
@ -181,7 +186,7 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
|
||||
if d.DownloadPartSize > 0 {
|
||||
resultLink.PartSize = d.DownloadPartSize * utils.KB
|
||||
}
|
||||
return &resultLink, nil
|
||||
return resultLink, nil
|
||||
}
|
||||
return nil, errs.ObjectNotFound
|
||||
}
|
||||
@ -308,29 +313,24 @@ func (d *Alias) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer,
|
||||
reqPath, err := d.getReqPath(ctx, dstDir, true)
|
||||
if err == nil {
|
||||
if len(reqPath) == 1 {
|
||||
storage, reqActualPath, err := op.GetStorageAndActualPath(*reqPath[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return op.Put(ctx, storage, reqActualPath, &stream.FileStream{
|
||||
Obj: s,
|
||||
Mimetype: s.GetMimetype(),
|
||||
Reader: s,
|
||||
}, up)
|
||||
return fs.PutDirectly(ctx, *reqPath[0], &stream.FileStream{
|
||||
Obj: s,
|
||||
Mimetype: s.GetMimetype(),
|
||||
WebPutAsTask: s.NeedStore(),
|
||||
Reader: s,
|
||||
})
|
||||
} else {
|
||||
file, err := s.CacheFullAndWriter(nil, nil)
|
||||
file, err := s.CacheFullInTempFile()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
count := float64(len(reqPath) + 1)
|
||||
up(100 / count)
|
||||
for i, path := range reqPath {
|
||||
for _, path := range reqPath {
|
||||
err = errors.Join(err, fs.PutDirectly(ctx, *path, &stream.FileStream{
|
||||
Obj: s,
|
||||
Mimetype: s.GetMimetype(),
|
||||
Reader: file,
|
||||
Obj: s,
|
||||
Mimetype: s.GetMimetype(),
|
||||
WebPutAsTask: s.NeedStore(),
|
||||
Reader: file,
|
||||
}))
|
||||
up(float64(i+2) / float64(count) * 100)
|
||||
_, e := file.Seek(0, io.SeekStart)
|
||||
if e != nil {
|
||||
return errors.Join(err, e)
|
||||
@ -402,24 +402,10 @@ func (d *Alias) Extract(ctx context.Context, obj model.Obj, args model.ArchiveIn
|
||||
return nil, errs.ObjectNotFound
|
||||
}
|
||||
for _, dst := range dsts {
|
||||
reqPath := stdpath.Join(dst, sub)
|
||||
link, err := d.extract(ctx, reqPath, args)
|
||||
if err != nil {
|
||||
continue
|
||||
link, err := d.extract(ctx, dst, sub, args)
|
||||
if err == nil {
|
||||
return link, nil
|
||||
}
|
||||
if link == nil {
|
||||
return &model.Link{
|
||||
URL: fmt.Sprintf("%s/ap%s?inner=%s&pass=%s&sign=%s",
|
||||
common.GetApiUrl(ctx),
|
||||
utils.EncodePath(reqPath, true),
|
||||
utils.EncodePath(args.InnerPath, true),
|
||||
url.QueryEscape(args.Password),
|
||||
sign.SignArchive(reqPath)),
|
||||
}, nil
|
||||
}
|
||||
resultLink := *link
|
||||
resultLink.SyncClosers = utils.NewSyncClosers(link)
|
||||
return &resultLink, nil
|
||||
}
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
@ -2,6 +2,8 @@ package alias
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
stdpath "path"
|
||||
"strings"
|
||||
|
||||
@ -10,6 +12,8 @@ import (
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/fs"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/sign"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
"github.com/OpenListTeam/OpenList/v4/server/common"
|
||||
)
|
||||
|
||||
@ -136,7 +140,8 @@ func (d *Alias) listArchive(ctx context.Context, dst, sub string, args model.Arc
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
||||
func (d *Alias) extract(ctx context.Context, reqPath string, args model.ArchiveInnerArgs) (*model.Link, error) {
|
||||
func (d *Alias) extract(ctx context.Context, dst, sub string, args model.ArchiveInnerArgs) (*model.Link, error) {
|
||||
reqPath := stdpath.Join(dst, sub)
|
||||
storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -144,12 +149,20 @@ func (d *Alias) extract(ctx context.Context, reqPath string, args model.ArchiveI
|
||||
if _, ok := storage.(driver.ArchiveReader); !ok {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
if args.Redirect && common.ShouldProxy(storage, stdpath.Base(reqPath)) {
|
||||
_, err := fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
|
||||
if err == nil {
|
||||
if args.Redirect && common.ShouldProxy(storage, stdpath.Base(sub)) {
|
||||
_, err = fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, nil
|
||||
link := &model.Link{
|
||||
URL: fmt.Sprintf("%s/ap%s?inner=%s&pass=%s&sign=%s",
|
||||
common.GetApiUrl(ctx),
|
||||
utils.EncodePath(reqPath, true),
|
||||
utils.EncodePath(args.InnerPath, true),
|
||||
url.QueryEscape(args.Password),
|
||||
sign.SignArchive(reqPath)),
|
||||
}
|
||||
return link, nil
|
||||
}
|
||||
link, _, err := op.DriverExtract(ctx, storage, reqActualPath, args)
|
||||
return link, err
|
||||
|
@ -191,7 +191,9 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
|
||||
|
||||
hash := stream.GetHash().GetHash(utils.SHA1)
|
||||
if len(hash) != utils.SHA1.Width {
|
||||
_, hash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA1)
|
||||
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
|
||||
up = model.UpdateProgressWithRange(up, 50, 100)
|
||||
_, hash, err = streamPkg.CacheFullInTempFileAndHash(stream, cacheFileProgress, utils.SHA1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -216,13 +218,14 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
|
||||
if !createResp.RapidUpload {
|
||||
// 2. normal upload
|
||||
log.Debugf("[aliyundive_open] normal upload")
|
||||
ss, err := streamPkg.NewStreamSectionReader(stream, int(partSize), &up)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
preTime := time.Now()
|
||||
var offset, length int64 = 0, partSize
|
||||
//var length
|
||||
ss, err := streamPkg.NewStreamSectionReader(stream, int(partSize))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for i := 0; i < len(createResp.PartInfoList); i++ {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return nil, ctx.Err()
|
||||
@ -250,7 +253,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
|
||||
retry.Attempts(3),
|
||||
retry.DelayType(retry.BackOffDelay),
|
||||
retry.Delay(time.Second))
|
||||
ss.FreeSectionReader(rd)
|
||||
ss.RecycleSectionReader(rd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -48,7 +48,6 @@ import (
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/onedrive_app"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/onedrive_sharelink"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/openlist"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/openlist_share"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/pikpak"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/pikpak_share"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/quark_open"
|
||||
|
@ -237,16 +237,15 @@ func (d *Cloudreve) upLocal(ctx context.Context, stream model.FileStreamer, u Up
|
||||
}
|
||||
|
||||
func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
|
||||
DEFAULT := int64(u.ChunkSize)
|
||||
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
uploadUrl := u.UploadURLs[0]
|
||||
credential := u.Credential
|
||||
var finish int64 = 0
|
||||
var chunk int = 0
|
||||
DEFAULT := int64(u.ChunkSize)
|
||||
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for finish < stream.GetSize() {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
@ -295,7 +294,7 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
|
||||
retry.DelayType(retry.BackOffDelay),
|
||||
retry.Delay(time.Second),
|
||||
)
|
||||
ss.FreeSectionReader(rd)
|
||||
ss.RecycleSectionReader(rd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -307,14 +306,13 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
|
||||
}
|
||||
|
||||
func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
|
||||
uploadUrl := u.UploadURLs[0]
|
||||
var finish int64 = 0
|
||||
DEFAULT := int64(u.ChunkSize)
|
||||
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
|
||||
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
uploadUrl := u.UploadURLs[0]
|
||||
var finish int64 = 0
|
||||
for finish < stream.GetSize() {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
@ -355,7 +353,7 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
|
||||
retry.DelayType(retry.BackOffDelay),
|
||||
retry.Delay(time.Second),
|
||||
)
|
||||
ss.FreeSectionReader(rd)
|
||||
ss.RecycleSectionReader(rd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -369,15 +367,14 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
|
||||
}
|
||||
|
||||
func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
|
||||
DEFAULT := int64(u.ChunkSize)
|
||||
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var finish int64 = 0
|
||||
var chunk int = 0
|
||||
var etags []string
|
||||
DEFAULT := int64(u.ChunkSize)
|
||||
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for finish < stream.GetSize() {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
@ -418,7 +415,7 @@ func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u Uploa
|
||||
retry.DelayType(retry.BackOffDelay),
|
||||
retry.Delay(time.Second),
|
||||
)
|
||||
ss.FreeSectionReader(rd)
|
||||
ss.RecycleSectionReader(rd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -252,16 +252,15 @@ func (d *CloudreveV4) upLocal(ctx context.Context, file model.FileStreamer, u Fi
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
|
||||
DEFAULT := int64(u.ChunkSize)
|
||||
ss, err := stream.NewStreamSectionReader(file, int(DEFAULT), &up)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
uploadUrl := u.UploadUrls[0]
|
||||
credential := u.Credential
|
||||
var finish int64 = 0
|
||||
var chunk int = 0
|
||||
DEFAULT := int64(u.ChunkSize)
|
||||
ss, err := stream.NewStreamSectionReader(file, int(DEFAULT))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for finish < file.GetSize() {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
@ -310,7 +309,7 @@ func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u F
|
||||
retry.DelayType(retry.BackOffDelay),
|
||||
retry.Delay(time.Second),
|
||||
)
|
||||
ss.FreeSectionReader(rd)
|
||||
ss.RecycleSectionReader(rd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -322,14 +321,13 @@ func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u F
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
|
||||
uploadUrl := u.UploadUrls[0]
|
||||
var finish int64 = 0
|
||||
DEFAULT := int64(u.ChunkSize)
|
||||
ss, err := stream.NewStreamSectionReader(file, int(DEFAULT), &up)
|
||||
ss, err := stream.NewStreamSectionReader(file, int(DEFAULT))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
uploadUrl := u.UploadUrls[0]
|
||||
var finish int64 = 0
|
||||
for finish < file.GetSize() {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
@ -371,7 +369,7 @@ func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u
|
||||
retry.DelayType(retry.BackOffDelay),
|
||||
retry.Delay(time.Second),
|
||||
)
|
||||
ss.FreeSectionReader(rd)
|
||||
ss.RecycleSectionReader(rd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -385,15 +383,14 @@ func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
|
||||
DEFAULT := int64(u.ChunkSize)
|
||||
ss, err := stream.NewStreamSectionReader(file, int(DEFAULT), &up)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var finish int64 = 0
|
||||
var chunk int = 0
|
||||
var etags []string
|
||||
DEFAULT := int64(u.ChunkSize)
|
||||
ss, err := stream.NewStreamSectionReader(file, int(DEFAULT))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for finish < file.GetSize() {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
@ -435,7 +432,7 @@ func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileU
|
||||
retry.DelayType(retry.BackOffDelay),
|
||||
retry.Delay(time.Second),
|
||||
)
|
||||
ss.FreeSectionReader(rd)
|
||||
ss.RecycleSectionReader(rd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -401,6 +401,7 @@ func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, streamer model.FileSt
|
||||
},
|
||||
Reader: wrappedIn,
|
||||
Mimetype: "application/octet-stream",
|
||||
WebPutAsTask: streamer.NeedStore(),
|
||||
ForceStreamUpload: true,
|
||||
Exist: streamer.GetExist(),
|
||||
}
|
||||
|
@ -449,11 +449,10 @@ func (d *Doubao) uploadNode(uploadConfig *UploadConfig, dir model.Obj, file mode
|
||||
|
||||
// Upload 普通上传实现
|
||||
func (d *Doubao) Upload(ctx context.Context, config *UploadConfig, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, dataType string) (model.Obj, error) {
|
||||
ss, err := stream.NewStreamSectionReader(file, int(file.GetSize()), &up)
|
||||
ss, err := stream.NewStreamSectionReader(file, int(file.GetSize()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
reader, err := ss.GetSectionReader(0, file.GetSize())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -504,7 +503,7 @@ func (d *Doubao) Upload(ctx context.Context, config *UploadConfig, dstDir model.
|
||||
}
|
||||
return nil
|
||||
})
|
||||
ss.FreeSectionReader(reader)
|
||||
ss.RecycleSectionReader(reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -543,15 +542,15 @@ func (d *Doubao) UploadByMultipart(ctx context.Context, config *UploadConfig, fi
|
||||
if config.InnerUploadAddress.AdvanceOption.SliceSize > 0 {
|
||||
chunkSize = int64(config.InnerUploadAddress.AdvanceOption.SliceSize)
|
||||
}
|
||||
ss, err := stream.NewStreamSectionReader(file, int(chunkSize), &up)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
totalParts := (fileSize + chunkSize - 1) / chunkSize
|
||||
// 创建分片信息组
|
||||
parts := make([]UploadPart, totalParts)
|
||||
|
||||
// 用 stream.NewStreamSectionReader 替代缓存临时文件
|
||||
ss, err := stream.NewStreamSectionReader(file, int(chunkSize))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
up(10.0) // 更新进度
|
||||
// 设置并行上传
|
||||
thread := min(int(totalParts), d.uploadThread)
|
||||
@ -642,7 +641,7 @@ func (d *Doubao) UploadByMultipart(ctx context.Context, config *UploadConfig, fi
|
||||
return nil
|
||||
},
|
||||
After: func(err error) {
|
||||
ss.FreeSectionReader(reader)
|
||||
ss.RecycleSectionReader(reader)
|
||||
},
|
||||
})
|
||||
}
|
||||
|
@ -13,7 +13,7 @@ type Addition struct {
|
||||
ClientSecret string `json:"client_secret" required:"false" help:"Keep it empty if you don't have one"`
|
||||
AccessToken string
|
||||
RefreshToken string `json:"refresh_token" required:"true"`
|
||||
RootNamespaceId string `json:"RootNamespaceId" required:"false"`
|
||||
RootNamespaceId string
|
||||
}
|
||||
|
||||
var config = driver.Config{
|
||||
|
@ -175,13 +175,6 @@ func (d *Dropbox) finishUploadSession(ctx context.Context, toPath string, offset
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/octet-stream")
|
||||
req.Header.Set("Authorization", "Bearer "+d.AccessToken)
|
||||
if d.RootNamespaceId != "" {
|
||||
apiPathRootJson, err := d.buildPathRootHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.Header.Set("Dropbox-API-Path-Root", apiPathRootJson)
|
||||
}
|
||||
|
||||
uploadFinishArgs := UploadFinishArgs{
|
||||
Commit: struct {
|
||||
@ -226,13 +219,6 @@ func (d *Dropbox) startUploadSession(ctx context.Context) (string, error) {
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/octet-stream")
|
||||
req.Header.Set("Authorization", "Bearer "+d.AccessToken)
|
||||
if d.RootNamespaceId != "" {
|
||||
apiPathRootJson, err := d.buildPathRootHeader()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
req.Header.Set("Dropbox-API-Path-Root", apiPathRootJson)
|
||||
}
|
||||
req.Header.Set("Dropbox-API-Arg", "{\"close\":false}")
|
||||
|
||||
res, err := base.HttpClient.Do(req)
|
||||
@ -247,11 +233,3 @@ func (d *Dropbox) startUploadSession(ctx context.Context) (string, error) {
|
||||
_ = res.Body.Close()
|
||||
return sessionId, nil
|
||||
}
|
||||
|
||||
func (d *Dropbox) buildPathRootHeader() (string, error) {
|
||||
return utils.Json.MarshalToString(map[string]interface{}{
|
||||
".tag": "root",
|
||||
"root": d.RootNamespaceId,
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -162,7 +162,7 @@ func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
|
||||
SetBody(driver.NewLimitedUploadStream(ctx, stream))
|
||||
}, nil)
|
||||
} else {
|
||||
err = d.chunkUpload(ctx, stream, putUrl, up)
|
||||
err = d.chunkUpload(ctx, stream, putUrl)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
@ -254,14 +254,13 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (d *GoogleDrive) chunkUpload(ctx context.Context, file model.FileStreamer, url string, up driver.UpdateProgress) error {
|
||||
func (d *GoogleDrive) chunkUpload(ctx context.Context, file model.FileStreamer, url string) error {
|
||||
var defaultChunkSize = d.ChunkSize * 1024 * 1024
|
||||
ss, err := stream.NewStreamSectionReader(file, int(defaultChunkSize), &up)
|
||||
var offset int64 = 0
|
||||
ss, err := stream.NewStreamSectionReader(file, int(defaultChunkSize))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var offset int64 = 0
|
||||
url += "?includeItemsFromAllDrives=true&supportsAllDrives=true"
|
||||
for offset < file.GetSize() {
|
||||
if utils.IsCanceled(ctx) {
|
||||
@ -301,13 +300,12 @@ func (d *GoogleDrive) chunkUpload(ctx context.Context, file model.FileStreamer,
|
||||
}
|
||||
return fmt.Errorf("%s: %v", e.Error.Message, e.Error.Errors)
|
||||
}
|
||||
up(float64(offset+chunkSize) / float64(file.GetSize()) * 100)
|
||||
return nil
|
||||
},
|
||||
retry.Attempts(3),
|
||||
retry.DelayType(retry.BackOffDelay),
|
||||
retry.Delay(time.Second))
|
||||
ss.FreeSectionReader(reader)
|
||||
ss.RecycleSectionReader(reader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -276,7 +276,9 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreame
|
||||
etag := s.GetHash().GetHash(utils.MD5)
|
||||
var err error
|
||||
if len(etag) != utils.MD5.Width {
|
||||
_, etag, err = stream.CacheFullAndHash(s, &up, utils.MD5)
|
||||
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
|
||||
up = model.UpdateProgressWithRange(up, 50, 100)
|
||||
_, etag, err = stream.CacheFullInTempFileAndHash(s, cacheFileProgress, utils.MD5)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -180,7 +180,7 @@ func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, file model.FileS
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tempFile, err := file.CacheFullAndWriter(&up, nil)
|
||||
tempFile, err := file.CacheFullInTempFile()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -263,7 +263,7 @@ func (d *MoPan) Remove(ctx context.Context, obj model.Obj) error {
|
||||
}
|
||||
|
||||
func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
|
||||
file, err := stream.CacheFullAndWriter(&up, nil)
|
||||
file, err := stream.CacheFullInTempFile()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -223,7 +223,7 @@ func (d *NeteaseMusic) removeSongObj(file model.Obj) error {
|
||||
}
|
||||
|
||||
func (d *NeteaseMusic) putSongStream(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress) error {
|
||||
tmp, err := stream.CacheFullAndWriter(&up, nil)
|
||||
tmp, err := stream.CacheFullInTempFile()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -238,14 +238,13 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
|
||||
var finish int64 = 0
|
||||
DEFAULT := d.ChunkSize * 1024 * 1024
|
||||
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
|
||||
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
|
||||
var finish int64 = 0
|
||||
for finish < stream.GetSize() {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
@ -286,7 +285,7 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil
|
||||
retry.DelayType(retry.BackOffDelay),
|
||||
retry.Delay(time.Second),
|
||||
)
|
||||
ss.FreeSectionReader(rd)
|
||||
ss.RecycleSectionReader(rd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -152,14 +152,13 @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model.
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
|
||||
var finish int64 = 0
|
||||
DEFAULT := d.ChunkSize * 1024 * 1024
|
||||
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
|
||||
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
|
||||
var finish int64 = 0
|
||||
for finish < stream.GetSize() {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
@ -200,7 +199,7 @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model.
|
||||
retry.DelayType(retry.BackOffDelay),
|
||||
retry.Delay(time.Second),
|
||||
)
|
||||
ss.FreeSectionReader(rd)
|
||||
ss.RecycleSectionReader(rd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1,181 +0,0 @@
|
||||
package openlist_share
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
stdpath "path"
|
||||
"strings"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
"github.com/OpenListTeam/OpenList/v4/server/common"
|
||||
"github.com/go-resty/resty/v2"
|
||||
)
|
||||
|
||||
type OpenListShare struct {
|
||||
model.Storage
|
||||
Addition
|
||||
serverArchivePreview bool
|
||||
}
|
||||
|
||||
func (d *OpenListShare) Config() driver.Config {
|
||||
return config
|
||||
}
|
||||
|
||||
func (d *OpenListShare) GetAddition() driver.Additional {
|
||||
return &d.Addition
|
||||
}
|
||||
|
||||
func (d *OpenListShare) Init(ctx context.Context) error {
|
||||
d.Addition.Address = strings.TrimSuffix(d.Addition.Address, "/")
|
||||
var settings common.Resp[map[string]string]
|
||||
_, _, err := d.request("/public/settings", http.MethodGet, func(req *resty.Request) {
|
||||
req.SetResult(&settings)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.serverArchivePreview = settings.Data["share_archive_preview"] == "true"
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *OpenListShare) Drop(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *OpenListShare) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
||||
var resp common.Resp[FsListResp]
|
||||
_, _, err := d.request("/fs/list", http.MethodPost, func(req *resty.Request) {
|
||||
req.SetResult(&resp).SetBody(ListReq{
|
||||
PageReq: model.PageReq{
|
||||
Page: 1,
|
||||
PerPage: 0,
|
||||
},
|
||||
Path: stdpath.Join(fmt.Sprintf("/@s/%s", d.ShareId), dir.GetPath()),
|
||||
Password: d.Pwd,
|
||||
Refresh: false,
|
||||
})
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var files []model.Obj
|
||||
for _, f := range resp.Data.Content {
|
||||
file := model.ObjThumb{
|
||||
Object: model.Object{
|
||||
Name: f.Name,
|
||||
Modified: f.Modified,
|
||||
Ctime: f.Created,
|
||||
Size: f.Size,
|
||||
IsFolder: f.IsDir,
|
||||
HashInfo: utils.FromString(f.HashInfo),
|
||||
},
|
||||
Thumbnail: model.Thumbnail{Thumbnail: f.Thumb},
|
||||
}
|
||||
files = append(files, &file)
|
||||
}
|
||||
return files, nil
|
||||
}
|
||||
|
||||
func (d *OpenListShare) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
||||
path := utils.FixAndCleanPath(stdpath.Join(d.ShareId, file.GetPath()))
|
||||
u := fmt.Sprintf("%s/sd%s?pwd=%s", d.Address, path, d.Pwd)
|
||||
return &model.Link{URL: u}, nil
|
||||
}
|
||||
|
||||
func (d *OpenListShare) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
|
||||
if !d.serverArchivePreview || !d.ForwardArchiveReq {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
var resp common.Resp[ArchiveMetaResp]
|
||||
_, code, err := d.request("/fs/archive/meta", http.MethodPost, func(req *resty.Request) {
|
||||
req.SetResult(&resp).SetBody(ArchiveMetaReq{
|
||||
ArchivePass: args.Password,
|
||||
Path: stdpath.Join(fmt.Sprintf("/@s/%s", d.ShareId), obj.GetPath()),
|
||||
Password: d.Pwd,
|
||||
Refresh: false,
|
||||
})
|
||||
})
|
||||
if code == 202 {
|
||||
return nil, errs.WrongArchivePassword
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var tree []model.ObjTree
|
||||
if resp.Data.Content != nil {
|
||||
tree = make([]model.ObjTree, 0, len(resp.Data.Content))
|
||||
for _, content := range resp.Data.Content {
|
||||
tree = append(tree, &content)
|
||||
}
|
||||
}
|
||||
return &model.ArchiveMetaInfo{
|
||||
Comment: resp.Data.Comment,
|
||||
Encrypted: resp.Data.Encrypted,
|
||||
Tree: tree,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (d *OpenListShare) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
|
||||
if !d.serverArchivePreview || !d.ForwardArchiveReq {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
var resp common.Resp[ArchiveListResp]
|
||||
_, code, err := d.request("/fs/archive/list", http.MethodPost, func(req *resty.Request) {
|
||||
req.SetResult(&resp).SetBody(ArchiveListReq{
|
||||
ArchiveMetaReq: ArchiveMetaReq{
|
||||
ArchivePass: args.Password,
|
||||
Path: stdpath.Join(fmt.Sprintf("/@s/%s", d.ShareId), obj.GetPath()),
|
||||
Password: d.Pwd,
|
||||
Refresh: false,
|
||||
},
|
||||
PageReq: model.PageReq{
|
||||
Page: 1,
|
||||
PerPage: 0,
|
||||
},
|
||||
InnerPath: args.InnerPath,
|
||||
})
|
||||
})
|
||||
if code == 202 {
|
||||
return nil, errs.WrongArchivePassword
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var files []model.Obj
|
||||
for _, f := range resp.Data.Content {
|
||||
file := model.ObjThumb{
|
||||
Object: model.Object{
|
||||
Name: f.Name,
|
||||
Modified: f.Modified,
|
||||
Ctime: f.Created,
|
||||
Size: f.Size,
|
||||
IsFolder: f.IsDir,
|
||||
HashInfo: utils.FromString(f.HashInfo),
|
||||
},
|
||||
Thumbnail: model.Thumbnail{Thumbnail: f.Thumb},
|
||||
}
|
||||
files = append(files, &file)
|
||||
}
|
||||
return files, nil
|
||||
}
|
||||
|
||||
func (d *OpenListShare) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
|
||||
if !d.serverArchivePreview || !d.ForwardArchiveReq {
|
||||
return nil, errs.NotSupport
|
||||
}
|
||||
path := utils.FixAndCleanPath(stdpath.Join(d.ShareId, obj.GetPath()))
|
||||
u := fmt.Sprintf("%s/sad%s?pwd=%s&inner=%s&pass=%s",
|
||||
d.Address,
|
||||
path,
|
||||
d.Pwd,
|
||||
utils.EncodePath(args.InnerPath, true),
|
||||
url.QueryEscape(args.Password))
|
||||
return &model.Link{URL: u}, nil
|
||||
}
|
||||
|
||||
var _ driver.Driver = (*OpenListShare)(nil)
|
@ -1,27 +0,0 @@
|
||||
package openlist_share
|
||||
|
||||
import (
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||
)
|
||||
|
||||
type Addition struct {
|
||||
driver.RootPath
|
||||
Address string `json:"url" required:"true"`
|
||||
ShareId string `json:"sid" required:"true"`
|
||||
Pwd string `json:"pwd"`
|
||||
ForwardArchiveReq bool `json:"forward_archive_requests" default:"true"`
|
||||
}
|
||||
|
||||
var config = driver.Config{
|
||||
Name: "OpenListShare",
|
||||
LocalSort: true,
|
||||
NoUpload: true,
|
||||
DefaultRoot: "/",
|
||||
}
|
||||
|
||||
func init() {
|
||||
op.RegisterDriver(func() driver.Driver {
|
||||
return &OpenListShare{}
|
||||
})
|
||||
}
|
@ -1,111 +0,0 @@
|
||||
package openlist_share
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
)
|
||||
|
||||
type ListReq struct {
|
||||
model.PageReq
|
||||
Path string `json:"path" form:"path"`
|
||||
Password string `json:"password" form:"password"`
|
||||
Refresh bool `json:"refresh"`
|
||||
}
|
||||
|
||||
type ObjResp struct {
|
||||
Name string `json:"name"`
|
||||
Size int64 `json:"size"`
|
||||
IsDir bool `json:"is_dir"`
|
||||
Modified time.Time `json:"modified"`
|
||||
Created time.Time `json:"created"`
|
||||
Sign string `json:"sign"`
|
||||
Thumb string `json:"thumb"`
|
||||
Type int `json:"type"`
|
||||
HashInfo string `json:"hashinfo"`
|
||||
}
|
||||
|
||||
type FsListResp struct {
|
||||
Content []ObjResp `json:"content"`
|
||||
Total int64 `json:"total"`
|
||||
Readme string `json:"readme"`
|
||||
Write bool `json:"write"`
|
||||
Provider string `json:"provider"`
|
||||
}
|
||||
|
||||
type ArchiveMetaReq struct {
|
||||
ArchivePass string `json:"archive_pass"`
|
||||
Password string `json:"password"`
|
||||
Path string `json:"path"`
|
||||
Refresh bool `json:"refresh"`
|
||||
}
|
||||
|
||||
type TreeResp struct {
|
||||
ObjResp
|
||||
Children []TreeResp `json:"children"`
|
||||
hashCache *utils.HashInfo
|
||||
}
|
||||
|
||||
func (t *TreeResp) GetSize() int64 {
|
||||
return t.Size
|
||||
}
|
||||
|
||||
func (t *TreeResp) GetName() string {
|
||||
return t.Name
|
||||
}
|
||||
|
||||
func (t *TreeResp) ModTime() time.Time {
|
||||
return t.Modified
|
||||
}
|
||||
|
||||
func (t *TreeResp) CreateTime() time.Time {
|
||||
return t.Created
|
||||
}
|
||||
|
||||
func (t *TreeResp) IsDir() bool {
|
||||
return t.ObjResp.IsDir
|
||||
}
|
||||
|
||||
func (t *TreeResp) GetHash() utils.HashInfo {
|
||||
return utils.FromString(t.HashInfo)
|
||||
}
|
||||
|
||||
func (t *TreeResp) GetID() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (t *TreeResp) GetPath() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (t *TreeResp) GetChildren() []model.ObjTree {
|
||||
ret := make([]model.ObjTree, 0, len(t.Children))
|
||||
for _, child := range t.Children {
|
||||
ret = append(ret, &child)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func (t *TreeResp) Thumb() string {
|
||||
return t.ObjResp.Thumb
|
||||
}
|
||||
|
||||
type ArchiveMetaResp struct {
|
||||
Comment string `json:"comment"`
|
||||
Encrypted bool `json:"encrypted"`
|
||||
Content []TreeResp `json:"content"`
|
||||
RawURL string `json:"raw_url"`
|
||||
Sign string `json:"sign"`
|
||||
}
|
||||
|
||||
type ArchiveListReq struct {
|
||||
model.PageReq
|
||||
ArchiveMetaReq
|
||||
InnerPath string `json:"inner_path"`
|
||||
}
|
||||
|
||||
type ArchiveListResp struct {
|
||||
Content []ObjResp `json:"content"`
|
||||
Total int64 `json:"total"`
|
||||
}
|
@ -1,32 +0,0 @@
|
||||
package openlist_share
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/drivers/base"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
)
|
||||
|
||||
func (d *OpenListShare) request(api, method string, callback base.ReqCallback) ([]byte, int, error) {
|
||||
url := d.Address + "/api" + api
|
||||
req := base.RestyClient.R()
|
||||
if callback != nil {
|
||||
callback(req)
|
||||
}
|
||||
res, err := req.Execute(method, url)
|
||||
if err != nil {
|
||||
code := 0
|
||||
if res != nil {
|
||||
code = res.StatusCode()
|
||||
}
|
||||
return nil, code, err
|
||||
}
|
||||
if res.StatusCode() >= 400 {
|
||||
return nil, res.StatusCode(), fmt.Errorf("request failed, status: %s", res.Status())
|
||||
}
|
||||
code := utils.Json.Get(res.Body(), "code").ToInt()
|
||||
if code != 200 {
|
||||
return nil, code, fmt.Errorf("request failed, code: %d, message: %s", code, utils.Json.Get(res.Body(), "message").ToString())
|
||||
}
|
||||
return res.Body(), 200, nil
|
||||
}
|
@ -12,7 +12,6 @@ import (
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||
streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
hash_extend "github.com/OpenListTeam/OpenList/v4/pkg/utils/hash"
|
||||
"github.com/go-resty/resty/v2"
|
||||
@ -213,11 +212,15 @@ func (d *PikPak) Remove(ctx context.Context, obj model.Obj) error {
|
||||
}
|
||||
|
||||
func (d *PikPak) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
|
||||
sha1Str := stream.GetHash().GetHash(hash_extend.GCID)
|
||||
|
||||
hi := stream.GetHash()
|
||||
sha1Str := hi.GetHash(hash_extend.GCID)
|
||||
if len(sha1Str) < hash_extend.GCID.Width {
|
||||
var err error
|
||||
_, sha1Str, err = streamPkg.CacheFullAndHash(stream, &up, hash_extend.GCID, stream.GetSize())
|
||||
tFile, err := stream.CacheFullInTempFile()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sha1Str, err = utils.HashFile(hash_extend.GCID, tFile, stream.GetSize())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -438,19 +438,20 @@ func (d *PikPak) UploadByOSS(ctx context.Context, params *S3Params, s model.File
|
||||
}
|
||||
|
||||
func (d *PikPak) UploadByMultipart(ctx context.Context, params *S3Params, fileSize int64, s model.FileStreamer, up driver.UpdateProgress) error {
|
||||
tmpF, err := s.CacheFullAndWriter(&up, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var (
|
||||
chunks []oss.FileChunk
|
||||
parts []oss.UploadPart
|
||||
imur oss.InitiateMultipartUploadResult
|
||||
ossClient *oss.Client
|
||||
bucket *oss.Bucket
|
||||
err error
|
||||
)
|
||||
|
||||
tmpF, err := s.CacheFullInTempFile()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if ossClient, err = oss.New(params.Endpoint, params.AccessKeyID, params.AccessKeySecret); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -14,6 +14,7 @@ import (
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
"github.com/go-resty/resty/v2"
|
||||
)
|
||||
@ -157,7 +158,9 @@ func (d *QuarkOpen) Put(ctx context.Context, dstDir model.Obj, stream model.File
|
||||
}
|
||||
|
||||
if len(writers) > 0 {
|
||||
_, err := stream.CacheFullAndWriter(&up, io.MultiWriter(writers...))
|
||||
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
|
||||
up = model.UpdateProgressWithRange(up, 50, 100)
|
||||
_, err := streamPkg.CacheFullInTempFileAndWriter(stream, cacheFileProgress, io.MultiWriter(writers...))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -13,6 +13,7 @@ import (
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
"github.com/go-resty/resty/v2"
|
||||
log "github.com/sirupsen/logrus"
|
||||
@ -143,7 +144,9 @@ func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.File
|
||||
}
|
||||
|
||||
if len(writers) > 0 {
|
||||
_, err := stream.CacheFullAndWriter(&up, io.MultiWriter(writers...))
|
||||
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
|
||||
up = model.UpdateProgressWithRange(up, 50, 100)
|
||||
_, err := streamPkg.CacheFullInTempFileAndWriter(stream, cacheFileProgress, io.MultiWriter(writers...))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -173,9 +173,8 @@ func (d *Strm) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*
|
||||
}, nil
|
||||
}
|
||||
|
||||
resultLink := *link
|
||||
resultLink.SyncClosers = utils.NewSyncClosers(link)
|
||||
return &resultLink, nil
|
||||
// 没有修改link的字段,可直接返回
|
||||
return link, nil
|
||||
}
|
||||
|
||||
var _ driver.Driver = (*Strm)(nil)
|
||||
|
@ -132,7 +132,7 @@ func (d *Terabox) Remove(ctx context.Context, obj model.Obj) error {
|
||||
func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
|
||||
resp, err := base.RestyClient.R().
|
||||
SetContext(ctx).
|
||||
Get("https://" + d.url_domain_prefix + "-data.terabox.com/rest/2.0/pcs/file?method=locateupload")
|
||||
Get("https://d.terabox.com/rest/2.0/pcs/file?method=locateupload")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -179,7 +179,7 @@ func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
|
||||
}
|
||||
|
||||
// upload chunks
|
||||
tempFile, err := stream.CacheFullAndWriter(&up, nil)
|
||||
tempFile, err := stream.CacheFullInTempFile()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -371,7 +371,9 @@ func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, file model.Fi
|
||||
gcid := file.GetHash().GetHash(hash_extend.GCID)
|
||||
var err error
|
||||
if len(gcid) < hash_extend.GCID.Width {
|
||||
_, gcid, err = stream.CacheFullAndHash(file, &up, hash_extend.GCID, file.GetSize())
|
||||
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
|
||||
up = model.UpdateProgressWithRange(up, 50, 100)
|
||||
_, gcid, err = stream.CacheFullInTempFileAndHash(file, cacheFileProgress, hash_extend.GCID, file.GetSize())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -491,7 +491,9 @@ func (xc *XunLeiBrowserCommon) Put(ctx context.Context, dstDir model.Obj, stream
|
||||
gcid := stream.GetHash().GetHash(hash_extend.GCID)
|
||||
var err error
|
||||
if len(gcid) < hash_extend.GCID.Width {
|
||||
_, gcid, err = streamPkg.CacheFullAndHash(stream, &up, hash_extend.GCID, stream.GetSize())
|
||||
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
|
||||
up = model.UpdateProgressWithRange(up, 50, 100)
|
||||
_, gcid, err = streamPkg.CacheFullInTempFileAndHash(stream, cacheFileProgress, hash_extend.GCID, stream.GetSize())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -372,7 +372,9 @@ func (xc *XunLeiXCommon) Put(ctx context.Context, dstDir model.Obj, file model.F
|
||||
gcid := file.GetHash().GetHash(hash_extend.GCID)
|
||||
var err error
|
||||
if len(gcid) < hash_extend.GCID.Width {
|
||||
_, gcid, err = stream.CacheFullAndHash(file, &up, hash_extend.GCID, file.GetSize())
|
||||
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
|
||||
up = model.UpdateProgressWithRange(up, 50, 100)
|
||||
_, gcid, err = stream.CacheFullInTempFileAndHash(file, cacheFileProgress, hash_extend.GCID, file.GetSize())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -10,7 +10,7 @@ type Addition struct {
|
||||
// driver.RootPath
|
||||
// driver.RootID
|
||||
// define other
|
||||
UrlStructure string `json:"url_structure" type:"text" required:"true" default:"https://raw.githubusercontent.com/OpenListTeam/OpenList/main/README.md\nhttps://raw.githubusercontent.com/OpenListTeam/OpenList/main/README_cn.md\nfolder:\n CONTRIBUTING.md:1635:https://raw.githubusercontent.com/OpenListTeam/OpenList/main/CONTRIBUTING.md\n CODE_OF_CONDUCT.md:2093:https://raw.githubusercontent.com/OpenListTeam/OpenList/main/CODE_OF_CONDUCT.md" help:"structure:FolderName:\n [FileName:][FileSize:][Modified:]Url"`
|
||||
UrlStructure string `json:"url_structure" type:"text" required:"true" default:"https://cdn.oplist.org/gh/OpenListTeam/OpenList/README.md\nhttps://cdn.oplist.org/gh/OpenListTeam/OpenList/README_cn.md\nfolder:\n CONTRIBUTING.md:1635:https://cdn.oplist.org/gh/OpenListTeam/OpenList/CONTRIBUTING.md\n CODE_OF_CONDUCT.md:2093:https://cdn.oplist.org/gh/OpenListTeam/OpenList/CODE_OF_CONDUCT.md" help:"structure:FolderName:\n [FileName:][FileSize:][Modified:]Url"`
|
||||
HeadSize bool `json:"head_size" type:"bool" default:"false" help:"Use head method to get file size, but it may be failed."`
|
||||
Writable bool `json:"writable" type:"bool" default:"false"`
|
||||
}
|
||||
|
@ -317,7 +317,7 @@ func (d *WeiYun) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
|
||||
if folder, ok = dstDir.(*Folder); !ok {
|
||||
return nil, errs.NotSupport
|
||||
}
|
||||
file, err := stream.CacheFullAndWriter(&up, nil)
|
||||
file, err := stream.CacheFullInTempFile()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -5,21 +5,10 @@ umask ${UMASK}
|
||||
if [ "$1" = "version" ]; then
|
||||
./openlist version
|
||||
else
|
||||
# Define the target directory path for aria2 service
|
||||
ARIA2_DIR="/opt/service/start/aria2"
|
||||
|
||||
if [ "$RUN_ARIA2" = "true" ]; then
|
||||
# If aria2 should run and target directory doesn't exist, copy it
|
||||
if [ ! -d "$ARIA2_DIR" ]; then
|
||||
mkdir -p "$ARIA2_DIR"
|
||||
cp -r /opt/service/stop/aria2/* "$ARIA2_DIR" 2>/dev/null
|
||||
fi
|
||||
runsvdir /opt/service/start &
|
||||
else
|
||||
# If aria2 should NOT run and target directory exists, remove it
|
||||
if [ -d "$ARIA2_DIR" ]; then
|
||||
rm -rf "$ARIA2_DIR"
|
||||
fi
|
||||
cp -a /opt/service/stop/aria2 /opt/service/start 2>/dev/null
|
||||
fi
|
||||
exec ./openlist server --no-prefix
|
||||
fi
|
||||
|
||||
chown -R ${PUID}:${PGID} /opt
|
||||
exec su-exec ${PUID}:${PGID} runsvdir /opt/service/start
|
||||
fi
|
2
go.mod
2
go.mod
@ -21,7 +21,7 @@ require (
|
||||
github.com/charmbracelet/bubbletea v1.3.6
|
||||
github.com/charmbracelet/lipgloss v1.1.0
|
||||
github.com/city404/v6-public-rpc-proto/go v0.0.0-20240817070657-90f8e24b653e
|
||||
github.com/coreos/go-oidc v2.3.0+incompatible
|
||||
github.com/coreos/go-oidc v2.4.0+incompatible
|
||||
github.com/deckarep/golang-set/v2 v2.8.0
|
||||
github.com/dhowden/tag v0.0.0-20240417053706-3d75831295e8
|
||||
github.com/disintegration/imaging v1.6.2
|
||||
|
2
go.sum
2
go.sum
@ -205,6 +205,8 @@ github.com/cloudwego/base64x v0.1.5/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJ
|
||||
github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
|
||||
github.com/coreos/go-oidc v2.3.0+incompatible h1:+5vEsrgprdLjjQ9FzIKAzQz1wwPD+83hQRfUIPh7rO0=
|
||||
github.com/coreos/go-oidc v2.3.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
|
||||
github.com/coreos/go-oidc v2.4.0+incompatible h1:xjdlhLWXcINyUJgLQ9I76g7osgC2goiL6JDXS6Fegjk=
|
||||
github.com/coreos/go-oidc v2.4.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
|
||||
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
|
||||
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
|
||||
|
@ -77,10 +77,6 @@ func InitConfig() {
|
||||
log.Fatalf("update config struct error: %+v", err)
|
||||
}
|
||||
}
|
||||
if !conf.Conf.Force {
|
||||
confFromEnv()
|
||||
}
|
||||
|
||||
if conf.Conf.MaxConcurrency > 0 {
|
||||
net.DefaultConcurrencyLimit = &net.ConcurrencyLimit{Limit: conf.Conf.MaxConcurrency}
|
||||
}
|
||||
@ -95,32 +91,26 @@ func InitConfig() {
|
||||
} else {
|
||||
conf.MaxBufferLimit = conf.Conf.MaxBufferLimit * utils.MB
|
||||
}
|
||||
log.Infof("max buffer limit: %dMB", conf.MaxBufferLimit/utils.MB)
|
||||
if conf.Conf.MmapThreshold > 0 {
|
||||
conf.MmapThreshold = conf.Conf.MmapThreshold * utils.MB
|
||||
} else {
|
||||
conf.MmapThreshold = 0
|
||||
log.Infof("max buffer limit: %d", conf.MaxBufferLimit)
|
||||
if !conf.Conf.Force {
|
||||
confFromEnv()
|
||||
}
|
||||
log.Infof("mmap threshold: %dMB", conf.Conf.MmapThreshold)
|
||||
|
||||
if len(conf.Conf.Log.Filter.Filters) == 0 {
|
||||
conf.Conf.Log.Filter.Enable = false
|
||||
}
|
||||
// convert abs path
|
||||
convertAbsPath := func(path *string) {
|
||||
if *path != "" && !filepath.IsAbs(*path) {
|
||||
if !filepath.IsAbs(*path) {
|
||||
*path = filepath.Join(pwd, *path)
|
||||
}
|
||||
}
|
||||
convertAbsPath(&conf.Conf.Database.DBFile)
|
||||
convertAbsPath(&conf.Conf.Scheme.CertFile)
|
||||
convertAbsPath(&conf.Conf.Scheme.KeyFile)
|
||||
convertAbsPath(&conf.Conf.Scheme.UnixFile)
|
||||
convertAbsPath(&conf.Conf.Log.Name)
|
||||
convertAbsPath(&conf.Conf.TempDir)
|
||||
convertAbsPath(&conf.Conf.BleveDir)
|
||||
convertAbsPath(&conf.Conf.DistDir)
|
||||
|
||||
convertAbsPath(&conf.Conf.Log.Name)
|
||||
convertAbsPath(&conf.Conf.Database.DBFile)
|
||||
if conf.Conf.DistDir != "" {
|
||||
convertAbsPath(&conf.Conf.DistDir)
|
||||
}
|
||||
err := os.MkdirAll(conf.Conf.TempDir, 0o777)
|
||||
if err != nil {
|
||||
log.Fatalf("create temp dir error: %+v", err)
|
||||
|
@ -107,11 +107,10 @@ func InitialSettings() []model.SettingItem {
|
||||
{Key: conf.AllowMounted, Value: "true", Type: conf.TypeBool, Group: model.SITE},
|
||||
{Key: conf.RobotsTxt, Value: "User-agent: *\nAllow: /", Type: conf.TypeText, Group: model.SITE},
|
||||
// style settings
|
||||
{Key: conf.Logo, Value: "https://res.oplist.org/logo/logo.svg", MigrationValue: "https://cdn.oplist.org/gh/OpenListTeam/Logo@main/logo.svg", Type: conf.TypeText, Group: model.STYLE},
|
||||
{Key: conf.Favicon, Value: "https://res.oplist.org/logo/logo.svg", MigrationValue: "https://cdn.oplist.org/gh/OpenListTeam/Logo@main/logo.svg", Type: conf.TypeString, Group: model.STYLE},
|
||||
{Key: conf.Logo, Value: "https://cdn.oplist.org/gh/OpenListTeam/Logo@main/logo.svg", Type: conf.TypeText, Group: model.STYLE},
|
||||
{Key: conf.Favicon, Value: "https://cdn.oplist.org/gh/OpenListTeam/Logo@main/logo.svg", Type: conf.TypeString, Group: model.STYLE},
|
||||
{Key: conf.MainColor, Value: "#1890ff", Type: conf.TypeString, Group: model.STYLE},
|
||||
{Key: "home_icon", Value: "🏠", Type: conf.TypeString, Group: model.STYLE},
|
||||
{Key: "share_icon", Value: "🎁", Type: conf.TypeString, Group: model.STYLE},
|
||||
{Key: "home_container", Value: "max_980px", Type: conf.TypeSelect, Options: "max_980px,hope_container", Group: model.STYLE},
|
||||
{Key: "settings_layout", Value: "list", Type: conf.TypeSelect, Options: "list,responsive", Group: model.STYLE},
|
||||
// preview settings
|
||||
@ -142,7 +141,7 @@ func InitialSettings() []model.SettingItem {
|
||||
// {Key: conf.PdfViewers, Value: `{
|
||||
// "pdf.js":"https://openlistteam.github.io/pdf.js/web/viewer.html?file=$url"
|
||||
//}`, Type: conf.TypeText, Group: model.PREVIEW},
|
||||
{Key: "audio_cover", Value: "https://res.oplist.org/logo/logo.svg", MigrationValue: "https://cdn.oplist.org/gh/OpenListTeam/Logo@main/logo.svg", Type: conf.TypeString, Group: model.PREVIEW},
|
||||
{Key: "audio_cover", Value: "https://cdn.oplist.org/gh/OpenListTeam/Logo@main/logo.svg", Type: conf.TypeString, Group: model.PREVIEW},
|
||||
{Key: conf.AudioAutoplay, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
|
||||
{Key: conf.VideoAutoplay, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
|
||||
{Key: conf.PreviewArchivesByDefault, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
|
||||
@ -164,10 +163,6 @@ func InitialSettings() []model.SettingItem {
|
||||
{Key: conf.ForwardDirectLinkParams, Value: "false", Type: conf.TypeBool, Group: model.GLOBAL},
|
||||
{Key: conf.IgnoreDirectLinkParams, Value: "sign,openlist_ts", Type: conf.TypeString, Group: model.GLOBAL},
|
||||
{Key: conf.WebauthnLoginEnabled, Value: "false", Type: conf.TypeBool, Group: model.GLOBAL, Flag: model.PUBLIC},
|
||||
{Key: conf.SharePreview, Value: "false", Type: conf.TypeBool, Group: model.GLOBAL, Flag: model.PUBLIC},
|
||||
{Key: conf.ShareArchivePreview, Value: "false", Type: conf.TypeBool, Group: model.GLOBAL, Flag: model.PUBLIC},
|
||||
{Key: conf.ShareForceProxy, Value: "true", Type: conf.TypeBool, Group: model.GLOBAL, Flag: model.PRIVATE},
|
||||
{Key: conf.ShareSummaryContent, Value: "@{{creator}} shared {{#each files}}{{#if @first}}\"{{filename this}}\"{{/if}}{{#if @last}}{{#unless (eq @index 0)}} and {{@index}} more files{{/unless}}{{/if}}{{/each}} from {{site_title}}: {{base_url}}/@s/{{id}}{{#if pwd}} , the share code is {{pwd}}{{/if}}{{#if expires}}, please access before {{dateLocaleString expires}}.{{/if}}", Type: conf.TypeText, Group: model.GLOBAL, Flag: model.PUBLIC},
|
||||
|
||||
// single settings
|
||||
{Key: conf.Token, Value: token, Type: conf.TypeString, Group: model.SINGLE, Flag: model.PRIVATE},
|
||||
|
@ -33,8 +33,8 @@ func initUser() {
|
||||
Role: model.ADMIN,
|
||||
BasePath: "/",
|
||||
Authn: "[]",
|
||||
// 0(can see hidden) - 8(webdav read) & 12(can read archives) - 14(can share)
|
||||
Permission: 0x71FF,
|
||||
// 0(can see hidden) - 7(can remove) & 12(can read archives) - 13(can decompress archives)
|
||||
Permission: 0x31FF,
|
||||
}
|
||||
if err := op.CreateUser(admin); err != nil {
|
||||
panic(err)
|
||||
|
@ -120,7 +120,6 @@ type Config struct {
|
||||
Log LogConfig `json:"log" envPrefix:"LOG_"`
|
||||
DelayedStart int `json:"delayed_start" env:"DELAYED_START"`
|
||||
MaxBufferLimit int `json:"max_buffer_limitMB" env:"MAX_BUFFER_LIMIT_MB"`
|
||||
MmapThreshold int `json:"mmap_thresholdMB" env:"MMAP_THRESHOLD_MB"`
|
||||
MaxConnections int `json:"max_connections" env:"MAX_CONNECTIONS"`
|
||||
MaxConcurrency int `json:"max_concurrency" env:"MAX_CONCURRENCY"`
|
||||
TlsInsecureSkipVerify bool `json:"tls_insecure_skip_verify" env:"TLS_INSECURE_SKIP_VERIFY"`
|
||||
@ -177,7 +176,6 @@ func DefaultConfig(dataDir string) *Config {
|
||||
},
|
||||
},
|
||||
MaxBufferLimit: -1,
|
||||
MmapThreshold: 4,
|
||||
MaxConnections: 0,
|
||||
MaxConcurrency: 64,
|
||||
TlsInsecureSkipVerify: true,
|
||||
|
@ -33,7 +33,6 @@ const (
|
||||
PreviewArchivesByDefault = "preview_archives_by_default"
|
||||
ReadMeAutoRender = "readme_autorender"
|
||||
FilterReadMeScripts = "filter_readme_scripts"
|
||||
|
||||
// global
|
||||
HideFiles = "hide_files"
|
||||
CustomizeHead = "customize_head"
|
||||
@ -46,10 +45,6 @@ const (
|
||||
ForwardDirectLinkParams = "forward_direct_link_params"
|
||||
IgnoreDirectLinkParams = "ignore_direct_link_params"
|
||||
WebauthnLoginEnabled = "webauthn_login_enabled"
|
||||
SharePreview = "share_preview"
|
||||
ShareArchivePreview = "share_archive_preview"
|
||||
ShareForceProxy = "share_force_proxy"
|
||||
ShareSummaryContent = "share_summary_content"
|
||||
|
||||
// index
|
||||
SearchIndex = "search_index"
|
||||
@ -172,5 +167,4 @@ const (
|
||||
RequestHeaderKey
|
||||
UserAgentKey
|
||||
PathKey
|
||||
SharingIDKey
|
||||
)
|
||||
|
@ -25,10 +25,7 @@ var PrivacyReg []*regexp.Regexp
|
||||
var (
|
||||
// StoragesLoaded loaded success if empty
|
||||
StoragesLoaded = false
|
||||
// 单个Buffer最大限制
|
||||
MaxBufferLimit = 16 * 1024 * 1024
|
||||
// 超过该阈值的Buffer将使用 mmap 分配,可主动释放内存
|
||||
MmapThreshold = 4 * 1024 * 1024
|
||||
MaxBufferLimit int
|
||||
)
|
||||
var (
|
||||
RawIndexHtml string
|
||||
|
@ -12,7 +12,7 @@ var db *gorm.DB
|
||||
|
||||
func Init(d *gorm.DB) {
|
||||
db = d
|
||||
err := AutoMigrate(new(model.Storage), new(model.User), new(model.Meta), new(model.SettingItem), new(model.SearchNode), new(model.TaskItem), new(model.SSHPublicKey), new(model.SharingDB))
|
||||
err := AutoMigrate(new(model.Storage), new(model.User), new(model.Meta), new(model.SettingItem), new(model.SearchNode), new(model.TaskItem), new(model.SSHPublicKey))
|
||||
if err != nil {
|
||||
log.Fatalf("failed migrate database: %s", err.Error())
|
||||
}
|
||||
|
@ -1,62 +0,0 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils/random"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func GetSharingById(id string) (*model.SharingDB, error) {
|
||||
s := model.SharingDB{ID: id}
|
||||
if err := db.Where(s).First(&s).Error; err != nil {
|
||||
return nil, errors.Wrapf(err, "failed get sharing")
|
||||
}
|
||||
return &s, nil
|
||||
}
|
||||
|
||||
func GetSharings(pageIndex, pageSize int) (sharings []model.SharingDB, count int64, err error) {
|
||||
sharingDB := db.Model(&model.SharingDB{})
|
||||
if err := sharingDB.Count(&count).Error; err != nil {
|
||||
return nil, 0, errors.Wrapf(err, "failed get sharings count")
|
||||
}
|
||||
if err := sharingDB.Order(columnName("id")).Offset((pageIndex - 1) * pageSize).Limit(pageSize).Find(&sharings).Error; err != nil {
|
||||
return nil, 0, errors.Wrapf(err, "failed get find sharings")
|
||||
}
|
||||
return sharings, count, nil
|
||||
}
|
||||
|
||||
func GetSharingsByCreatorId(creator uint, pageIndex, pageSize int) (sharings []model.SharingDB, count int64, err error) {
|
||||
sharingDB := db.Model(&model.SharingDB{})
|
||||
cond := model.SharingDB{CreatorId: creator}
|
||||
if err := sharingDB.Where(cond).Count(&count).Error; err != nil {
|
||||
return nil, 0, errors.Wrapf(err, "failed get sharings count")
|
||||
}
|
||||
if err := sharingDB.Where(cond).Order(columnName("id")).Offset((pageIndex - 1) * pageSize).Limit(pageSize).Find(&sharings).Error; err != nil {
|
||||
return nil, 0, errors.Wrapf(err, "failed get find sharings")
|
||||
}
|
||||
return sharings, count, nil
|
||||
}
|
||||
|
||||
func CreateSharing(s *model.SharingDB) (string, error) {
|
||||
id := random.String(8)
|
||||
for len(id) < 12 {
|
||||
old := model.SharingDB{
|
||||
ID: id,
|
||||
}
|
||||
if err := db.Where(old).First(&old).Error; err != nil {
|
||||
s.ID = id
|
||||
return id, errors.WithStack(db.Create(s).Error)
|
||||
}
|
||||
id += random.String(1)
|
||||
}
|
||||
return "", errors.New("failed find valid id")
|
||||
}
|
||||
|
||||
func UpdateSharing(s *model.SharingDB) error {
|
||||
return errors.WithStack(db.Save(s).Error)
|
||||
}
|
||||
|
||||
func DeleteSharingById(id string) error {
|
||||
s := model.SharingDB{ID: id}
|
||||
return errors.WithStack(db.Where(s).Delete(&s).Error)
|
||||
}
|
@ -23,10 +23,6 @@ var (
|
||||
UnknownArchiveFormat = errors.New("unknown archive format")
|
||||
WrongArchivePassword = errors.New("wrong archive password")
|
||||
DriverExtractNotSupported = errors.New("driver extraction not supported")
|
||||
|
||||
WrongShareCode = errors.New("wrong share code")
|
||||
InvalidSharing = errors.New("invalid sharing")
|
||||
SharingNotFound = errors.New("sharing not found")
|
||||
)
|
||||
|
||||
// NewErr wrap constant error with an extra message
|
||||
|
@ -70,25 +70,25 @@ func (t *ArchiveDownloadTask) RunWithoutPushUploadTask() (*ArchiveContentUploadT
|
||||
}()
|
||||
var decompressUp model.UpdateProgress
|
||||
if t.CacheFull {
|
||||
total := int64(0)
|
||||
var total, cur int64 = 0, 0
|
||||
for _, s := range ss {
|
||||
total += s.GetSize()
|
||||
}
|
||||
t.SetTotalBytes(total)
|
||||
t.Status = "getting src object"
|
||||
part := 100 / float64(len(ss)+1)
|
||||
for i, s := range ss {
|
||||
if s.GetFile() != nil {
|
||||
continue
|
||||
for _, s := range ss {
|
||||
if s.GetFile() == nil {
|
||||
_, err = stream.CacheFullInTempFileAndWriter(s, func(p float64) {
|
||||
t.SetProgress((float64(cur) + float64(s.GetSize())*p/100.0) / float64(total))
|
||||
}, nil)
|
||||
}
|
||||
_, err = s.CacheFullAndWriter(nil, nil)
|
||||
cur += s.GetSize()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
t.SetProgress(float64(i+1) * part)
|
||||
}
|
||||
}
|
||||
decompressUp = model.UpdateProgressWithRange(t.SetProgress, 100-part, 100)
|
||||
t.SetProgress(100.0)
|
||||
decompressUp = func(_ float64) {}
|
||||
} else {
|
||||
decompressUp = t.SetProgress
|
||||
}
|
||||
|
@ -168,7 +168,7 @@ func GetStorage(path string, args *GetStoragesArgs) (driver.Driver, error) {
|
||||
func Other(ctx context.Context, args model.FsOtherArgs) (interface{}, error) {
|
||||
res, err := other(ctx, args)
|
||||
if err != nil {
|
||||
log.Errorf("failed get other %s: %+v", args.Path, err)
|
||||
log.Errorf("failed remove %s: %+v", args.Path, err)
|
||||
}
|
||||
return res, err
|
||||
}
|
||||
|
@ -69,7 +69,7 @@ func putAsTask(ctx context.Context, dstDirPath string, file model.FileStreamer)
|
||||
return nil, errors.WithStack(errs.UploadNotSupported)
|
||||
}
|
||||
if file.NeedStore() {
|
||||
_, err := file.CacheFullAndWriter(nil, nil)
|
||||
_, err := file.CacheFullInTempFile()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to create temp file")
|
||||
}
|
||||
|
@ -77,26 +77,6 @@ type ArchiveDecompressArgs struct {
|
||||
PutIntoNewDir bool
|
||||
}
|
||||
|
||||
type SharingListArgs struct {
|
||||
Refresh bool
|
||||
Pwd string
|
||||
}
|
||||
|
||||
type SharingArchiveMetaArgs struct {
|
||||
ArchiveMetaArgs
|
||||
Pwd string
|
||||
}
|
||||
|
||||
type SharingArchiveListArgs struct {
|
||||
ArchiveListArgs
|
||||
Pwd string
|
||||
}
|
||||
|
||||
type SharingLinkArgs struct {
|
||||
Pwd string
|
||||
LinkArgs
|
||||
}
|
||||
|
||||
type RangeReaderIF interface {
|
||||
RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error)
|
||||
}
|
||||
|
@ -2,6 +2,7 @@ package model
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
@ -39,17 +40,16 @@ type FileStreamer interface {
|
||||
utils.ClosersIF
|
||||
Obj
|
||||
GetMimetype() string
|
||||
//SetReader(io.Reader)
|
||||
NeedStore() bool
|
||||
IsForceStreamUpload() bool
|
||||
GetExist() Obj
|
||||
SetExist(Obj)
|
||||
// for a non-seekable Stream, RangeRead supports peeking some data, and CacheFullAndWriter still works
|
||||
//for a non-seekable Stream, RangeRead supports peeking some data, and CacheFullInTempFile still works
|
||||
RangeRead(http_range.Range) (io.Reader, error)
|
||||
// for a non-seekable Stream, if Read is called, this function won't work.
|
||||
// caches the full Stream and writes it to writer (if provided, even if the stream is already cached).
|
||||
CacheFullAndWriter(up *UpdateProgress, writer io.Writer) (File, error)
|
||||
SetTmpFile(file File)
|
||||
// if the Stream is not a File and is not cached, returns nil.
|
||||
//for a non-seekable Stream, if Read is called, this function won't work
|
||||
CacheFullInTempFile() (File, error)
|
||||
SetTmpFile(r *os.File)
|
||||
GetFile() File
|
||||
}
|
||||
|
||||
|
@ -1,47 +0,0 @@
|
||||
package model
|
||||
|
||||
import "time"
|
||||
|
||||
type SharingDB struct {
|
||||
ID string `json:"id" gorm:"type:char(12);primaryKey"`
|
||||
FilesRaw string `json:"-" gorm:"type:text"`
|
||||
Expires *time.Time `json:"expires"`
|
||||
Pwd string `json:"pwd"`
|
||||
Accessed int `json:"accessed"`
|
||||
MaxAccessed int `json:"max_accessed"`
|
||||
CreatorId uint `json:"-"`
|
||||
Disabled bool `json:"disabled"`
|
||||
Remark string `json:"remark"`
|
||||
Readme string `json:"readme" gorm:"type:text"`
|
||||
Header string `json:"header" gorm:"type:text"`
|
||||
Sort
|
||||
}
|
||||
|
||||
type Sharing struct {
|
||||
*SharingDB
|
||||
Files []string `json:"files"`
|
||||
Creator *User `json:"-"`
|
||||
}
|
||||
|
||||
func (s *Sharing) Valid() bool {
|
||||
if s.Disabled {
|
||||
return false
|
||||
}
|
||||
if s.MaxAccessed > 0 && s.Accessed >= s.MaxAccessed {
|
||||
return false
|
||||
}
|
||||
if len(s.Files) == 0 {
|
||||
return false
|
||||
}
|
||||
if !s.Creator.CanShare() {
|
||||
return false
|
||||
}
|
||||
if s.Expires != nil && !s.Expires.IsZero() && s.Expires.Before(time.Now()) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (s *Sharing) Verify(pwd string) bool {
|
||||
return s.Pwd == "" || s.Pwd == pwd
|
||||
}
|
@ -54,7 +54,6 @@ type User struct {
|
||||
// 11: ftp/sftp write
|
||||
// 12: can read archives
|
||||
// 13: can decompress archives
|
||||
// 14: can share
|
||||
Permission int32 `json:"permission"`
|
||||
OtpSecret string `json:"-"`
|
||||
SsoID string `json:"sso_id"` // unique by sso platform
|
||||
@ -146,10 +145,6 @@ func (u *User) CanDecompress() bool {
|
||||
return (u.Permission>>13)&1 == 1
|
||||
}
|
||||
|
||||
func (u *User) CanShare() bool {
|
||||
return (u.Permission>>14)&1 == 1
|
||||
}
|
||||
|
||||
func (u *User) JoinPath(reqPath string) (string, error) {
|
||||
return utils.JoinBasePath(u.BasePath, reqPath)
|
||||
}
|
||||
@ -190,5 +185,5 @@ func (u *User) WebAuthnCredentials() []webauthn.Credential {
|
||||
}
|
||||
|
||||
func (u *User) WebAuthnIcon() string {
|
||||
return "https://res.oplist.org/logo/logo.svg"
|
||||
return "https://cdn.oplist.org/gh/OpenListTeam/Logo@main/logo.svg"
|
||||
}
|
||||
|
@ -1,6 +1,7 @@
|
||||
package net
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
@ -14,7 +15,6 @@ import (
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/conf"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
"github.com/rclone/rclone/lib/mmap"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
|
||||
"github.com/aws/aws-sdk-go/aws/awsutil"
|
||||
@ -255,10 +255,7 @@ func (d *downloader) sendChunkTask(newConcurrency bool) error {
|
||||
finalSize += firstSize - minSize
|
||||
}
|
||||
}
|
||||
err := buf.Reset(int(finalSize))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
buf.Reset(int(finalSize))
|
||||
ch := chunk{
|
||||
start: d.pos,
|
||||
size: finalSize,
|
||||
@ -648,13 +645,11 @@ func (mr MultiReadCloser) Close() error {
|
||||
}
|
||||
|
||||
type Buf struct {
|
||||
size int //expected size
|
||||
ctx context.Context
|
||||
offR int
|
||||
offW int
|
||||
rw sync.Mutex
|
||||
buf []byte
|
||||
mmap bool
|
||||
buffer *bytes.Buffer
|
||||
size int //expected size
|
||||
ctx context.Context
|
||||
off int
|
||||
rw sync.Mutex
|
||||
|
||||
readSignal chan struct{}
|
||||
readPending bool
|
||||
@ -663,100 +658,76 @@ type Buf struct {
|
||||
// NewBuf is a buffer that can have 1 read & 1 write at the same time.
|
||||
// when read is faster write, immediately feed data to read after written
|
||||
func NewBuf(ctx context.Context, maxSize int) *Buf {
|
||||
br := &Buf{
|
||||
ctx: ctx,
|
||||
size: maxSize,
|
||||
return &Buf{
|
||||
ctx: ctx,
|
||||
buffer: bytes.NewBuffer(make([]byte, 0, maxSize)),
|
||||
size: maxSize,
|
||||
|
||||
readSignal: make(chan struct{}, 1),
|
||||
}
|
||||
if conf.MmapThreshold > 0 && maxSize >= conf.MmapThreshold {
|
||||
m, err := mmap.Alloc(maxSize)
|
||||
if err == nil {
|
||||
br.buf = m
|
||||
br.mmap = true
|
||||
return br
|
||||
}
|
||||
}
|
||||
br.buf = make([]byte, maxSize)
|
||||
return br
|
||||
}
|
||||
|
||||
func (br *Buf) Reset(size int) error {
|
||||
func (br *Buf) Reset(size int) {
|
||||
br.rw.Lock()
|
||||
defer br.rw.Unlock()
|
||||
if br.buf == nil {
|
||||
return io.ErrClosedPipe
|
||||
}
|
||||
if size > cap(br.buf) {
|
||||
return fmt.Errorf("reset size %d exceeds max size %d", size, cap(br.buf))
|
||||
if br.buffer == nil {
|
||||
return
|
||||
}
|
||||
br.buffer.Reset()
|
||||
br.size = size
|
||||
br.offR = 0
|
||||
br.offW = 0
|
||||
return nil
|
||||
br.off = 0
|
||||
}
|
||||
|
||||
func (br *Buf) Read(p []byte) (int, error) {
|
||||
func (br *Buf) Read(p []byte) (n int, err error) {
|
||||
if err := br.ctx.Err(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if len(p) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
if br.offR >= br.size {
|
||||
if br.off >= br.size {
|
||||
return 0, io.EOF
|
||||
}
|
||||
for {
|
||||
br.rw.Lock()
|
||||
if br.buf == nil {
|
||||
br.rw.Unlock()
|
||||
return 0, io.ErrClosedPipe
|
||||
if br.buffer != nil {
|
||||
n, err = br.buffer.Read(p)
|
||||
} else {
|
||||
err = io.ErrClosedPipe
|
||||
}
|
||||
|
||||
if br.offW < br.offR {
|
||||
if err != nil && err != io.EOF {
|
||||
br.rw.Unlock()
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
return
|
||||
}
|
||||
if br.offW == br.offR {
|
||||
br.readPending = true
|
||||
if n > 0 {
|
||||
br.off += n
|
||||
br.rw.Unlock()
|
||||
select {
|
||||
case <-br.ctx.Done():
|
||||
return 0, br.ctx.Err()
|
||||
case _, ok := <-br.readSignal:
|
||||
if !ok {
|
||||
return 0, io.ErrClosedPipe
|
||||
}
|
||||
continue
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
n := copy(p, br.buf[br.offR:br.offW])
|
||||
br.offR += n
|
||||
br.readPending = true
|
||||
br.rw.Unlock()
|
||||
if n < len(p) && br.offR >= br.size {
|
||||
return n, io.EOF
|
||||
// n==0, err==io.EOF
|
||||
select {
|
||||
case <-br.ctx.Done():
|
||||
return 0, br.ctx.Err()
|
||||
case _, ok := <-br.readSignal:
|
||||
if !ok {
|
||||
return 0, io.ErrClosedPipe
|
||||
}
|
||||
continue
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (br *Buf) Write(p []byte) (int, error) {
|
||||
func (br *Buf) Write(p []byte) (n int, err error) {
|
||||
if err := br.ctx.Err(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if len(p) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
br.rw.Lock()
|
||||
defer br.rw.Unlock()
|
||||
if br.buf == nil {
|
||||
if br.buffer == nil {
|
||||
return 0, io.ErrClosedPipe
|
||||
}
|
||||
if br.offW >= br.size {
|
||||
return 0, io.ErrShortWrite
|
||||
}
|
||||
n := copy(br.buf[br.offW:], p[:min(br.size-br.offW, len(p))])
|
||||
br.offW += n
|
||||
n, err = br.buffer.Write(p)
|
||||
if br.readPending {
|
||||
br.readPending = false
|
||||
select {
|
||||
@ -764,21 +735,12 @@ func (br *Buf) Write(p []byte) (int, error) {
|
||||
default:
|
||||
}
|
||||
}
|
||||
if n < len(p) {
|
||||
return n, io.ErrShortWrite
|
||||
}
|
||||
return n, nil
|
||||
return
|
||||
}
|
||||
|
||||
func (br *Buf) Close() error {
|
||||
func (br *Buf) Close() {
|
||||
br.rw.Lock()
|
||||
defer br.rw.Unlock()
|
||||
var err error
|
||||
if br.mmap {
|
||||
err = mmap.Free(br.buf)
|
||||
br.mmap = false
|
||||
}
|
||||
br.buf = nil
|
||||
br.buffer = nil
|
||||
close(br.readSignal)
|
||||
return err
|
||||
}
|
||||
|
@ -1,139 +0,0 @@
|
||||
package op
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
stdpath "path"
|
||||
"strings"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/db"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
"github.com/OpenListTeam/go-cache"
|
||||
"github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func makeJoined(sdb []model.SharingDB) []model.Sharing {
|
||||
creator := make(map[uint]*model.User)
|
||||
return utils.MustSliceConvert(sdb, func(s model.SharingDB) model.Sharing {
|
||||
var c *model.User
|
||||
var ok bool
|
||||
if c, ok = creator[s.CreatorId]; !ok {
|
||||
var err error
|
||||
if c, err = GetUserById(s.CreatorId); err != nil {
|
||||
c = nil
|
||||
} else {
|
||||
creator[s.CreatorId] = c
|
||||
}
|
||||
}
|
||||
var files []string
|
||||
if err := utils.Json.UnmarshalFromString(s.FilesRaw, &files); err != nil {
|
||||
files = make([]string, 0)
|
||||
}
|
||||
return model.Sharing{
|
||||
SharingDB: &s,
|
||||
Files: files,
|
||||
Creator: c,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
var sharingCache = cache.NewMemCache(cache.WithShards[*model.Sharing](8))
|
||||
var sharingG singleflight.Group[*model.Sharing]
|
||||
|
||||
func GetSharingById(id string, refresh ...bool) (*model.Sharing, error) {
|
||||
if !utils.IsBool(refresh...) {
|
||||
if sharing, ok := sharingCache.Get(id); ok {
|
||||
log.Debugf("use cache when get sharing %s", id)
|
||||
return sharing, nil
|
||||
}
|
||||
}
|
||||
sharing, err, _ := sharingG.Do(id, func() (*model.Sharing, error) {
|
||||
s, err := db.GetSharingById(id)
|
||||
if err != nil {
|
||||
return nil, errors.WithMessagef(err, "failed get sharing [%s]", id)
|
||||
}
|
||||
creator, err := GetUserById(s.CreatorId)
|
||||
if err != nil {
|
||||
return nil, errors.WithMessagef(err, "failed get sharing creator [%s]", id)
|
||||
}
|
||||
var files []string
|
||||
if err = utils.Json.UnmarshalFromString(s.FilesRaw, &files); err != nil {
|
||||
files = make([]string, 0)
|
||||
}
|
||||
return &model.Sharing{
|
||||
SharingDB: s,
|
||||
Files: files,
|
||||
Creator: creator,
|
||||
}, nil
|
||||
})
|
||||
return sharing, err
|
||||
}
|
||||
|
||||
func GetSharings(pageIndex, pageSize int) ([]model.Sharing, int64, error) {
|
||||
s, cnt, err := db.GetSharings(pageIndex, pageSize)
|
||||
if err != nil {
|
||||
return nil, 0, errors.WithStack(err)
|
||||
}
|
||||
return makeJoined(s), cnt, nil
|
||||
}
|
||||
|
||||
func GetSharingsByCreatorId(userId uint, pageIndex, pageSize int) ([]model.Sharing, int64, error) {
|
||||
s, cnt, err := db.GetSharingsByCreatorId(userId, pageIndex, pageSize)
|
||||
if err != nil {
|
||||
return nil, 0, errors.WithStack(err)
|
||||
}
|
||||
return makeJoined(s), cnt, nil
|
||||
}
|
||||
|
||||
func GetSharingUnwrapPath(sharing *model.Sharing, path string) (unwrapPath string, err error) {
|
||||
if len(sharing.Files) == 0 {
|
||||
return "", errors.New("cannot get actual path of an invalid sharing")
|
||||
}
|
||||
if len(sharing.Files) == 1 {
|
||||
return stdpath.Join(sharing.Files[0], path), nil
|
||||
}
|
||||
path = utils.FixAndCleanPath(path)[1:]
|
||||
if len(path) == 0 {
|
||||
return "", errors.New("cannot get actual path of a sharing root path")
|
||||
}
|
||||
mapPath := ""
|
||||
child, rest, _ := strings.Cut(path, "/")
|
||||
for _, c := range sharing.Files {
|
||||
if child == stdpath.Base(c) {
|
||||
mapPath = c
|
||||
break
|
||||
}
|
||||
}
|
||||
if mapPath == "" {
|
||||
return "", fmt.Errorf("failed find child [%s] of sharing [%s]", child, sharing.ID)
|
||||
}
|
||||
return stdpath.Join(mapPath, rest), nil
|
||||
}
|
||||
|
||||
func CreateSharing(sharing *model.Sharing) (id string, err error) {
|
||||
sharing.CreatorId = sharing.Creator.ID
|
||||
sharing.FilesRaw, err = utils.Json.MarshalToString(utils.MustSliceConvert(sharing.Files, utils.FixAndCleanPath))
|
||||
if err != nil {
|
||||
return "", errors.WithStack(err)
|
||||
}
|
||||
return db.CreateSharing(sharing.SharingDB)
|
||||
}
|
||||
|
||||
func UpdateSharing(sharing *model.Sharing, skipMarshal ...bool) (err error) {
|
||||
if !utils.IsBool(skipMarshal...) {
|
||||
sharing.CreatorId = sharing.Creator.ID
|
||||
sharing.FilesRaw, err = utils.Json.MarshalToString(utils.MustSliceConvert(sharing.Files, utils.FixAndCleanPath))
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
}
|
||||
sharingCache.Del(sharing.ID)
|
||||
return db.UpdateSharing(sharing.SharingDB)
|
||||
}
|
||||
|
||||
func DeleteSharing(sid string) error {
|
||||
sharingCache.Del(sid)
|
||||
return db.DeleteSharingById(sid)
|
||||
}
|
@ -1,65 +0,0 @@
|
||||
package sharing
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func archiveMeta(ctx context.Context, sid, path string, args model.SharingArchiveMetaArgs) (*model.Sharing, *model.ArchiveMetaProvider, error) {
|
||||
sharing, err := op.GetSharingById(sid, args.Refresh)
|
||||
if err != nil {
|
||||
return nil, nil, errors.WithStack(errs.SharingNotFound)
|
||||
}
|
||||
if !sharing.Valid() {
|
||||
return sharing, nil, errors.WithStack(errs.InvalidSharing)
|
||||
}
|
||||
if !sharing.Verify(args.Pwd) {
|
||||
return sharing, nil, errors.WithStack(errs.WrongShareCode)
|
||||
}
|
||||
path = utils.FixAndCleanPath(path)
|
||||
if len(sharing.Files) == 1 || path != "/" {
|
||||
unwrapPath, err := op.GetSharingUnwrapPath(sharing, path)
|
||||
if err != nil {
|
||||
return nil, nil, errors.WithMessage(err, "failed get sharing unwrap path")
|
||||
}
|
||||
storage, actualPath, err := op.GetStorageAndActualPath(unwrapPath)
|
||||
if err != nil {
|
||||
return nil, nil, errors.WithMessage(err, "failed get sharing file")
|
||||
}
|
||||
obj, err := op.GetArchiveMeta(ctx, storage, actualPath, args.ArchiveMetaArgs)
|
||||
return sharing, obj, err
|
||||
}
|
||||
return nil, nil, errors.New("cannot get sharing root archive meta")
|
||||
}
|
||||
|
||||
func archiveList(ctx context.Context, sid, path string, args model.SharingArchiveListArgs) (*model.Sharing, []model.Obj, error) {
|
||||
sharing, err := op.GetSharingById(sid, args.Refresh)
|
||||
if err != nil {
|
||||
return nil, nil, errors.WithStack(errs.SharingNotFound)
|
||||
}
|
||||
if !sharing.Valid() {
|
||||
return sharing, nil, errors.WithStack(errs.InvalidSharing)
|
||||
}
|
||||
if !sharing.Verify(args.Pwd) {
|
||||
return sharing, nil, errors.WithStack(errs.WrongShareCode)
|
||||
}
|
||||
path = utils.FixAndCleanPath(path)
|
||||
if len(sharing.Files) == 1 || path != "/" {
|
||||
unwrapPath, err := op.GetSharingUnwrapPath(sharing, path)
|
||||
if err != nil {
|
||||
return nil, nil, errors.WithMessage(err, "failed get sharing unwrap path")
|
||||
}
|
||||
storage, actualPath, err := op.GetStorageAndActualPath(unwrapPath)
|
||||
if err != nil {
|
||||
return nil, nil, errors.WithMessage(err, "failed get sharing file")
|
||||
}
|
||||
obj, err := op.ListArchive(ctx, storage, actualPath, args.ArchiveListArgs)
|
||||
return sharing, obj, err
|
||||
}
|
||||
return nil, nil, errors.New("cannot get sharing root archive list")
|
||||
}
|
@ -1,60 +0,0 @@
|
||||
package sharing
|
||||
|
||||
import (
|
||||
"context"
|
||||
stdpath "path"
|
||||
"time"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func get(ctx context.Context, sid, path string, args model.SharingListArgs) (*model.Sharing, model.Obj, error) {
|
||||
sharing, err := op.GetSharingById(sid, args.Refresh)
|
||||
if err != nil {
|
||||
return nil, nil, errors.WithStack(errs.SharingNotFound)
|
||||
}
|
||||
if !sharing.Valid() {
|
||||
return sharing, nil, errors.WithStack(errs.InvalidSharing)
|
||||
}
|
||||
if !sharing.Verify(args.Pwd) {
|
||||
return sharing, nil, errors.WithStack(errs.WrongShareCode)
|
||||
}
|
||||
path = utils.FixAndCleanPath(path)
|
||||
if len(sharing.Files) == 1 || path != "/" {
|
||||
unwrapPath, err := op.GetSharingUnwrapPath(sharing, path)
|
||||
if err != nil {
|
||||
return nil, nil, errors.WithMessage(err, "failed get sharing unwrap path")
|
||||
}
|
||||
if unwrapPath != "/" {
|
||||
virtualFiles := op.GetStorageVirtualFilesByPath(stdpath.Dir(unwrapPath))
|
||||
for _, f := range virtualFiles {
|
||||
if f.GetName() == stdpath.Base(unwrapPath) {
|
||||
return sharing, f, nil
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return sharing, &model.Object{
|
||||
Name: sid,
|
||||
Size: 0,
|
||||
Modified: time.Time{},
|
||||
IsFolder: true,
|
||||
}, nil
|
||||
}
|
||||
storage, actualPath, err := op.GetStorageAndActualPath(unwrapPath)
|
||||
if err != nil {
|
||||
return nil, nil, errors.WithMessage(err, "failed get sharing file")
|
||||
}
|
||||
obj, err := op.Get(ctx, storage, actualPath)
|
||||
return sharing, obj, err
|
||||
}
|
||||
return sharing, &model.Object{
|
||||
Name: sid,
|
||||
Size: 0,
|
||||
Modified: time.Time{},
|
||||
IsFolder: true,
|
||||
}, nil
|
||||
}
|
@ -1,46 +0,0 @@
|
||||
package sharing
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
"github.com/OpenListTeam/OpenList/v4/server/common"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func link(ctx context.Context, sid, path string, args *LinkArgs) (*model.Sharing, *model.Link, model.Obj, error) {
|
||||
sharing, err := op.GetSharingById(sid, args.SharingListArgs.Refresh)
|
||||
if err != nil {
|
||||
return nil, nil, nil, errors.WithStack(errs.SharingNotFound)
|
||||
}
|
||||
if !sharing.Valid() {
|
||||
return sharing, nil, nil, errors.WithStack(errs.InvalidSharing)
|
||||
}
|
||||
if !sharing.Verify(args.Pwd) {
|
||||
return sharing, nil, nil, errors.WithStack(errs.WrongShareCode)
|
||||
}
|
||||
path = utils.FixAndCleanPath(path)
|
||||
if len(sharing.Files) == 1 || path != "/" {
|
||||
unwrapPath, err := op.GetSharingUnwrapPath(sharing, path)
|
||||
if err != nil {
|
||||
return nil, nil, nil, errors.WithMessage(err, "failed get sharing unwrap path")
|
||||
}
|
||||
storage, actualPath, err := op.GetStorageAndActualPath(unwrapPath)
|
||||
if err != nil {
|
||||
return nil, nil, nil, errors.WithMessage(err, "failed get sharing link")
|
||||
}
|
||||
l, obj, err := op.Link(ctx, storage, actualPath, args.LinkArgs)
|
||||
if err != nil {
|
||||
return nil, nil, nil, errors.WithMessage(err, "failed get sharing link")
|
||||
}
|
||||
if l.URL != "" && !strings.HasPrefix(l.URL, "http://") && !strings.HasPrefix(l.URL, "https://") {
|
||||
l.URL = common.GetApiUrl(ctx) + l.URL
|
||||
}
|
||||
return sharing, l, obj, nil
|
||||
}
|
||||
return nil, nil, nil, errors.New("cannot get sharing root link")
|
||||
}
|
@ -1,83 +0,0 @@
|
||||
package sharing
|
||||
|
||||
import (
|
||||
"context"
|
||||
stdpath "path"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func list(ctx context.Context, sid, path string, args model.SharingListArgs) (*model.Sharing, []model.Obj, error) {
|
||||
sharing, err := op.GetSharingById(sid, args.Refresh)
|
||||
if err != nil {
|
||||
return nil, nil, errors.WithStack(errs.SharingNotFound)
|
||||
}
|
||||
if !sharing.Valid() {
|
||||
return sharing, nil, errors.WithStack(errs.InvalidSharing)
|
||||
}
|
||||
if !sharing.Verify(args.Pwd) {
|
||||
return sharing, nil, errors.WithStack(errs.WrongShareCode)
|
||||
}
|
||||
path = utils.FixAndCleanPath(path)
|
||||
if len(sharing.Files) == 1 || path != "/" {
|
||||
unwrapPath, err := op.GetSharingUnwrapPath(sharing, path)
|
||||
if err != nil {
|
||||
return nil, nil, errors.WithMessage(err, "failed get sharing unwrap path")
|
||||
}
|
||||
virtualFiles := op.GetStorageVirtualFilesByPath(unwrapPath)
|
||||
storage, actualPath, err := op.GetStorageAndActualPath(unwrapPath)
|
||||
if err != nil && len(virtualFiles) == 0 {
|
||||
return nil, nil, errors.WithMessage(err, "failed list sharing")
|
||||
}
|
||||
var objs []model.Obj
|
||||
if storage != nil {
|
||||
objs, err = op.List(ctx, storage, actualPath, model.ListArgs{
|
||||
Refresh: args.Refresh,
|
||||
ReqPath: stdpath.Join(sid, path),
|
||||
})
|
||||
if err != nil && len(virtualFiles) == 0 {
|
||||
return nil, nil, errors.WithMessage(err, "failed list sharing")
|
||||
}
|
||||
}
|
||||
om := model.NewObjMerge()
|
||||
objs = om.Merge(objs, virtualFiles...)
|
||||
model.SortFiles(objs, sharing.OrderBy, sharing.OrderDirection)
|
||||
model.ExtractFolder(objs, sharing.ExtractFolder)
|
||||
return sharing, objs, nil
|
||||
}
|
||||
objs := make([]model.Obj, 0, len(sharing.Files))
|
||||
for _, f := range sharing.Files {
|
||||
if f != "/" {
|
||||
isVf := false
|
||||
virtualFiles := op.GetStorageVirtualFilesByPath(stdpath.Dir(f))
|
||||
for _, vf := range virtualFiles {
|
||||
if vf.GetName() == stdpath.Base(f) {
|
||||
objs = append(objs, vf)
|
||||
isVf = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if isVf {
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
storage, actualPath, err := op.GetStorageAndActualPath(f)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
obj, err := op.Get(ctx, storage, actualPath)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
objs = append(objs, obj)
|
||||
}
|
||||
model.SortFiles(objs, sharing.OrderBy, sharing.OrderDirection)
|
||||
model.ExtractFolder(objs, sharing.ExtractFolder)
|
||||
return sharing, objs, nil
|
||||
}
|
@ -1,58 +0,0 @@
|
||||
package sharing
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func List(ctx context.Context, sid, path string, args model.SharingListArgs) (*model.Sharing, []model.Obj, error) {
|
||||
sharing, res, err := list(ctx, sid, path, args)
|
||||
if err != nil {
|
||||
log.Errorf("failed list sharing %s/%s: %+v", sid, path, err)
|
||||
return nil, nil, err
|
||||
}
|
||||
return sharing, res, nil
|
||||
}
|
||||
|
||||
func Get(ctx context.Context, sid, path string, args model.SharingListArgs) (*model.Sharing, model.Obj, error) {
|
||||
sharing, res, err := get(ctx, sid, path, args)
|
||||
if err != nil {
|
||||
log.Warnf("failed get sharing %s/%s: %s", sid, path, err)
|
||||
return nil, nil, err
|
||||
}
|
||||
return sharing, res, nil
|
||||
}
|
||||
|
||||
func ArchiveMeta(ctx context.Context, sid, path string, args model.SharingArchiveMetaArgs) (*model.Sharing, *model.ArchiveMetaProvider, error) {
|
||||
sharing, res, err := archiveMeta(ctx, sid, path, args)
|
||||
if err != nil {
|
||||
log.Warnf("failed get sharing archive meta %s/%s: %s", sid, path, err)
|
||||
return nil, nil, err
|
||||
}
|
||||
return sharing, res, nil
|
||||
}
|
||||
|
||||
func ArchiveList(ctx context.Context, sid, path string, args model.SharingArchiveListArgs) (*model.Sharing, []model.Obj, error) {
|
||||
sharing, res, err := archiveList(ctx, sid, path, args)
|
||||
if err != nil {
|
||||
log.Warnf("failed list sharing archive %s/%s: %s", sid, path, err)
|
||||
return nil, nil, err
|
||||
}
|
||||
return sharing, res, nil
|
||||
}
|
||||
|
||||
type LinkArgs struct {
|
||||
model.SharingListArgs
|
||||
model.LinkArgs
|
||||
}
|
||||
|
||||
func Link(ctx context.Context, sid, path string, args *LinkArgs) (*model.Sharing, *model.Link, model.Obj, error) {
|
||||
sharing, res, file, err := link(ctx, sid, path, args)
|
||||
if err != nil {
|
||||
log.Errorf("failed get sharing link %s/%s: %+v", sid, path, err)
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
return sharing, res, file, nil
|
||||
}
|
@ -1,6 +1,7 @@
|
||||
package stream
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
@ -12,10 +13,8 @@ import (
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/conf"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/buffer"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
"github.com/rclone/rclone/lib/mmap"
|
||||
"go4.org/readerutil"
|
||||
)
|
||||
|
||||
@ -28,19 +27,13 @@ type FileStream struct {
|
||||
ForceStreamUpload bool
|
||||
Exist model.Obj //the file existed in the destination, we can reuse some info since we wil overwrite it
|
||||
utils.Closers
|
||||
|
||||
tmpFile model.File //if present, tmpFile has full content, it will be deleted at last
|
||||
peekBuff *buffer.Reader
|
||||
size int64
|
||||
oriReader io.Reader // the original reader, used for caching
|
||||
tmpFile *os.File //if present, tmpFile has full content, it will be deleted at last
|
||||
peekBuff *bytes.Reader
|
||||
}
|
||||
|
||||
func (f *FileStream) GetSize() int64 {
|
||||
if f.size > 0 {
|
||||
return f.size
|
||||
}
|
||||
if file, ok := f.tmpFile.(*os.File); ok {
|
||||
info, err := file.Stat()
|
||||
if f.tmpFile != nil {
|
||||
info, err := f.tmpFile.Stat()
|
||||
if err == nil {
|
||||
return info.Size()
|
||||
}
|
||||
@ -61,20 +54,16 @@ func (f *FileStream) IsForceStreamUpload() bool {
|
||||
}
|
||||
|
||||
func (f *FileStream) Close() error {
|
||||
if f.peekBuff != nil {
|
||||
f.peekBuff.Reset()
|
||||
f.peekBuff = nil
|
||||
}
|
||||
|
||||
var err1, err2 error
|
||||
|
||||
err1 = f.Closers.Close()
|
||||
if errors.Is(err1, os.ErrClosed) {
|
||||
err1 = nil
|
||||
}
|
||||
if file, ok := f.tmpFile.(*os.File); ok {
|
||||
err2 = os.RemoveAll(file.Name())
|
||||
if f.tmpFile != nil {
|
||||
err2 = os.RemoveAll(f.tmpFile.Name())
|
||||
if err2 != nil {
|
||||
err2 = errs.NewErr(err2, "failed to remove tmpFile [%s]", file.Name())
|
||||
err2 = errs.NewErr(err2, "failed to remove tmpFile [%s]", f.tmpFile.Name())
|
||||
} else {
|
||||
f.tmpFile = nil
|
||||
}
|
||||
@ -90,55 +79,20 @@ func (f *FileStream) SetExist(obj model.Obj) {
|
||||
f.Exist = obj
|
||||
}
|
||||
|
||||
// CacheFullAndWriter save all data into tmpFile or memory.
|
||||
// It's not thread-safe!
|
||||
func (f *FileStream) CacheFullAndWriter(up *model.UpdateProgress, writer io.Writer) (model.File, error) {
|
||||
if cache := f.GetFile(); cache != nil {
|
||||
if writer == nil {
|
||||
return cache, nil
|
||||
}
|
||||
_, err := cache.Seek(0, io.SeekStart)
|
||||
if err == nil {
|
||||
reader := f.Reader
|
||||
if up != nil {
|
||||
cacheProgress := model.UpdateProgressWithRange(*up, 0, 50)
|
||||
*up = model.UpdateProgressWithRange(*up, 50, 100)
|
||||
reader = &ReaderUpdatingProgress{
|
||||
Reader: &SimpleReaderWithSize{
|
||||
Reader: reader,
|
||||
Size: f.GetSize(),
|
||||
},
|
||||
UpdateProgress: cacheProgress,
|
||||
}
|
||||
}
|
||||
_, err = utils.CopyWithBuffer(writer, reader)
|
||||
if err == nil {
|
||||
_, err = cache.Seek(0, io.SeekStart)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cache, nil
|
||||
// CacheFullInTempFile save all data into tmpFile. Not recommended since it wears disk,
|
||||
// and can't start upload until the file is written. It's not thread-safe!
|
||||
func (f *FileStream) CacheFullInTempFile() (model.File, error) {
|
||||
if file := f.GetFile(); file != nil {
|
||||
return file, nil
|
||||
}
|
||||
|
||||
reader := f.Reader
|
||||
if up != nil {
|
||||
cacheProgress := model.UpdateProgressWithRange(*up, 0, 50)
|
||||
*up = model.UpdateProgressWithRange(*up, 50, 100)
|
||||
reader = &ReaderUpdatingProgress{
|
||||
Reader: &SimpleReaderWithSize{
|
||||
Reader: reader,
|
||||
Size: f.GetSize(),
|
||||
},
|
||||
UpdateProgress: cacheProgress,
|
||||
}
|
||||
tmpF, err := utils.CreateTempFile(f.Reader, f.GetSize())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if writer != nil {
|
||||
reader = io.TeeReader(reader, writer)
|
||||
}
|
||||
f.Reader = reader
|
||||
return f.cache(f.GetSize())
|
||||
f.Add(tmpF)
|
||||
f.tmpFile = tmpF
|
||||
f.Reader = tmpF
|
||||
return tmpF, nil
|
||||
}
|
||||
|
||||
func (f *FileStream) GetFile() model.File {
|
||||
@ -152,80 +106,40 @@ func (f *FileStream) GetFile() model.File {
|
||||
}
|
||||
|
||||
// RangeRead have to cache all data first since only Reader is provided.
|
||||
// It's not thread-safe!
|
||||
// also support a peeking RangeRead at very start, but won't buffer more than conf.MaxBufferLimit data in memory
|
||||
func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
|
||||
if httpRange.Length < 0 || httpRange.Start+httpRange.Length > f.GetSize() {
|
||||
httpRange.Length = f.GetSize() - httpRange.Start
|
||||
}
|
||||
if f.GetFile() != nil {
|
||||
return io.NewSectionReader(f.GetFile(), httpRange.Start, httpRange.Length), nil
|
||||
var cache io.ReaderAt = f.GetFile()
|
||||
if cache != nil {
|
||||
return io.NewSectionReader(cache, httpRange.Start, httpRange.Length), nil
|
||||
}
|
||||
|
||||
size := httpRange.Start + httpRange.Length
|
||||
if f.peekBuff != nil && size <= int64(f.peekBuff.Len()) {
|
||||
return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil
|
||||
}
|
||||
|
||||
cache, err := f.cache(size)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return io.NewSectionReader(cache, httpRange.Start, httpRange.Length), nil
|
||||
}
|
||||
|
||||
// *旧笔记
|
||||
// 使用bytes.Buffer作为io.CopyBuffer的写入对象,CopyBuffer会调用Buffer.ReadFrom
|
||||
// 即使被写入的数据量与Buffer.Cap一致,Buffer也会扩大
|
||||
|
||||
func (f *FileStream) cache(maxCacheSize int64) (model.File, error) {
|
||||
if maxCacheSize > int64(conf.MaxBufferLimit) {
|
||||
tmpF, err := utils.CreateTempFile(f.Reader, f.GetSize())
|
||||
if size <= int64(conf.MaxBufferLimit) {
|
||||
bufSize := min(size, f.GetSize())
|
||||
// 使用bytes.Buffer作为io.CopyBuffer的写入对象,CopyBuffer会调用Buffer.ReadFrom
|
||||
// 即使被写入的数据量与Buffer.Cap一致,Buffer也会扩大
|
||||
buf := make([]byte, bufSize)
|
||||
n, err := io.ReadFull(f.Reader, buf)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", bufSize, n, err)
|
||||
}
|
||||
f.peekBuff = bytes.NewReader(buf)
|
||||
f.Reader = io.MultiReader(f.peekBuff, f.Reader)
|
||||
cache = f.peekBuff
|
||||
} else {
|
||||
var err error
|
||||
cache, err = f.CacheFullInTempFile()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.Add(tmpF)
|
||||
f.tmpFile = tmpF
|
||||
f.Reader = tmpF
|
||||
return tmpF, nil
|
||||
}
|
||||
|
||||
if f.peekBuff == nil {
|
||||
f.peekBuff = &buffer.Reader{}
|
||||
f.oriReader = f.Reader
|
||||
}
|
||||
bufSize := maxCacheSize - int64(f.peekBuff.Len())
|
||||
var buf []byte
|
||||
if conf.MmapThreshold > 0 && bufSize >= int64(conf.MmapThreshold) {
|
||||
m, err := mmap.Alloc(int(bufSize))
|
||||
if err == nil {
|
||||
f.Add(utils.CloseFunc(func() error {
|
||||
return mmap.Free(m)
|
||||
}))
|
||||
buf = m
|
||||
}
|
||||
}
|
||||
if buf == nil {
|
||||
buf = make([]byte, bufSize)
|
||||
}
|
||||
n, err := io.ReadFull(f.oriReader, buf)
|
||||
if bufSize != int64(n) {
|
||||
return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", bufSize, n, err)
|
||||
}
|
||||
f.peekBuff.Append(buf)
|
||||
if int64(f.peekBuff.Len()) >= f.GetSize() {
|
||||
f.Reader = f.peekBuff
|
||||
f.oriReader = nil
|
||||
} else {
|
||||
f.Reader = io.MultiReader(f.peekBuff, f.oriReader)
|
||||
}
|
||||
return f.peekBuff, nil
|
||||
}
|
||||
|
||||
func (f *FileStream) SetTmpFile(file model.File) {
|
||||
f.AddIfCloser(file)
|
||||
f.tmpFile = file
|
||||
f.Reader = file
|
||||
return io.NewSectionReader(cache, httpRange.Start, httpRange.Length), nil
|
||||
}
|
||||
|
||||
var _ model.FileStreamer = (*SeekableStream)(nil)
|
||||
@ -242,6 +156,7 @@ type SeekableStream struct {
|
||||
*FileStream
|
||||
// should have one of belows to support rangeRead
|
||||
rangeReadCloser model.RangeReadCloserIF
|
||||
size int64
|
||||
}
|
||||
|
||||
func NewSeekableStream(fs *FileStream, link *model.Link) (*SeekableStream, error) {
|
||||
@ -263,26 +178,38 @@ func NewSeekableStream(fs *FileStream, link *model.Link) (*SeekableStream, error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rrc := &model.RangeReadCloser{
|
||||
RangeReader: rr,
|
||||
}
|
||||
if _, ok := rr.(*model.FileRangeReader); ok {
|
||||
fs.Reader, err = rrc.RangeRead(fs.Ctx, http_range.Range{Length: -1})
|
||||
fs.Reader, err = rr.RangeRead(fs.Ctx, http_range.Range{Length: -1})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fs.Add(link)
|
||||
return &SeekableStream{FileStream: fs, size: size}, nil
|
||||
}
|
||||
rrc := &model.RangeReadCloser{
|
||||
RangeReader: rr,
|
||||
}
|
||||
fs.size = size
|
||||
fs.Add(link)
|
||||
fs.Add(rrc)
|
||||
return &SeekableStream{FileStream: fs, rangeReadCloser: rrc}, nil
|
||||
return &SeekableStream{FileStream: fs, rangeReadCloser: rrc, size: size}, nil
|
||||
}
|
||||
return nil, fmt.Errorf("illegal seekableStream")
|
||||
}
|
||||
|
||||
func (ss *SeekableStream) GetSize() int64 {
|
||||
if ss.size > 0 {
|
||||
return ss.size
|
||||
}
|
||||
return ss.FileStream.GetSize()
|
||||
}
|
||||
|
||||
//func (ss *SeekableStream) Peek(length int) {
|
||||
//
|
||||
//}
|
||||
|
||||
// RangeRead is not thread-safe, pls use it in single thread only.
|
||||
func (ss *SeekableStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
|
||||
if ss.GetFile() == nil && ss.rangeReadCloser != nil {
|
||||
if ss.tmpFile == nil && ss.rangeReadCloser != nil {
|
||||
rc, err := ss.rangeReadCloser.RangeRead(ss.Ctx, httpRange)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -292,37 +219,47 @@ func (ss *SeekableStream) RangeRead(httpRange http_range.Range) (io.Reader, erro
|
||||
return ss.FileStream.RangeRead(httpRange)
|
||||
}
|
||||
|
||||
//func (f *FileStream) GetReader() io.Reader {
|
||||
// return f.Reader
|
||||
//}
|
||||
|
||||
// only provide Reader as full stream when it's demanded. in rapid-upload, we can skip this to save memory
|
||||
func (ss *SeekableStream) Read(p []byte) (n int, err error) {
|
||||
if err := ss.generateReader(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return ss.FileStream.Read(p)
|
||||
}
|
||||
|
||||
func (ss *SeekableStream) generateReader() error {
|
||||
if ss.Reader == nil {
|
||||
if ss.rangeReadCloser == nil {
|
||||
return fmt.Errorf("illegal seekableStream")
|
||||
return 0, fmt.Errorf("illegal seekableStream")
|
||||
}
|
||||
rc, err := ss.rangeReadCloser.RangeRead(ss.Ctx, http_range.Range{Length: -1})
|
||||
if err != nil {
|
||||
return err
|
||||
return 0, err
|
||||
}
|
||||
ss.Reader = rc
|
||||
}
|
||||
return nil
|
||||
return ss.Reader.Read(p)
|
||||
}
|
||||
|
||||
func (ss *SeekableStream) CacheFullAndWriter(up *model.UpdateProgress, writer io.Writer) (model.File, error) {
|
||||
if err := ss.generateReader(); err != nil {
|
||||
func (ss *SeekableStream) CacheFullInTempFile() (model.File, error) {
|
||||
if file := ss.GetFile(); file != nil {
|
||||
return file, nil
|
||||
}
|
||||
tmpF, err := utils.CreateTempFile(ss, ss.GetSize())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ss.FileStream.CacheFullAndWriter(up, writer)
|
||||
ss.Add(tmpF)
|
||||
ss.tmpFile = tmpF
|
||||
ss.Reader = tmpF
|
||||
return tmpF, nil
|
||||
}
|
||||
|
||||
func (f *FileStream) SetTmpFile(r *os.File) {
|
||||
f.Add(r)
|
||||
f.tmpFile = r
|
||||
f.Reader = r
|
||||
}
|
||||
|
||||
type ReaderWithSize interface {
|
||||
io.Reader
|
||||
io.ReadCloser
|
||||
GetSize() int64
|
||||
}
|
||||
|
||||
@ -356,10 +293,7 @@ func (r *ReaderUpdatingProgress) Read(p []byte) (n int, err error) {
|
||||
}
|
||||
|
||||
func (r *ReaderUpdatingProgress) Close() error {
|
||||
if c, ok := r.Reader.(io.Closer); ok {
|
||||
return c.Close()
|
||||
}
|
||||
return nil
|
||||
return r.Reader.Close()
|
||||
}
|
||||
|
||||
type RangeReadReadAtSeeker struct {
|
||||
@ -377,20 +311,19 @@ type headCache struct {
|
||||
func (c *headCache) head(p []byte) (int, error) {
|
||||
n := 0
|
||||
for _, buf := range c.bufs {
|
||||
n += copy(p[n:], buf)
|
||||
if n == len(p) {
|
||||
if len(buf)+n >= len(p) {
|
||||
n += copy(p[n:], buf[:len(p)-n])
|
||||
return n, nil
|
||||
} else {
|
||||
n += copy(p[n:], buf)
|
||||
}
|
||||
}
|
||||
nn, err := io.ReadFull(c.reader, p[n:])
|
||||
if nn > 0 {
|
||||
buf := make([]byte, nn)
|
||||
copy(buf, p[n:])
|
||||
w, err := io.ReadAtLeast(c.reader, p[n:], 1)
|
||||
if w > 0 {
|
||||
buf := make([]byte, w)
|
||||
copy(buf, p[n:n+w])
|
||||
c.bufs = append(c.bufs, buf)
|
||||
n += nn
|
||||
if err == io.ErrUnexpectedEOF {
|
||||
err = io.EOF
|
||||
}
|
||||
n += w
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
@ -489,9 +422,6 @@ func (r *RangeReadReadAtSeeker) getReaderAtOffset(off int64) (io.Reader, error)
|
||||
}
|
||||
|
||||
func (r *RangeReadReadAtSeeker) ReadAt(p []byte, off int64) (n int, err error) {
|
||||
if off < 0 || off >= r.ss.GetSize() {
|
||||
return 0, io.EOF
|
||||
}
|
||||
if off == 0 && r.headCache != nil {
|
||||
return r.headCache.head(p)
|
||||
}
|
||||
@ -500,15 +430,12 @@ func (r *RangeReadReadAtSeeker) ReadAt(p []byte, off int64) (n int, err error) {
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
n, err = io.ReadFull(rr, p)
|
||||
if n > 0 {
|
||||
off += int64(n)
|
||||
switch err {
|
||||
case nil:
|
||||
r.readerMap.Store(int64(off), rr)
|
||||
case io.ErrUnexpectedEOF:
|
||||
err = io.EOF
|
||||
}
|
||||
n, err = io.ReadAtLeast(rr, p, 1)
|
||||
off += int64(n)
|
||||
if err == nil {
|
||||
r.readerMap.Store(int64(off), rr)
|
||||
} else {
|
||||
rr = nil
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
@ -517,14 +444,20 @@ func (r *RangeReadReadAtSeeker) Seek(offset int64, whence int) (int64, error) {
|
||||
switch whence {
|
||||
case io.SeekStart:
|
||||
case io.SeekCurrent:
|
||||
if offset == 0 {
|
||||
return r.masterOff, nil
|
||||
}
|
||||
offset += r.masterOff
|
||||
case io.SeekEnd:
|
||||
offset += r.ss.GetSize()
|
||||
default:
|
||||
return 0, errors.New("Seek: invalid whence")
|
||||
return 0, errs.NotSupport
|
||||
}
|
||||
if offset < 0 || offset > r.ss.GetSize() {
|
||||
return 0, errors.New("Seek: invalid offset")
|
||||
if offset < 0 {
|
||||
return r.masterOff, errors.New("invalid seek: negative position")
|
||||
}
|
||||
if offset > r.ss.GetSize() {
|
||||
offset = r.ss.GetSize()
|
||||
}
|
||||
r.masterOff = offset
|
||||
return offset, nil
|
||||
@ -532,8 +465,6 @@ func (r *RangeReadReadAtSeeker) Seek(offset int64, whence int) (int64, error) {
|
||||
|
||||
func (r *RangeReadReadAtSeeker) Read(p []byte) (n int, err error) {
|
||||
n, err = r.ReadAt(p, r.masterOff)
|
||||
if n > 0 {
|
||||
r.masterOff += int64(n)
|
||||
}
|
||||
r.masterOff += int64(n)
|
||||
return n, err
|
||||
}
|
||||
|
@ -1,88 +0,0 @@
|
||||
package stream
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/conf"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
|
||||
)
|
||||
|
||||
func TestFileStream_RangeRead(t *testing.T) {
|
||||
conf.MaxBufferLimit = 16 * 1024 * 1024
|
||||
type args struct {
|
||||
httpRange http_range.Range
|
||||
}
|
||||
buf := []byte("github.com/OpenListTeam/OpenList")
|
||||
f := &FileStream{
|
||||
Obj: &model.Object{
|
||||
Size: int64(len(buf)),
|
||||
},
|
||||
Reader: io.NopCloser(bytes.NewReader(buf)),
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
f *FileStream
|
||||
args args
|
||||
want func(f *FileStream, got io.Reader, err error) error
|
||||
}{
|
||||
{
|
||||
name: "range 11-12",
|
||||
f: f,
|
||||
args: args{
|
||||
httpRange: http_range.Range{Start: 11, Length: 12},
|
||||
},
|
||||
want: func(f *FileStream, got io.Reader, err error) error {
|
||||
if f.GetFile() != nil {
|
||||
return errors.New("cached")
|
||||
}
|
||||
b, _ := io.ReadAll(got)
|
||||
if !bytes.Equal(buf[11:11+12], b) {
|
||||
return fmt.Errorf("=%s ,want =%s", b, buf[11:11+12])
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "range 11-21",
|
||||
f: f,
|
||||
args: args{
|
||||
httpRange: http_range.Range{Start: 11, Length: 21},
|
||||
},
|
||||
want: func(f *FileStream, got io.Reader, err error) error {
|
||||
if f.GetFile() == nil {
|
||||
return errors.New("not cached")
|
||||
}
|
||||
b, _ := io.ReadAll(got)
|
||||
if !bytes.Equal(buf[11:11+21], b) {
|
||||
return fmt.Errorf("=%s ,want =%s", b, buf[11:11+21])
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := tt.f.RangeRead(tt.args.httpRange)
|
||||
if err := tt.want(tt.f, got, err); err != nil {
|
||||
t.Errorf("FileStream.RangeRead() %v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
t.Run("after", func(t *testing.T) {
|
||||
if f.GetFile() == nil {
|
||||
t.Error("not cached")
|
||||
}
|
||||
buf2 := make([]byte, len(buf))
|
||||
if _, err := io.ReadFull(f, buf2); err != nil {
|
||||
t.Errorf("FileStream.Read() error = %v", err)
|
||||
}
|
||||
if !bytes.Equal(buf, buf2) {
|
||||
t.Errorf("FileStream.Read() = %s, want %s", buf2, buf)
|
||||
}
|
||||
})
|
||||
}
|
@ -8,14 +8,13 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"sync"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/conf"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/net"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/pool"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
"github.com/rclone/rclone/lib/mmap"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
@ -142,61 +141,81 @@ func (r *ReaderWithCtx) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func CacheFullAndHash(stream model.FileStreamer, up *model.UpdateProgress, hashType *utils.HashType, hashParams ...any) (model.File, string, error) {
|
||||
func CacheFullInTempFileAndWriter(stream model.FileStreamer, up model.UpdateProgress, w io.Writer) (model.File, error) {
|
||||
if cache := stream.GetFile(); cache != nil {
|
||||
if w != nil {
|
||||
_, err := cache.Seek(0, io.SeekStart)
|
||||
if err == nil {
|
||||
var reader io.Reader = stream
|
||||
if up != nil {
|
||||
reader = &ReaderUpdatingProgress{
|
||||
Reader: stream,
|
||||
UpdateProgress: up,
|
||||
}
|
||||
}
|
||||
_, err = utils.CopyWithBuffer(w, reader)
|
||||
if err == nil {
|
||||
_, err = cache.Seek(0, io.SeekStart)
|
||||
}
|
||||
}
|
||||
return cache, err
|
||||
}
|
||||
if up != nil {
|
||||
up(100)
|
||||
}
|
||||
return cache, nil
|
||||
}
|
||||
|
||||
var reader io.Reader = stream
|
||||
if up != nil {
|
||||
reader = &ReaderUpdatingProgress{
|
||||
Reader: stream,
|
||||
UpdateProgress: up,
|
||||
}
|
||||
}
|
||||
|
||||
if w != nil {
|
||||
reader = io.TeeReader(reader, w)
|
||||
}
|
||||
tmpF, err := utils.CreateTempFile(reader, stream.GetSize())
|
||||
if err == nil {
|
||||
stream.SetTmpFile(tmpF)
|
||||
}
|
||||
return tmpF, err
|
||||
}
|
||||
|
||||
func CacheFullInTempFileAndHash(stream model.FileStreamer, up model.UpdateProgress, hashType *utils.HashType, hashParams ...any) (model.File, string, error) {
|
||||
h := hashType.NewFunc(hashParams...)
|
||||
tmpF, err := stream.CacheFullAndWriter(up, h)
|
||||
tmpF, err := CacheFullInTempFileAndWriter(stream, up, h)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
return tmpF, hex.EncodeToString(h.Sum(nil)), nil
|
||||
return tmpF, hex.EncodeToString(h.Sum(nil)), err
|
||||
}
|
||||
|
||||
type StreamSectionReader struct {
|
||||
file model.FileStreamer
|
||||
off int64
|
||||
bufPool *pool.Pool[[]byte]
|
||||
bufPool *sync.Pool
|
||||
}
|
||||
|
||||
func NewStreamSectionReader(file model.FileStreamer, maxBufferSize int, up *model.UpdateProgress) (*StreamSectionReader, error) {
|
||||
func NewStreamSectionReader(file model.FileStreamer, maxBufferSize int) (*StreamSectionReader, error) {
|
||||
ss := &StreamSectionReader{file: file}
|
||||
if file.GetFile() != nil {
|
||||
return ss, nil
|
||||
}
|
||||
|
||||
maxBufferSize = min(maxBufferSize, int(file.GetSize()))
|
||||
if maxBufferSize > conf.MaxBufferLimit {
|
||||
_, err := file.CacheFullAndWriter(up, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ss, nil
|
||||
}
|
||||
if conf.MmapThreshold > 0 && maxBufferSize >= conf.MmapThreshold {
|
||||
ss.bufPool = &pool.Pool[[]byte]{
|
||||
New: func() []byte {
|
||||
buf, err := mmap.Alloc(maxBufferSize)
|
||||
if err == nil {
|
||||
file.Add(utils.CloseFunc(func() error {
|
||||
return mmap.Free(buf)
|
||||
}))
|
||||
} else {
|
||||
buf = make([]byte, maxBufferSize)
|
||||
}
|
||||
return buf
|
||||
},
|
||||
}
|
||||
} else {
|
||||
ss.bufPool = &pool.Pool[[]byte]{
|
||||
New: func() []byte {
|
||||
return make([]byte, maxBufferSize)
|
||||
},
|
||||
if file.GetFile() == nil {
|
||||
maxBufferSize = min(maxBufferSize, int(file.GetSize()))
|
||||
if maxBufferSize > conf.MaxBufferLimit {
|
||||
_, err := file.CacheFullInTempFile()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
ss.bufPool = &sync.Pool{
|
||||
New: func() any {
|
||||
return make([]byte, maxBufferSize)
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
file.Add(utils.CloseFunc(func() error {
|
||||
ss.bufPool.Reset()
|
||||
return nil
|
||||
}))
|
||||
return ss, nil
|
||||
}
|
||||
|
||||
@ -208,7 +227,7 @@ func (ss *StreamSectionReader) GetSectionReader(off, length int64) (*SectionRead
|
||||
if off != ss.off {
|
||||
return nil, fmt.Errorf("stream not cached: request offset %d != current offset %d", off, ss.off)
|
||||
}
|
||||
tempBuf := ss.bufPool.Get()
|
||||
tempBuf := ss.bufPool.Get().([]byte)
|
||||
buf = tempBuf[:length]
|
||||
n, err := io.ReadFull(ss.file, buf)
|
||||
if int64(n) != length {
|
||||
@ -221,7 +240,7 @@ func (ss *StreamSectionReader) GetSectionReader(off, length int64) (*SectionRead
|
||||
return &SectionReader{io.NewSectionReader(cache, off, length), buf}, nil
|
||||
}
|
||||
|
||||
func (ss *StreamSectionReader) FreeSectionReader(sr *SectionReader) {
|
||||
func (ss *StreamSectionReader) RecycleSectionReader(sr *SectionReader) {
|
||||
if sr != nil {
|
||||
if sr.buf != nil {
|
||||
ss.bufPool.Put(sr.buf[0:cap(sr.buf)])
|
||||
|
@ -1,92 +0,0 @@
|
||||
package buffer
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
// 用于存储不复用的[]byte
|
||||
type Reader struct {
|
||||
bufs [][]byte
|
||||
length int
|
||||
offset int
|
||||
}
|
||||
|
||||
func (r *Reader) Len() int {
|
||||
return r.length
|
||||
}
|
||||
|
||||
func (r *Reader) Append(buf []byte) {
|
||||
r.length += len(buf)
|
||||
r.bufs = append(r.bufs, buf)
|
||||
}
|
||||
|
||||
func (r *Reader) Read(p []byte) (int, error) {
|
||||
n, err := r.ReadAt(p, int64(r.offset))
|
||||
if n > 0 {
|
||||
r.offset += n
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (r *Reader) ReadAt(p []byte, off int64) (int, error) {
|
||||
if off < 0 || off >= int64(r.length) {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
n, length := 0, int64(0)
|
||||
readFrom := false
|
||||
for _, buf := range r.bufs {
|
||||
newLength := length + int64(len(buf))
|
||||
if readFrom {
|
||||
w := copy(p[n:], buf)
|
||||
n += w
|
||||
} else if off < newLength {
|
||||
readFrom = true
|
||||
w := copy(p[n:], buf[int(off-length):])
|
||||
n += w
|
||||
}
|
||||
if n == len(p) {
|
||||
return n, nil
|
||||
}
|
||||
length = newLength
|
||||
}
|
||||
|
||||
return n, io.EOF
|
||||
}
|
||||
|
||||
func (r *Reader) Seek(offset int64, whence int) (int64, error) {
|
||||
var abs int
|
||||
switch whence {
|
||||
case io.SeekStart:
|
||||
abs = int(offset)
|
||||
case io.SeekCurrent:
|
||||
abs = r.offset + int(offset)
|
||||
case io.SeekEnd:
|
||||
abs = r.length + int(offset)
|
||||
default:
|
||||
return 0, errors.New("Seek: invalid whence")
|
||||
}
|
||||
|
||||
if abs < 0 || abs > r.length {
|
||||
return 0, errors.New("Seek: invalid offset")
|
||||
}
|
||||
|
||||
r.offset = abs
|
||||
return int64(abs), nil
|
||||
}
|
||||
|
||||
func (r *Reader) Reset() {
|
||||
clear(r.bufs)
|
||||
r.bufs = nil
|
||||
r.length = 0
|
||||
r.offset = 0
|
||||
}
|
||||
|
||||
func NewReader(buf ...[]byte) *Reader {
|
||||
b := &Reader{}
|
||||
for _, b1 := range buf {
|
||||
b.Append(b1)
|
||||
}
|
||||
return b
|
||||
}
|
@ -1,95 +0,0 @@
|
||||
package buffer
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestReader_ReadAt(t *testing.T) {
|
||||
type args struct {
|
||||
p []byte
|
||||
off int64
|
||||
}
|
||||
bs := &Reader{}
|
||||
bs.Append([]byte("github.com"))
|
||||
bs.Append([]byte("/"))
|
||||
bs.Append([]byte("OpenList"))
|
||||
bs.Append([]byte("Team/"))
|
||||
bs.Append([]byte("OpenList"))
|
||||
tests := []struct {
|
||||
name string
|
||||
b *Reader
|
||||
args args
|
||||
want func(a args, n int, err error) error
|
||||
}{
|
||||
{
|
||||
name: "readAt len 10 offset 0",
|
||||
b: bs,
|
||||
args: args{
|
||||
p: make([]byte, 10),
|
||||
off: 0,
|
||||
},
|
||||
want: func(a args, n int, err error) error {
|
||||
if n != len(a.p) {
|
||||
return errors.New("read length not match")
|
||||
}
|
||||
if string(a.p) != "github.com" {
|
||||
return errors.New("read content not match")
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "readAt len 12 offset 11",
|
||||
b: bs,
|
||||
args: args{
|
||||
p: make([]byte, 12),
|
||||
off: 11,
|
||||
},
|
||||
want: func(a args, n int, err error) error {
|
||||
if n != len(a.p) {
|
||||
return errors.New("read length not match")
|
||||
}
|
||||
if string(a.p) != "OpenListTeam" {
|
||||
return errors.New("read content not match")
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "readAt len 50 offset 24",
|
||||
b: bs,
|
||||
args: args{
|
||||
p: make([]byte, 50),
|
||||
off: 24,
|
||||
},
|
||||
want: func(a args, n int, err error) error {
|
||||
if n != bs.Len()-int(a.off) {
|
||||
return errors.New("read length not match")
|
||||
}
|
||||
if string(a.p[:n]) != "OpenList" {
|
||||
return errors.New("read content not match")
|
||||
}
|
||||
if err != io.EOF {
|
||||
return errors.New("expect eof")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := tt.b.ReadAt(tt.args.p, tt.args.off)
|
||||
if err := tt.want(tt.args, got, err); err != nil {
|
||||
t.Errorf("Bytes.ReadAt() error = %v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
@ -53,12 +53,11 @@ func (g *Group) Go(do func(ctx context.Context) error) {
|
||||
}
|
||||
|
||||
type Lifecycle struct {
|
||||
// Before在OrderedGroup是线程安全的。
|
||||
// 只会被调用一次
|
||||
// Before在OrderedGroup是线程安全的
|
||||
Before func(ctx context.Context) error
|
||||
// 如果Before返回err就不调用Do
|
||||
Do func(ctx context.Context) error
|
||||
// 最后调用一次After
|
||||
// 最后调用After
|
||||
After func(err error)
|
||||
}
|
||||
|
||||
|
@ -1,37 +0,0 @@
|
||||
package pool
|
||||
|
||||
import "sync"
|
||||
|
||||
type Pool[T any] struct {
|
||||
New func() T
|
||||
MaxCap int
|
||||
|
||||
cache []T
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
func (p *Pool[T]) Get() T {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if len(p.cache) == 0 {
|
||||
return p.New()
|
||||
}
|
||||
item := p.cache[len(p.cache)-1]
|
||||
p.cache = p.cache[:len(p.cache)-1]
|
||||
return item
|
||||
}
|
||||
|
||||
func (p *Pool[T]) Put(item T) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if p.MaxCap == 0 || len(p.cache) < int(p.MaxCap) {
|
||||
p.cache = append(p.cache, item)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Pool[T]) Reset() {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
clear(p.cache)
|
||||
p.cache = nil
|
||||
}
|
@ -194,32 +194,32 @@ type SyncClosersIF interface {
|
||||
|
||||
type SyncClosers struct {
|
||||
closers []io.Closer
|
||||
ref int32
|
||||
ref atomic.Int32
|
||||
}
|
||||
|
||||
var _ SyncClosersIF = (*SyncClosers)(nil)
|
||||
|
||||
func (c *SyncClosers) AcquireReference() bool {
|
||||
ref := atomic.AddInt32(&c.ref, 1)
|
||||
ref := c.ref.Add(1)
|
||||
if ref > 0 {
|
||||
// log.Debugf("SyncClosers.AcquireReference %p,ref=%d\n", c, ref)
|
||||
return true
|
||||
}
|
||||
atomic.StoreInt32(&c.ref, math.MinInt16)
|
||||
c.ref.Store(math.MinInt16)
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *SyncClosers) Close() error {
|
||||
ref := atomic.AddInt32(&c.ref, -1)
|
||||
ref := c.ref.Add(-1)
|
||||
if ref < -1 {
|
||||
atomic.StoreInt32(&c.ref, math.MinInt16)
|
||||
c.ref.Store(math.MinInt16)
|
||||
return nil
|
||||
}
|
||||
// log.Debugf("SyncClosers.Close %p,ref=%d\n", c, ref+1)
|
||||
if ref > 0 {
|
||||
return nil
|
||||
}
|
||||
atomic.StoreInt32(&c.ref, math.MinInt16)
|
||||
c.ref.Store(math.MinInt16)
|
||||
|
||||
var errs []error
|
||||
for _, closer := range c.closers {
|
||||
@ -234,7 +234,7 @@ func (c *SyncClosers) Close() error {
|
||||
|
||||
func (c *SyncClosers) Add(closer io.Closer) {
|
||||
if closer != nil {
|
||||
if atomic.LoadInt32(&c.ref) < 0 {
|
||||
if c.ref.Load() < 0 {
|
||||
panic("Not reusable")
|
||||
}
|
||||
c.closers = append(c.closers, closer)
|
||||
@ -243,7 +243,7 @@ func (c *SyncClosers) Add(closer io.Closer) {
|
||||
|
||||
func (c *SyncClosers) AddIfCloser(a any) {
|
||||
if closer, ok := a.(io.Closer); ok {
|
||||
if atomic.LoadInt32(&c.ref) < 0 {
|
||||
if c.ref.Load() < 0 {
|
||||
panic("Not reusable")
|
||||
}
|
||||
c.closers = append(c.closers, closer)
|
||||
|
@ -2,9 +2,6 @@ package common
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"html"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/cmd/flags"
|
||||
@ -41,41 +38,6 @@ func ErrorResp(c *gin.Context, err error, code int, l ...bool) {
|
||||
//c.Abort()
|
||||
}
|
||||
|
||||
// ErrorPage is used to return error page HTML.
|
||||
// It also returns standard HTTP status code.
|
||||
// @param l: if true, log error
|
||||
func ErrorPage(c *gin.Context, err error, code int, l ...bool) {
|
||||
|
||||
if len(l) > 0 && l[0] {
|
||||
if flags.Debug || flags.Dev {
|
||||
log.Errorf("%+v", err)
|
||||
} else {
|
||||
log.Errorf("%v", err)
|
||||
}
|
||||
}
|
||||
|
||||
codes := fmt.Sprintf("%d %s", code, http.StatusText(code))
|
||||
|
||||
html := fmt.Sprintf(`<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8" />
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1" />
|
||||
<meta name="color-scheme" content="dark light" />
|
||||
<meta name="robots" content="noindex" />
|
||||
<title>%s</title>
|
||||
</head>
|
||||
<body>
|
||||
<h1>%s</h1>
|
||||
<hr>
|
||||
<p>%s</p>
|
||||
</body>
|
||||
</html>`,
|
||||
codes, codes, html.EscapeString(hidePrivacy(err.Error())))
|
||||
c.Data(code, "text/html; charset=utf-8", []byte(html))
|
||||
c.Abort()
|
||||
}
|
||||
|
||||
func ErrorWithDataResp(c *gin.Context, err error, code int, data interface{}, l ...bool) {
|
||||
if len(l) > 0 && l[0] {
|
||||
if flags.Debug || flags.Dev {
|
||||
|
@ -3,9 +3,9 @@ package handles
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
stdpath "path"
|
||||
"strings"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/task"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/conf"
|
||||
@ -15,7 +15,6 @@ import (
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/setting"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/sign"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/task"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
"github.com/OpenListTeam/OpenList/v4/server/common"
|
||||
"github.com/gin-gonic/gin"
|
||||
@ -72,26 +71,13 @@ func toContentResp(objs []model.ObjTree) []ArchiveContentResp {
|
||||
return ret
|
||||
}
|
||||
|
||||
func FsArchiveMetaSplit(c *gin.Context) {
|
||||
func FsArchiveMeta(c *gin.Context) {
|
||||
var req ArchiveMetaReq
|
||||
if err := c.ShouldBind(&req); err != nil {
|
||||
common.ErrorResp(c, err, 400)
|
||||
return
|
||||
}
|
||||
if strings.HasPrefix(req.Path, "/@s") {
|
||||
req.Path = strings.TrimPrefix(req.Path, "/@s")
|
||||
SharingArchiveMeta(c, &req)
|
||||
return
|
||||
}
|
||||
user := c.Request.Context().Value(conf.UserKey).(*model.User)
|
||||
if user.IsGuest() && user.Disabled {
|
||||
common.ErrorStrResp(c, "Guest user is disabled, login please", 401)
|
||||
return
|
||||
}
|
||||
FsArchiveMeta(c, &req, user)
|
||||
}
|
||||
|
||||
func FsArchiveMeta(c *gin.Context, req *ArchiveMetaReq, user *model.User) {
|
||||
if !user.CanReadArchives() {
|
||||
common.ErrorResp(c, errs.PermissionDenied, 403)
|
||||
return
|
||||
@ -156,27 +142,19 @@ type ArchiveListReq struct {
|
||||
InnerPath string `json:"inner_path" form:"inner_path"`
|
||||
}
|
||||
|
||||
func FsArchiveListSplit(c *gin.Context) {
|
||||
type ArchiveListResp struct {
|
||||
Content []ObjResp `json:"content"`
|
||||
Total int64 `json:"total"`
|
||||
}
|
||||
|
||||
func FsArchiveList(c *gin.Context) {
|
||||
var req ArchiveListReq
|
||||
if err := c.ShouldBind(&req); err != nil {
|
||||
common.ErrorResp(c, err, 400)
|
||||
return
|
||||
}
|
||||
req.Validate()
|
||||
if strings.HasPrefix(req.Path, "/@s") {
|
||||
req.Path = strings.TrimPrefix(req.Path, "/@s")
|
||||
SharingArchiveList(c, &req)
|
||||
return
|
||||
}
|
||||
user := c.Request.Context().Value(conf.UserKey).(*model.User)
|
||||
if user.IsGuest() && user.Disabled {
|
||||
common.ErrorStrResp(c, "Guest user is disabled, login please", 401)
|
||||
return
|
||||
}
|
||||
FsArchiveList(c, &req, user)
|
||||
}
|
||||
|
||||
func FsArchiveList(c *gin.Context, req *ArchiveListReq, user *model.User) {
|
||||
if !user.CanReadArchives() {
|
||||
common.ErrorResp(c, errs.PermissionDenied, 403)
|
||||
return
|
||||
@ -223,7 +201,7 @@ func FsArchiveList(c *gin.Context, req *ArchiveListReq, user *model.User) {
|
||||
ret, _ := utils.SliceConvert(objs, func(src model.Obj) (ObjResp, error) {
|
||||
return toObjsRespWithoutSignAndThumb(src), nil
|
||||
})
|
||||
common.SuccessResp(c, common.PageResp{
|
||||
common.SuccessResp(c, ArchiveListResp{
|
||||
Content: ret,
|
||||
Total: int64(total),
|
||||
})
|
||||
@ -320,7 +298,7 @@ func ArchiveDown(c *gin.Context) {
|
||||
filename := stdpath.Base(innerPath)
|
||||
storage, err := fs.GetStorage(archiveRawPath, &fs.GetStoragesArgs{})
|
||||
if err != nil {
|
||||
common.ErrorPage(c, err, 500)
|
||||
common.ErrorResp(c, err, 500)
|
||||
return
|
||||
}
|
||||
if common.ShouldProxy(storage, filename) {
|
||||
@ -340,7 +318,7 @@ func ArchiveDown(c *gin.Context) {
|
||||
InnerPath: innerPath,
|
||||
})
|
||||
if err != nil {
|
||||
common.ErrorPage(c, err, 500)
|
||||
common.ErrorResp(c, err, 500)
|
||||
return
|
||||
}
|
||||
redirect(c, link)
|
||||
@ -354,7 +332,7 @@ func ArchiveProxy(c *gin.Context) {
|
||||
filename := stdpath.Base(innerPath)
|
||||
storage, err := fs.GetStorage(archiveRawPath, &fs.GetStoragesArgs{})
|
||||
if err != nil {
|
||||
common.ErrorPage(c, err, 500)
|
||||
common.ErrorResp(c, err, 500)
|
||||
return
|
||||
}
|
||||
if canProxy(storage, filename) {
|
||||
@ -370,34 +348,16 @@ func ArchiveProxy(c *gin.Context) {
|
||||
InnerPath: innerPath,
|
||||
})
|
||||
if err != nil {
|
||||
common.ErrorPage(c, err, 500)
|
||||
common.ErrorResp(c, err, 500)
|
||||
return
|
||||
}
|
||||
proxy(c, link, file, storage.GetStorage().ProxyRange)
|
||||
} else {
|
||||
common.ErrorPage(c, errors.New("proxy not allowed"), 403)
|
||||
common.ErrorStrResp(c, "proxy not allowed", 403)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func proxyInternalExtract(c *gin.Context, rc io.ReadCloser, size int64, fileName string) {
|
||||
defer func() {
|
||||
if err := rc.Close(); err != nil {
|
||||
log.Errorf("failed to close file streamer, %v", err)
|
||||
}
|
||||
}()
|
||||
headers := map[string]string{
|
||||
"Referrer-Policy": "no-referrer",
|
||||
"Cache-Control": "max-age=0, no-cache, no-store, must-revalidate",
|
||||
}
|
||||
headers["Content-Disposition"] = utils.GenerateContentDisposition(fileName)
|
||||
contentType := c.Request.Header.Get("Content-Type")
|
||||
if contentType == "" {
|
||||
contentType = utils.GetMimeType(fileName)
|
||||
}
|
||||
c.DataFromReader(200, size, contentType, rc, headers)
|
||||
}
|
||||
|
||||
func ArchiveInternalExtract(c *gin.Context) {
|
||||
archiveRawPath := c.Request.Context().Value(conf.PathKey).(string)
|
||||
innerPath := utils.FixAndCleanPath(c.Query("inner"))
|
||||
@ -413,11 +373,25 @@ func ArchiveInternalExtract(c *gin.Context) {
|
||||
InnerPath: innerPath,
|
||||
})
|
||||
if err != nil {
|
||||
common.ErrorPage(c, err, 500)
|
||||
common.ErrorResp(c, err, 500)
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
if err := rc.Close(); err != nil {
|
||||
log.Errorf("failed to close file streamer, %v", err)
|
||||
}
|
||||
}()
|
||||
headers := map[string]string{
|
||||
"Referrer-Policy": "no-referrer",
|
||||
"Cache-Control": "max-age=0, no-cache, no-store, must-revalidate",
|
||||
}
|
||||
fileName := stdpath.Base(innerPath)
|
||||
proxyInternalExtract(c, rc, size, fileName)
|
||||
headers["Content-Disposition"] = utils.GenerateContentDisposition(fileName)
|
||||
contentType := c.Request.Header.Get("Content-Type")
|
||||
if contentType == "" {
|
||||
contentType = utils.GetMimeType(fileName)
|
||||
}
|
||||
c.DataFromReader(200, size, contentType, rc, headers)
|
||||
}
|
||||
|
||||
func ArchiveExtensions(c *gin.Context) {
|
||||
|
@ -26,7 +26,7 @@ func Down(c *gin.Context) {
|
||||
filename := stdpath.Base(rawPath)
|
||||
storage, err := fs.GetStorage(rawPath, &fs.GetStoragesArgs{})
|
||||
if err != nil {
|
||||
common.ErrorPage(c, err, 500)
|
||||
common.ErrorResp(c, err, 500)
|
||||
return
|
||||
}
|
||||
if common.ShouldProxy(storage, filename) {
|
||||
@ -40,7 +40,7 @@ func Down(c *gin.Context) {
|
||||
Redirect: true,
|
||||
})
|
||||
if err != nil {
|
||||
common.ErrorPage(c, err, 500)
|
||||
common.ErrorResp(c, err, 500)
|
||||
return
|
||||
}
|
||||
redirect(c, link)
|
||||
@ -52,7 +52,7 @@ func Proxy(c *gin.Context) {
|
||||
filename := stdpath.Base(rawPath)
|
||||
storage, err := fs.GetStorage(rawPath, &fs.GetStoragesArgs{})
|
||||
if err != nil {
|
||||
common.ErrorPage(c, err, 500)
|
||||
common.ErrorResp(c, err, 500)
|
||||
return
|
||||
}
|
||||
if canProxy(storage, filename) {
|
||||
@ -67,12 +67,12 @@ func Proxy(c *gin.Context) {
|
||||
Type: c.Query("type"),
|
||||
})
|
||||
if err != nil {
|
||||
common.ErrorPage(c, err, 500)
|
||||
common.ErrorResp(c, err, 500)
|
||||
return
|
||||
}
|
||||
proxy(c, link, file, storage.GetStorage().ProxyRange)
|
||||
} else {
|
||||
common.ErrorPage(c, errors.New("proxy not allowed"), 403)
|
||||
common.ErrorStrResp(c, "proxy not allowed", 403)
|
||||
return
|
||||
}
|
||||
}
|
||||
@ -89,7 +89,7 @@ func redirect(c *gin.Context, link *model.Link) {
|
||||
}
|
||||
link.URL, err = utils.InjectQuery(link.URL, query)
|
||||
if err != nil {
|
||||
common.ErrorPage(c, err, 500)
|
||||
common.ErrorResp(c, err, 500)
|
||||
return
|
||||
}
|
||||
}
|
||||
@ -106,7 +106,7 @@ func proxy(c *gin.Context, link *model.Link, file model.Obj, proxyRange bool) {
|
||||
}
|
||||
link.URL, err = utils.InjectQuery(link.URL, query)
|
||||
if err != nil {
|
||||
common.ErrorPage(c, err, 500)
|
||||
common.ErrorResp(c, err, 500)
|
||||
return
|
||||
}
|
||||
}
|
||||
@ -149,9 +149,9 @@ func proxy(c *gin.Context, link *model.Link, file model.Obj, proxyRange bool) {
|
||||
log.Errorf("%s %s local proxy error: %+v", c.Request.Method, c.Request.URL.Path, err)
|
||||
} else {
|
||||
if statusCode, ok := errors.Unwrap(err).(net.ErrorHttpStatusCode); ok {
|
||||
common.ErrorPage(c, err, int(statusCode), true)
|
||||
common.ErrorResp(c, err, int(statusCode), true)
|
||||
} else {
|
||||
common.ErrorPage(c, err, 500, true)
|
||||
common.ErrorResp(c, err, 500, true)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -56,27 +56,14 @@ type FsListResp struct {
|
||||
Provider string `json:"provider"`
|
||||
}
|
||||
|
||||
func FsListSplit(c *gin.Context) {
|
||||
func FsList(c *gin.Context) {
|
||||
var req ListReq
|
||||
if err := c.ShouldBind(&req); err != nil {
|
||||
common.ErrorResp(c, err, 400)
|
||||
return
|
||||
}
|
||||
req.Validate()
|
||||
if strings.HasPrefix(req.Path, "/@s") {
|
||||
req.Path = strings.TrimPrefix(req.Path, "/@s")
|
||||
SharingList(c, &req)
|
||||
return
|
||||
}
|
||||
user := c.Request.Context().Value(conf.UserKey).(*model.User)
|
||||
if user.IsGuest() && user.Disabled {
|
||||
common.ErrorStrResp(c, "Guest user is disabled, login please", 401)
|
||||
return
|
||||
}
|
||||
FsList(c, &req, user)
|
||||
}
|
||||
|
||||
func FsList(c *gin.Context, req *ListReq, user *model.User) {
|
||||
reqPath, err := user.JoinPath(req.Path)
|
||||
if err != nil {
|
||||
common.ErrorResp(c, err, 403)
|
||||
@ -256,26 +243,13 @@ type FsGetResp struct {
|
||||
Related []ObjResp `json:"related"`
|
||||
}
|
||||
|
||||
func FsGetSplit(c *gin.Context) {
|
||||
func FsGet(c *gin.Context) {
|
||||
var req FsGetReq
|
||||
if err := c.ShouldBind(&req); err != nil {
|
||||
common.ErrorResp(c, err, 400)
|
||||
return
|
||||
}
|
||||
if strings.HasPrefix(req.Path, "/@s") {
|
||||
req.Path = strings.TrimPrefix(req.Path, "/@s")
|
||||
SharingGet(c, &req)
|
||||
return
|
||||
}
|
||||
user := c.Request.Context().Value(conf.UserKey).(*model.User)
|
||||
if user.IsGuest() && user.Disabled {
|
||||
common.ErrorStrResp(c, "Guest user is disabled, login please", 401)
|
||||
return
|
||||
}
|
||||
FsGet(c, &req, user)
|
||||
}
|
||||
|
||||
func FsGet(c *gin.Context, req *FsGetReq, user *model.User) {
|
||||
reqPath, err := user.JoinPath(req.Path)
|
||||
if err != nil {
|
||||
common.ErrorResp(c, err, 403)
|
||||
|
@ -1,568 +0,0 @@
|
||||
package handles
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
stdpath "path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/conf"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/setting"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/sharing"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
"github.com/OpenListTeam/OpenList/v4/server/common"
|
||||
"github.com/OpenListTeam/go-cache"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func SharingGet(c *gin.Context, req *FsGetReq) {
|
||||
sid, path, _ := strings.Cut(strings.TrimPrefix(req.Path, "/"), "/")
|
||||
if sid == "" {
|
||||
common.ErrorStrResp(c, "invalid share id", 400)
|
||||
return
|
||||
}
|
||||
s, obj, err := sharing.Get(c.Request.Context(), sid, path, model.SharingListArgs{
|
||||
Refresh: false,
|
||||
Pwd: req.Password,
|
||||
})
|
||||
if dealError(c, err) {
|
||||
return
|
||||
}
|
||||
_ = countAccess(c.ClientIP(), s)
|
||||
fakePath := fmt.Sprintf("/%s/%s", sid, path)
|
||||
url := ""
|
||||
if !obj.IsDir() {
|
||||
url = fmt.Sprintf("%s/sd%s", common.GetApiUrl(c), utils.EncodePath(fakePath, true))
|
||||
if s.Pwd != "" {
|
||||
url += "?pwd=" + s.Pwd
|
||||
}
|
||||
}
|
||||
thumb, _ := model.GetThumb(obj)
|
||||
common.SuccessResp(c, FsGetResp{
|
||||
ObjResp: ObjResp{
|
||||
Id: "",
|
||||
Path: fakePath,
|
||||
Name: obj.GetName(),
|
||||
Size: obj.GetSize(),
|
||||
IsDir: obj.IsDir(),
|
||||
Modified: obj.ModTime(),
|
||||
Created: obj.CreateTime(),
|
||||
HashInfoStr: obj.GetHash().String(),
|
||||
HashInfo: obj.GetHash().Export(),
|
||||
Sign: "",
|
||||
Type: utils.GetFileType(obj.GetName()),
|
||||
Thumb: thumb,
|
||||
},
|
||||
RawURL: url,
|
||||
Readme: s.Readme,
|
||||
Header: s.Header,
|
||||
Provider: "unknown",
|
||||
Related: nil,
|
||||
})
|
||||
}
|
||||
|
||||
func SharingList(c *gin.Context, req *ListReq) {
|
||||
sid, path, _ := strings.Cut(strings.TrimPrefix(req.Path, "/"), "/")
|
||||
if sid == "" {
|
||||
common.ErrorStrResp(c, "invalid share id", 400)
|
||||
return
|
||||
}
|
||||
s, objs, err := sharing.List(c.Request.Context(), sid, path, model.SharingListArgs{
|
||||
Refresh: req.Refresh,
|
||||
Pwd: req.Password,
|
||||
})
|
||||
if dealError(c, err) {
|
||||
return
|
||||
}
|
||||
_ = countAccess(c.ClientIP(), s)
|
||||
fakePath := fmt.Sprintf("/%s/%s", sid, path)
|
||||
total, objs := pagination(objs, &req.PageReq)
|
||||
common.SuccessResp(c, FsListResp{
|
||||
Content: utils.MustSliceConvert(objs, func(obj model.Obj) ObjResp {
|
||||
thumb, _ := model.GetThumb(obj)
|
||||
return ObjResp{
|
||||
Id: "",
|
||||
Path: stdpath.Join(fakePath, obj.GetName()),
|
||||
Name: obj.GetName(),
|
||||
Size: obj.GetSize(),
|
||||
IsDir: obj.IsDir(),
|
||||
Modified: obj.ModTime(),
|
||||
Created: obj.CreateTime(),
|
||||
HashInfoStr: obj.GetHash().String(),
|
||||
HashInfo: obj.GetHash().Export(),
|
||||
Sign: "",
|
||||
Thumb: thumb,
|
||||
Type: utils.GetObjType(obj.GetName(), obj.IsDir()),
|
||||
}
|
||||
}),
|
||||
Total: int64(total),
|
||||
Readme: s.Readme,
|
||||
Header: s.Header,
|
||||
Write: false,
|
||||
Provider: "unknown",
|
||||
})
|
||||
}
|
||||
|
||||
func SharingArchiveMeta(c *gin.Context, req *ArchiveMetaReq) {
|
||||
if !setting.GetBool(conf.ShareArchivePreview) {
|
||||
common.ErrorStrResp(c, "sharing archives previewing is not allowed", 403)
|
||||
return
|
||||
}
|
||||
sid, path, _ := strings.Cut(strings.TrimPrefix(req.Path, "/"), "/")
|
||||
if sid == "" {
|
||||
common.ErrorStrResp(c, "invalid share id", 400)
|
||||
return
|
||||
}
|
||||
archiveArgs := model.ArchiveArgs{
|
||||
LinkArgs: model.LinkArgs{
|
||||
Header: c.Request.Header,
|
||||
Type: c.Query("type"),
|
||||
},
|
||||
Password: req.ArchivePass,
|
||||
}
|
||||
s, ret, err := sharing.ArchiveMeta(c.Request.Context(), sid, path, model.SharingArchiveMetaArgs{
|
||||
ArchiveMetaArgs: model.ArchiveMetaArgs{
|
||||
ArchiveArgs: archiveArgs,
|
||||
Refresh: req.Refresh,
|
||||
},
|
||||
Pwd: req.Password,
|
||||
})
|
||||
if dealError(c, err) {
|
||||
return
|
||||
}
|
||||
_ = countAccess(c.ClientIP(), s)
|
||||
fakePath := fmt.Sprintf("/%s/%s", sid, path)
|
||||
url := fmt.Sprintf("%s/sad%s", common.GetApiUrl(c), utils.EncodePath(fakePath, true))
|
||||
if s.Pwd != "" {
|
||||
url += "?pwd=" + s.Pwd
|
||||
}
|
||||
common.SuccessResp(c, ArchiveMetaResp{
|
||||
Comment: ret.GetComment(),
|
||||
IsEncrypted: ret.IsEncrypted(),
|
||||
Content: toContentResp(ret.GetTree()),
|
||||
Sort: ret.Sort,
|
||||
RawURL: url,
|
||||
Sign: "",
|
||||
})
|
||||
}
|
||||
|
||||
func SharingArchiveList(c *gin.Context, req *ArchiveListReq) {
|
||||
if !setting.GetBool(conf.ShareArchivePreview) {
|
||||
common.ErrorStrResp(c, "sharing archives previewing is not allowed", 403)
|
||||
return
|
||||
}
|
||||
sid, path, _ := strings.Cut(strings.TrimPrefix(req.Path, "/"), "/")
|
||||
if sid == "" {
|
||||
common.ErrorStrResp(c, "invalid share id", 400)
|
||||
return
|
||||
}
|
||||
innerArgs := model.ArchiveInnerArgs{
|
||||
ArchiveArgs: model.ArchiveArgs{
|
||||
LinkArgs: model.LinkArgs{
|
||||
Header: c.Request.Header,
|
||||
Type: c.Query("type"),
|
||||
},
|
||||
Password: req.ArchivePass,
|
||||
},
|
||||
InnerPath: utils.FixAndCleanPath(req.InnerPath),
|
||||
}
|
||||
s, objs, err := sharing.ArchiveList(c.Request.Context(), sid, path, model.SharingArchiveListArgs{
|
||||
ArchiveListArgs: model.ArchiveListArgs{
|
||||
ArchiveInnerArgs: innerArgs,
|
||||
Refresh: req.Refresh,
|
||||
},
|
||||
Pwd: req.Password,
|
||||
})
|
||||
if dealError(c, err) {
|
||||
return
|
||||
}
|
||||
_ = countAccess(c.ClientIP(), s)
|
||||
total, objs := pagination(objs, &req.PageReq)
|
||||
ret, _ := utils.SliceConvert(objs, func(src model.Obj) (ObjResp, error) {
|
||||
return toObjsRespWithoutSignAndThumb(src), nil
|
||||
})
|
||||
common.SuccessResp(c, common.PageResp{
|
||||
Content: ret,
|
||||
Total: int64(total),
|
||||
})
|
||||
}
|
||||
|
||||
func SharingDown(c *gin.Context) {
|
||||
sid := c.Request.Context().Value(conf.SharingIDKey).(string)
|
||||
path := c.Request.Context().Value(conf.PathKey).(string)
|
||||
pwd := c.Query("pwd")
|
||||
s, err := op.GetSharingById(sid)
|
||||
if err == nil {
|
||||
if !s.Valid() {
|
||||
err = errs.InvalidSharing
|
||||
} else if !s.Verify(pwd) {
|
||||
err = errs.WrongShareCode
|
||||
} else if len(s.Files) != 1 && path == "/" {
|
||||
err = errors.New("cannot get sharing root link")
|
||||
}
|
||||
}
|
||||
if dealErrorPage(c, err) {
|
||||
return
|
||||
}
|
||||
unwrapPath, err := op.GetSharingUnwrapPath(s, path)
|
||||
if err != nil {
|
||||
common.ErrorPage(c, errors.New("failed get sharing unwrap path"), 500)
|
||||
return
|
||||
}
|
||||
storage, actualPath, err := op.GetStorageAndActualPath(unwrapPath)
|
||||
if dealErrorPage(c, err) {
|
||||
return
|
||||
}
|
||||
if setting.GetBool(conf.ShareForceProxy) || common.ShouldProxy(storage, stdpath.Base(actualPath)) {
|
||||
link, obj, err := op.Link(c.Request.Context(), storage, actualPath, model.LinkArgs{
|
||||
Header: c.Request.Header,
|
||||
Type: c.Query("type"),
|
||||
})
|
||||
if err != nil {
|
||||
common.ErrorPage(c, errors.WithMessage(err, "failed get sharing link"), 500)
|
||||
return
|
||||
}
|
||||
_ = countAccess(c.ClientIP(), s)
|
||||
proxy(c, link, obj, storage.GetStorage().ProxyRange)
|
||||
} else {
|
||||
link, _, err := op.Link(c.Request.Context(), storage, actualPath, model.LinkArgs{
|
||||
IP: c.ClientIP(),
|
||||
Header: c.Request.Header,
|
||||
Type: c.Query("type"),
|
||||
Redirect: true,
|
||||
})
|
||||
if err != nil {
|
||||
common.ErrorPage(c, errors.WithMessage(err, "failed get sharing link"), 500)
|
||||
return
|
||||
}
|
||||
_ = countAccess(c.ClientIP(), s)
|
||||
redirect(c, link)
|
||||
}
|
||||
}
|
||||
|
||||
func SharingArchiveExtract(c *gin.Context) {
|
||||
if !setting.GetBool(conf.ShareArchivePreview) {
|
||||
common.ErrorPage(c, errors.New("sharing archives previewing is not allowed"), 403)
|
||||
return
|
||||
}
|
||||
sid := c.Request.Context().Value(conf.SharingIDKey).(string)
|
||||
path := c.Request.Context().Value(conf.PathKey).(string)
|
||||
pwd := c.Query("pwd")
|
||||
innerPath := utils.FixAndCleanPath(c.Query("inner"))
|
||||
archivePass := c.Query("pass")
|
||||
s, err := op.GetSharingById(sid)
|
||||
if err == nil {
|
||||
if !s.Valid() {
|
||||
err = errs.InvalidSharing
|
||||
} else if !s.Verify(pwd) {
|
||||
err = errs.WrongShareCode
|
||||
} else if len(s.Files) != 1 && path == "/" {
|
||||
err = errors.New("cannot extract sharing root")
|
||||
}
|
||||
}
|
||||
if dealErrorPage(c, err) {
|
||||
return
|
||||
}
|
||||
unwrapPath, err := op.GetSharingUnwrapPath(s, path)
|
||||
if err != nil {
|
||||
common.ErrorPage(c, errors.New("failed get sharing unwrap path"), 500)
|
||||
return
|
||||
}
|
||||
storage, actualPath, err := op.GetStorageAndActualPath(unwrapPath)
|
||||
if dealErrorPage(c, err) {
|
||||
return
|
||||
}
|
||||
args := model.ArchiveInnerArgs{
|
||||
ArchiveArgs: model.ArchiveArgs{
|
||||
LinkArgs: model.LinkArgs{
|
||||
Header: c.Request.Header,
|
||||
Type: c.Query("type"),
|
||||
},
|
||||
Password: archivePass,
|
||||
},
|
||||
InnerPath: innerPath,
|
||||
}
|
||||
if _, ok := storage.(driver.ArchiveReader); ok {
|
||||
if setting.GetBool(conf.ShareForceProxy) || common.ShouldProxy(storage, stdpath.Base(actualPath)) {
|
||||
link, obj, err := op.DriverExtract(c.Request.Context(), storage, actualPath, args)
|
||||
if dealErrorPage(c, err) {
|
||||
return
|
||||
}
|
||||
proxy(c, link, obj, storage.GetStorage().ProxyRange)
|
||||
} else {
|
||||
args.Redirect = true
|
||||
link, _, err := op.DriverExtract(c.Request.Context(), storage, actualPath, args)
|
||||
if dealErrorPage(c, err) {
|
||||
return
|
||||
}
|
||||
redirect(c, link)
|
||||
}
|
||||
} else {
|
||||
rc, size, err := op.InternalExtract(c.Request.Context(), storage, actualPath, args)
|
||||
if dealErrorPage(c, err) {
|
||||
return
|
||||
}
|
||||
fileName := stdpath.Base(innerPath)
|
||||
proxyInternalExtract(c, rc, size, fileName)
|
||||
}
|
||||
}
|
||||
|
||||
func dealError(c *gin.Context, err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
} else if errors.Is(err, errs.SharingNotFound) {
|
||||
common.ErrorStrResp(c, "the share does not exist", 500)
|
||||
} else if errors.Is(err, errs.InvalidSharing) {
|
||||
common.ErrorStrResp(c, "the share has expired or is no longer valid", 500)
|
||||
} else if errors.Is(err, errs.WrongShareCode) {
|
||||
common.ErrorResp(c, err, 403)
|
||||
} else if errors.Is(err, errs.WrongArchivePassword) {
|
||||
common.ErrorResp(c, err, 202)
|
||||
} else {
|
||||
common.ErrorResp(c, err, 500)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func dealErrorPage(c *gin.Context, err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
} else if errors.Is(err, errs.SharingNotFound) {
|
||||
common.ErrorPage(c, errors.New("the share does not exist"), 500)
|
||||
} else if errors.Is(err, errs.InvalidSharing) {
|
||||
common.ErrorPage(c, errors.New("the share has expired or is no longer valid"), 500)
|
||||
} else if errors.Is(err, errs.WrongShareCode) {
|
||||
common.ErrorPage(c, err, 403)
|
||||
} else if errors.Is(err, errs.WrongArchivePassword) {
|
||||
common.ErrorPage(c, err, 202)
|
||||
} else {
|
||||
common.ErrorPage(c, err, 500)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// SharingResp is the API representation of a sharing, embedding the stored
// model and flattening the creator's display fields alongside it.
type SharingResp struct {
	*model.Sharing
	// CreatorName is the creator's username, serialized as "creator".
	CreatorName string `json:"creator"`
	// CreatorRole is the creator's role value, serialized as "creator_role".
	CreatorRole int `json:"creator_role"`
}
|
||||
|
||||
func GetSharing(c *gin.Context) {
|
||||
sid := c.Query("id")
|
||||
user := c.Request.Context().Value(conf.UserKey).(*model.User)
|
||||
s, err := op.GetSharingById(sid)
|
||||
if err != nil || (!user.IsAdmin() && s.Creator.ID != user.ID) {
|
||||
common.ErrorStrResp(c, "sharing not found", 404)
|
||||
return
|
||||
}
|
||||
common.SuccessResp(c, SharingResp{
|
||||
Sharing: s,
|
||||
CreatorName: s.Creator.Username,
|
||||
CreatorRole: s.Creator.Role,
|
||||
})
|
||||
}
|
||||
|
||||
func ListSharings(c *gin.Context) {
|
||||
var req model.PageReq
|
||||
if err := c.ShouldBind(&req); err != nil {
|
||||
common.ErrorResp(c, err, 400)
|
||||
return
|
||||
}
|
||||
req.Validate()
|
||||
user := c.Request.Context().Value(conf.UserKey).(*model.User)
|
||||
var sharings []model.Sharing
|
||||
var total int64
|
||||
var err error
|
||||
if user.IsAdmin() {
|
||||
sharings, total, err = op.GetSharings(req.Page, req.PerPage)
|
||||
} else {
|
||||
sharings, total, err = op.GetSharingsByCreatorId(user.ID, req.Page, req.PerPage)
|
||||
}
|
||||
if err != nil {
|
||||
common.ErrorResp(c, err, 500, true)
|
||||
return
|
||||
}
|
||||
common.SuccessResp(c, common.PageResp{
|
||||
Content: utils.MustSliceConvert(sharings, func(s model.Sharing) SharingResp {
|
||||
return SharingResp{
|
||||
Sharing: &s,
|
||||
CreatorName: s.Creator.Username,
|
||||
CreatorRole: s.Creator.Role,
|
||||
}
|
||||
}),
|
||||
Total: total,
|
||||
})
|
||||
}
|
||||
|
||||
// CreateSharingReq is the request body for creating a sharing.
type CreateSharingReq struct {
	// Files are the paths to share; handlers require at least one non-empty entry.
	Files []string `json:"files"`
	// Expires is an optional expiry time; nil presumably means no expiry —
	// TODO confirm against model.Sharing semantics.
	Expires *time.Time `json:"expires"`
	// Pwd is an optional share code required to open the sharing.
	Pwd string `json:"pwd"`
	// MaxAccessed caps the access counter; NOTE(review): 0 presumably means
	// unlimited — confirm against the sharing validity check.
	MaxAccessed int `json:"max_accessed"`
	// Disabled creates the sharing in a disabled state.
	Disabled bool `json:"disabled"`
	// Remark is free-form text attached to the sharing.
	Remark string `json:"remark"`
	// Readme is content shown on the sharing page.
	Readme string `json:"readme"`
	// Header is content shown above the sharing listing.
	Header string `json:"header"`
	// Sort is embedded ordering configuration for the shared listing.
	model.Sort
}
|
||||
|
||||
// UpdateSharingReq is the request body for updating an existing sharing.
// It extends CreateSharingReq with the target ID and the access counter.
type UpdateSharingReq struct {
	// ID identifies the sharing to update.
	ID string `json:"id"`
	// Accessed overwrites the stored access counter.
	Accessed int `json:"accessed"`
	CreateSharingReq
}
|
||||
|
||||
func UpdateSharing(c *gin.Context) {
|
||||
var req UpdateSharingReq
|
||||
if err := c.ShouldBind(&req); err != nil {
|
||||
common.ErrorResp(c, err, 400)
|
||||
return
|
||||
}
|
||||
if len(req.Files) == 0 || (len(req.Files) == 1 && req.Files[0] == "") {
|
||||
common.ErrorStrResp(c, "must add at least 1 object", 400)
|
||||
return
|
||||
}
|
||||
user := c.Request.Context().Value(conf.UserKey).(*model.User)
|
||||
if !user.CanShare() {
|
||||
common.ErrorStrResp(c, "permission denied", 403)
|
||||
return
|
||||
}
|
||||
for i, s := range req.Files {
|
||||
s = utils.FixAndCleanPath(s)
|
||||
req.Files[i] = s
|
||||
if !user.IsAdmin() && !strings.HasPrefix(s, user.BasePath) {
|
||||
common.ErrorStrResp(c, fmt.Sprintf("permission denied to share path [%s]", s), 500)
|
||||
return
|
||||
}
|
||||
}
|
||||
s, err := op.GetSharingById(req.ID)
|
||||
if err != nil || (!user.IsAdmin() && s.CreatorId != user.ID) {
|
||||
common.ErrorStrResp(c, "sharing not found", 404)
|
||||
return
|
||||
}
|
||||
s.Files = req.Files
|
||||
s.Expires = req.Expires
|
||||
s.Pwd = req.Pwd
|
||||
s.Accessed = req.Accessed
|
||||
s.MaxAccessed = req.MaxAccessed
|
||||
s.Disabled = req.Disabled
|
||||
s.Sort = req.Sort
|
||||
s.Header = req.Header
|
||||
s.Readme = req.Readme
|
||||
s.Remark = req.Remark
|
||||
if err = op.UpdateSharing(s); err != nil {
|
||||
common.ErrorResp(c, err, 500)
|
||||
} else {
|
||||
common.SuccessResp(c, SharingResp{
|
||||
Sharing: s,
|
||||
CreatorName: s.Creator.Username,
|
||||
CreatorRole: s.Creator.Role,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func CreateSharing(c *gin.Context) {
|
||||
var req CreateSharingReq
|
||||
var err error
|
||||
if err = c.ShouldBind(&req); err != nil {
|
||||
common.ErrorResp(c, err, 400)
|
||||
return
|
||||
}
|
||||
if len(req.Files) == 0 || (len(req.Files) == 1 && req.Files[0] == "") {
|
||||
common.ErrorStrResp(c, "must add at least 1 object", 400)
|
||||
return
|
||||
}
|
||||
user := c.Request.Context().Value(conf.UserKey).(*model.User)
|
||||
if !user.CanShare() {
|
||||
common.ErrorStrResp(c, "permission denied", 403)
|
||||
return
|
||||
}
|
||||
for i, s := range req.Files {
|
||||
s = utils.FixAndCleanPath(s)
|
||||
req.Files[i] = s
|
||||
if !user.IsAdmin() && !strings.HasPrefix(s, user.BasePath) {
|
||||
common.ErrorStrResp(c, fmt.Sprintf("permission denied to share path [%s]", s), 500)
|
||||
return
|
||||
}
|
||||
}
|
||||
s := &model.Sharing{
|
||||
SharingDB: &model.SharingDB{
|
||||
Expires: req.Expires,
|
||||
Pwd: req.Pwd,
|
||||
Accessed: 0,
|
||||
MaxAccessed: req.MaxAccessed,
|
||||
Disabled: req.Disabled,
|
||||
Sort: req.Sort,
|
||||
Remark: req.Remark,
|
||||
Readme: req.Readme,
|
||||
Header: req.Header,
|
||||
},
|
||||
Files: req.Files,
|
||||
Creator: user,
|
||||
}
|
||||
var id string
|
||||
if id, err = op.CreateSharing(s); err != nil {
|
||||
common.ErrorResp(c, err, 500)
|
||||
} else {
|
||||
s.ID = id
|
||||
common.SuccessResp(c, SharingResp{
|
||||
Sharing: s,
|
||||
CreatorName: s.Creator.Username,
|
||||
CreatorRole: s.Creator.Role,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func DeleteSharing(c *gin.Context) {
|
||||
sid := c.Query("id")
|
||||
user := c.Request.Context().Value(conf.UserKey).(*model.User)
|
||||
s, err := op.GetSharingById(sid)
|
||||
if err != nil || (!user.IsAdmin() && s.CreatorId != user.ID) {
|
||||
common.ErrorResp(c, err, 404)
|
||||
return
|
||||
}
|
||||
if err = op.DeleteSharing(sid); err != nil {
|
||||
common.ErrorResp(c, err, 500)
|
||||
} else {
|
||||
common.SuccessResp(c)
|
||||
}
|
||||
}
|
||||
|
||||
func SetEnableSharing(disable bool) func(ctx *gin.Context) {
|
||||
return func(c *gin.Context) {
|
||||
sid := c.Query("id")
|
||||
user := c.Request.Context().Value(conf.UserKey).(*model.User)
|
||||
s, err := op.GetSharingById(sid)
|
||||
if err != nil || (!user.IsAdmin() && s.CreatorId != user.ID) {
|
||||
common.ErrorStrResp(c, "sharing not found", 404)
|
||||
return
|
||||
}
|
||||
s.Disabled = disable
|
||||
if err = op.UpdateSharing(s, true); err != nil {
|
||||
common.ErrorResp(c, err, 500)
|
||||
} else {
|
||||
common.SuccessResp(c)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
AccessCache = cache.NewMemCache[interface{}]()
|
||||
AccessCountDelay = 30 * time.Minute
|
||||
)
|
||||
|
||||
func countAccess(ip string, s *model.Sharing) error {
|
||||
key := fmt.Sprintf("%s:%s", s.ID, ip)
|
||||
_, ok := AccessCache.Get(key)
|
||||
if !ok {
|
||||
AccessCache.Set(key, struct{}{}, cache.WithEx[interface{}](AccessCountDelay))
|
||||
s.Accessed += 1
|
||||
return op.UpdateSharing(s, true)
|
||||
}
|
||||
return nil
|
||||
}
|
@ -14,65 +14,63 @@ import (
|
||||
|
||||
// Auth is a middleware that checks if the user is logged in.
|
||||
// if token is empty, set user to guest
|
||||
func Auth(allowDisabledGuest bool) func(c *gin.Context) {
|
||||
return func(c *gin.Context) {
|
||||
token := c.GetHeader("Authorization")
|
||||
if subtle.ConstantTimeCompare([]byte(token), []byte(setting.GetStr(conf.Token))) == 1 {
|
||||
admin, err := op.GetAdmin()
|
||||
if err != nil {
|
||||
common.ErrorResp(c, err, 500)
|
||||
c.Abort()
|
||||
return
|
||||
}
|
||||
common.GinWithValue(c, conf.UserKey, admin)
|
||||
log.Debugf("use admin token: %+v", admin)
|
||||
c.Next()
|
||||
return
|
||||
}
|
||||
if token == "" {
|
||||
guest, err := op.GetGuest()
|
||||
if err != nil {
|
||||
common.ErrorResp(c, err, 500)
|
||||
c.Abort()
|
||||
return
|
||||
}
|
||||
if !allowDisabledGuest && guest.Disabled {
|
||||
common.ErrorStrResp(c, "Guest user is disabled, login please", 401)
|
||||
c.Abort()
|
||||
return
|
||||
}
|
||||
common.GinWithValue(c, conf.UserKey, guest)
|
||||
log.Debugf("use empty token: %+v", guest)
|
||||
c.Next()
|
||||
return
|
||||
}
|
||||
userClaims, err := common.ParseToken(token)
|
||||
func Auth(c *gin.Context) {
|
||||
token := c.GetHeader("Authorization")
|
||||
if subtle.ConstantTimeCompare([]byte(token), []byte(setting.GetStr(conf.Token))) == 1 {
|
||||
admin, err := op.GetAdmin()
|
||||
if err != nil {
|
||||
common.ErrorResp(c, err, 401)
|
||||
common.ErrorResp(c, err, 500)
|
||||
c.Abort()
|
||||
return
|
||||
}
|
||||
user, err := op.GetUserByName(userClaims.Username)
|
||||
if err != nil {
|
||||
common.ErrorResp(c, err, 401)
|
||||
c.Abort()
|
||||
return
|
||||
}
|
||||
// validate password timestamp
|
||||
if userClaims.PwdTS != user.PwdTS {
|
||||
common.ErrorStrResp(c, "Password has been changed, login please", 401)
|
||||
c.Abort()
|
||||
return
|
||||
}
|
||||
if user.Disabled {
|
||||
common.ErrorStrResp(c, "Current user is disabled, replace please", 401)
|
||||
c.Abort()
|
||||
return
|
||||
}
|
||||
common.GinWithValue(c, conf.UserKey, user)
|
||||
log.Debugf("use login token: %+v", user)
|
||||
common.GinWithValue(c, conf.UserKey, admin)
|
||||
log.Debugf("use admin token: %+v", admin)
|
||||
c.Next()
|
||||
return
|
||||
}
|
||||
if token == "" {
|
||||
guest, err := op.GetGuest()
|
||||
if err != nil {
|
||||
common.ErrorResp(c, err, 500)
|
||||
c.Abort()
|
||||
return
|
||||
}
|
||||
if guest.Disabled {
|
||||
common.ErrorStrResp(c, "Guest user is disabled, login please", 401)
|
||||
c.Abort()
|
||||
return
|
||||
}
|
||||
common.GinWithValue(c, conf.UserKey, guest)
|
||||
log.Debugf("use empty token: %+v", guest)
|
||||
c.Next()
|
||||
return
|
||||
}
|
||||
userClaims, err := common.ParseToken(token)
|
||||
if err != nil {
|
||||
common.ErrorResp(c, err, 401)
|
||||
c.Abort()
|
||||
return
|
||||
}
|
||||
user, err := op.GetUserByName(userClaims.Username)
|
||||
if err != nil {
|
||||
common.ErrorResp(c, err, 401)
|
||||
c.Abort()
|
||||
return
|
||||
}
|
||||
// validate password timestamp
|
||||
if userClaims.PwdTS != user.PwdTS {
|
||||
common.ErrorStrResp(c, "Password has been changed, login please", 401)
|
||||
c.Abort()
|
||||
return
|
||||
}
|
||||
if user.Disabled {
|
||||
common.ErrorStrResp(c, "Current user is disabled, replace please", 401)
|
||||
c.Abort()
|
||||
return
|
||||
}
|
||||
common.GinWithValue(c, conf.UserKey, user)
|
||||
log.Debugf("use login token: %+v", user)
|
||||
c.Next()
|
||||
}
|
||||
|
||||
func Authn(c *gin.Context) {
|
||||
|
@ -15,19 +15,14 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func PathParse(c *gin.Context) {
|
||||
rawPath := parsePath(c.Param("path"))
|
||||
common.GinWithValue(c, conf.PathKey, rawPath)
|
||||
c.Next()
|
||||
}
|
||||
|
||||
func Down(verifyFunc func(string, string) error) func(c *gin.Context) {
|
||||
return func(c *gin.Context) {
|
||||
rawPath := c.Request.Context().Value(conf.PathKey).(string)
|
||||
rawPath := parsePath(c.Param("path"))
|
||||
common.GinWithValue(c, conf.PathKey, rawPath)
|
||||
meta, err := op.GetNearestMeta(rawPath)
|
||||
if err != nil {
|
||||
if !errors.Is(errors.Cause(err), errs.MetaNotFound) {
|
||||
common.ErrorPage(c, err, 500, true)
|
||||
common.ErrorResp(c, err, 500, true)
|
||||
return
|
||||
}
|
||||
}
|
||||
@ -37,7 +32,7 @@ func Down(verifyFunc func(string, string) error) func(c *gin.Context) {
|
||||
s := c.Query("sign")
|
||||
err = verifyFunc(rawPath, strings.TrimSuffix(s, "/"))
|
||||
if err != nil {
|
||||
common.ErrorPage(c, err, 401)
|
||||
common.ErrorResp(c, err, 401)
|
||||
c.Abort()
|
||||
return
|
||||
}
|
||||
|
@ -1,18 +0,0 @@
|
||||
package middlewares
|
||||
|
||||
import (
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/conf"
|
||||
"github.com/OpenListTeam/OpenList/v4/server/common"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func SharingIdParse(c *gin.Context) {
|
||||
sid := c.Param("sid")
|
||||
common.GinWithValue(c, conf.SharingIDKey, sid)
|
||||
c.Next()
|
||||
}
|
||||
|
||||
func EmptyPathParse(c *gin.Context) {
|
||||
common.GinWithValue(c, conf.PathKey, "/")
|
||||
c.Next()
|
||||
}
|
@ -44,29 +44,20 @@ func Init(e *gin.Engine) {
|
||||
|
||||
downloadLimiter := middlewares.DownloadRateLimiter(stream.ClientDownloadLimit)
|
||||
signCheck := middlewares.Down(sign.Verify)
|
||||
g.GET("/d/*path", middlewares.PathParse, signCheck, downloadLimiter, handles.Down)
|
||||
g.GET("/p/*path", middlewares.PathParse, signCheck, downloadLimiter, handles.Proxy)
|
||||
g.HEAD("/d/*path", middlewares.PathParse, signCheck, handles.Down)
|
||||
g.HEAD("/p/*path", middlewares.PathParse, signCheck, handles.Proxy)
|
||||
g.GET("/d/*path", signCheck, downloadLimiter, handles.Down)
|
||||
g.GET("/p/*path", signCheck, downloadLimiter, handles.Proxy)
|
||||
g.HEAD("/d/*path", signCheck, handles.Down)
|
||||
g.HEAD("/p/*path", signCheck, handles.Proxy)
|
||||
archiveSignCheck := middlewares.Down(sign.VerifyArchive)
|
||||
g.GET("/ad/*path", middlewares.PathParse, archiveSignCheck, downloadLimiter, handles.ArchiveDown)
|
||||
g.GET("/ap/*path", middlewares.PathParse, archiveSignCheck, downloadLimiter, handles.ArchiveProxy)
|
||||
g.GET("/ae/*path", middlewares.PathParse, archiveSignCheck, downloadLimiter, handles.ArchiveInternalExtract)
|
||||
g.HEAD("/ad/*path", middlewares.PathParse, archiveSignCheck, handles.ArchiveDown)
|
||||
g.HEAD("/ap/*path", middlewares.PathParse, archiveSignCheck, handles.ArchiveProxy)
|
||||
g.HEAD("/ae/*path", middlewares.PathParse, archiveSignCheck, handles.ArchiveInternalExtract)
|
||||
|
||||
g.GET("/sd/:sid", middlewares.EmptyPathParse, middlewares.SharingIdParse, downloadLimiter, handles.SharingDown)
|
||||
g.GET("/sd/:sid/*path", middlewares.PathParse, middlewares.SharingIdParse, downloadLimiter, handles.SharingDown)
|
||||
g.HEAD("/sd/:sid", middlewares.EmptyPathParse, middlewares.SharingIdParse, handles.SharingDown)
|
||||
g.HEAD("/sd/:sid/*path", middlewares.PathParse, middlewares.SharingIdParse, handles.SharingDown)
|
||||
g.GET("/sad/:sid", middlewares.EmptyPathParse, middlewares.SharingIdParse, downloadLimiter, handles.SharingArchiveExtract)
|
||||
g.GET("/sad/:sid/*path", middlewares.PathParse, middlewares.SharingIdParse, downloadLimiter, handles.SharingArchiveExtract)
|
||||
g.HEAD("/sad/:sid", middlewares.EmptyPathParse, middlewares.SharingIdParse, handles.SharingArchiveExtract)
|
||||
g.HEAD("/sad/:sid/*path", middlewares.PathParse, middlewares.SharingIdParse, handles.SharingArchiveExtract)
|
||||
g.GET("/ad/*path", archiveSignCheck, downloadLimiter, handles.ArchiveDown)
|
||||
g.GET("/ap/*path", archiveSignCheck, downloadLimiter, handles.ArchiveProxy)
|
||||
g.GET("/ae/*path", archiveSignCheck, downloadLimiter, handles.ArchiveInternalExtract)
|
||||
g.HEAD("/ad/*path", archiveSignCheck, handles.ArchiveDown)
|
||||
g.HEAD("/ap/*path", archiveSignCheck, handles.ArchiveProxy)
|
||||
g.HEAD("/ae/*path", archiveSignCheck, handles.ArchiveInternalExtract)
|
||||
|
||||
api := g.Group("/api")
|
||||
auth := api.Group("", middlewares.Auth(false))
|
||||
auth := api.Group("", middlewares.Auth)
|
||||
webauthn := api.Group("/authn", middlewares.Authn)
|
||||
|
||||
api.POST("/auth/login", handles.Login)
|
||||
@ -102,9 +93,7 @@ func Init(e *gin.Engine) {
|
||||
public.Any("/archive_extensions", handles.ArchiveExtensions)
|
||||
|
||||
_fs(auth.Group("/fs"))
|
||||
fsAndShare(api.Group("/fs", middlewares.Auth(true)))
|
||||
_task(auth.Group("/task", middlewares.AuthNotGuest))
|
||||
_sharing(auth.Group("/share", middlewares.AuthNotGuest))
|
||||
admin(auth.Group("/admin", middlewares.AuthAdmin))
|
||||
if flags.Debug || flags.Dev {
|
||||
debug(g.Group("/debug"))
|
||||
@ -180,16 +169,10 @@ func admin(g *gin.RouterGroup) {
|
||||
index.GET("/progress", middlewares.SearchIndex, handles.GetProgress)
|
||||
}
|
||||
|
||||
func fsAndShare(g *gin.RouterGroup) {
|
||||
g.Any("/list", handles.FsListSplit)
|
||||
g.Any("/get", handles.FsGetSplit)
|
||||
a := g.Group("/archive")
|
||||
a.Any("/meta", handles.FsArchiveMetaSplit)
|
||||
a.Any("/list", handles.FsArchiveListSplit)
|
||||
}
|
||||
|
||||
func _fs(g *gin.RouterGroup) {
|
||||
g.Any("/list", handles.FsList)
|
||||
g.Any("/search", middlewares.SearchIndex, handles.Search)
|
||||
g.Any("/get", handles.FsGet)
|
||||
g.Any("/other", handles.FsOther)
|
||||
g.Any("/dirs", handles.FsDirs)
|
||||
g.POST("/mkdir", handles.FsMkdir)
|
||||
@ -209,23 +192,16 @@ func _fs(g *gin.RouterGroup) {
|
||||
// g.POST("/add_qbit", handles.AddQbittorrent)
|
||||
// g.POST("/add_transmission", handles.SetTransmission)
|
||||
g.POST("/add_offline_download", handles.AddOfflineDownload)
|
||||
g.POST("/archive/decompress", handles.FsArchiveDecompress)
|
||||
a := g.Group("/archive")
|
||||
a.Any("/meta", handles.FsArchiveMeta)
|
||||
a.Any("/list", handles.FsArchiveList)
|
||||
a.POST("/decompress", handles.FsArchiveDecompress)
|
||||
}
|
||||
|
||||
func _task(g *gin.RouterGroup) {
|
||||
handles.SetupTaskRoute(g)
|
||||
}
|
||||
|
||||
func _sharing(g *gin.RouterGroup) {
|
||||
g.Any("/list", handles.ListSharings)
|
||||
g.GET("/get", handles.GetSharing)
|
||||
g.POST("/create", handles.CreateSharing)
|
||||
g.POST("/update", handles.UpdateSharing)
|
||||
g.POST("/delete", handles.DeleteSharing)
|
||||
g.POST("/enable", handles.SetEnableSharing(false))
|
||||
g.POST("/disable", handles.SetEnableSharing(true))
|
||||
}
|
||||
|
||||
func Cors(r *gin.Engine) {
|
||||
config := cors.DefaultConfig()
|
||||
// config.AllowAllOrigins = true
|
||||
|
@ -116,10 +116,10 @@ func UpdateIndex() {
|
||||
mainColor := setting.GetStr(conf.MainColor)
|
||||
utils.Log.Debug("Applying replacements for default pages...")
|
||||
replaceMap1 := map[string]string{
|
||||
"https://res.oplist.org/logo/logo.svg": favicon,
|
||||
"https://res.oplist.org/logo/logo.png": logo,
|
||||
"Loading...": title,
|
||||
"main_color: undefined": fmt.Sprintf("main_color: '%s'", mainColor),
|
||||
"https://cdn.oplist.org/gh/OpenListTeam/Logo@main/logo.svg": favicon,
|
||||
"https://cdn.oplist.org/gh/OpenListTeam/Logo@main/logo.png": logo,
|
||||
"Loading...": title,
|
||||
"main_color: undefined": fmt.Sprintf("main_color: '%s'", mainColor),
|
||||
}
|
||||
conf.ManageHtml = replaceStrings(conf.RawIndexHtml, replaceMap1)
|
||||
utils.Log.Debug("Applying replacements for manage pages...")
|
||||
|
Reference in New Issue
Block a user