Mirror of https://github.com/OpenListTeam/OpenList.git (synced 2025-09-20 04:36:09 +08:00)

Compare commits: 20 commits, v4.1.0...renovate/g
Commits (SHA1):

18c0f551fe
df479ba806
5ae8e96237
aa0ced47b0
ab747d9052
93c06213d4
b9b8eed285
317d190b77
52d7d819ad
0483e0f868
08dae4f55f
9ac0484bc0
8cf15183a0
c8f2aaaa55
1208bd0a83
6b096bcad4
58dbf088f9
05ff7908f2
a703b736c9
e458f2ab53
.github/workflows/beta_release.yml (1 change, vendored)
@@ -93,6 +93,7 @@ jobs:
        run: bash build.sh dev web
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+         FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}

      - name: Build
        uses: OpenListTeam/cgo-actions@v1.2.2
.github/workflows/build.yml (1 change, vendored)
@@ -39,6 +39,7 @@ jobs:
        run: bash build.sh dev web
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+         FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}

      - name: Build
        uses: OpenListTeam/cgo-actions@v1.2.2
.github/workflows/release.yml (1 change, vendored)
@@ -66,6 +66,7 @@ jobs:
          bash build.sh release ${{ matrix.build-type == 'lite' && 'lite' || '' }} ${{ matrix.target-platform }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+         FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}

      - name: Upload assets
        uses: softprops/action-gh-release@v2
.github/workflows/release_docker.yml (2 changes, vendored)
@@ -66,6 +66,7 @@ jobs:
        run: bash build.sh release docker-multiplatform
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+         FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}

      - name: Upload artifacts
        uses: actions/upload-artifact@v4

@@ -105,6 +106,7 @@ jobs:
        run: bash build.sh release lite docker-multiplatform
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+         FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
.github/workflows/test_docker.yml (1 change, vendored)
@@ -55,6 +55,7 @@ jobs:
        run: bash build.sh beta docker-multiplatform
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+         FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
build.sh (9 changes)
@@ -4,6 +4,9 @@ builtAt="$(date +'%F %T %z')"
 gitAuthor="The OpenList Projects Contributors <noreply@openlist.team>"
 gitCommit=$(git log --pretty=format:"%h" -1)

+# Set frontend repository, default to OpenListTeam/OpenList-Frontend
+frontendRepo="${FRONTEND_REPO:-OpenListTeam/OpenList-Frontend}"
+
 githubAuthArgs=""
 if [ -n "$GITHUB_TOKEN" ]; then
     githubAuthArgs="--header \"Authorization: Bearer $GITHUB_TOKEN\""
@@ -25,7 +28,7 @@ else
     git tag -d beta || true
     # Always true if there's no tag
     version=$(git describe --abbrev=0 --tags 2>/dev/null || echo "v0.0.0")
-    webVersion=$(eval "curl -fsSL --max-time 2 $githubAuthArgs \"https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases/latest\"" | grep "tag_name" | head -n 1 | awk -F ":" '{print $2}' | sed 's/\"//g;s/,//g;s/ //g')
+    webVersion=$(eval "curl -fsSL --max-time 2 $githubAuthArgs \"https://api.github.com/repos/$frontendRepo/releases/latest\"" | grep "tag_name" | head -n 1 | awk -F ":" '{print $2}' | sed 's/\"//g;s/,//g;s/ //g')
 fi

 echo "backend version: $version"
@@ -46,7 +49,7 @@ ldflags="\
 "

 FetchWebRolling() {
-    pre_release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases/tags/rolling\"")
+    pre_release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/$frontendRepo/releases/tags/rolling\"")
     pre_release_assets=$(echo "$pre_release_json" | jq -r '.assets[].browser_download_url')

     # There is no lite for rolling
@@ -59,7 +62,7 @@ FetchWebRolling() {
 }

 FetchWebRelease() {
-    release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases/latest\"")
+    release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/$frontendRepo/releases/latest\"")
     release_assets=$(echo "$release_json" | jq -r '.assets[].browser_download_url')

     if [ "$useLite" = true ]; then
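The pattern in build.sh is to attach an Authorization header only when GITHUB_TOKEN is set, which raises the GitHub API rate limit, and to make the frontend repo configurable via FRONTEND_REPO. A minimal Go sketch of the same idea (the endpoint and env names come from the script; everything else is illustrative):

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"os"
	"time"
)

// latestTag fetches the latest release tag for a repo, attaching a bearer
// token only when one is available (same pattern as build.sh).
func latestTag(repo string) (string, error) {
	req, err := http.NewRequest(http.MethodGet,
		"https://api.github.com/repos/"+repo+"/releases/latest", nil)
	if err != nil {
		return "", err
	}
	if tok := os.Getenv("GITHUB_TOKEN"); tok != "" {
		req.Header.Set("Authorization", "Bearer "+tok)
	}
	client := &http.Client{Timeout: 2 * time.Second} // build.sh uses --max-time 2
	res, err := client.Do(req)
	if err != nil {
		return "", err
	}
	defer res.Body.Close()
	var body struct {
		TagName string `json:"tag_name"`
	}
	if err := json.NewDecoder(res.Body).Decode(&body); err != nil {
		return "", err
	}
	return body.TagName, nil
}

func main() {
	repo := os.Getenv("FRONTEND_REPO")
	if repo == "" {
		repo = "OpenListTeam/OpenList-Frontend" // default, as in build.sh
	}
	tag, err := latestTag(repo)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Println("frontend version:", tag)
}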
@@ -9,6 +9,7 @@ import (
 	"strconv"

 	"github.com/OpenListTeam/OpenList/v4/internal/db"
+	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
 	"github.com/charmbracelet/bubbles/table"
 	tea "github.com/charmbracelet/bubbletea"
 	"github.com/charmbracelet/lipgloss"
@@ -22,8 +23,8 @@ var storageCmd = &cobra.Command{
 }

 var disableStorageCmd = &cobra.Command{
-	Use:   "disable",
-	Short: "Disable a storage",
+	Use:   "disable [mount path]",
+	Short: "Disable a storage by mount path",
 	RunE: func(cmd *cobra.Command, args []string) error {
 		if len(args) < 1 {
 			return fmt.Errorf("mount path is required")
@@ -34,15 +35,48 @@ var disableStorageCmd = &cobra.Command{
 		storage, err := db.GetStorageByMountPath(mountPath)
 		if err != nil {
 			return fmt.Errorf("failed to query storage: %+v", err)
-		} else {
-			storage.Disabled = true
-			err = db.UpdateStorage(storage)
-			if err != nil {
-				return fmt.Errorf("failed to update storage: %+v", err)
-			} else {
-				fmt.Printf("Storage with mount path [%s] have been disabled\n", mountPath)
-			}
 		}
+		storage.Disabled = true
+		err = db.UpdateStorage(storage)
+		if err != nil {
+			return fmt.Errorf("failed to update storage: %+v", err)
+		}
+		utils.Log.Infof("Storage with mount path [%s] has been disabled from CLI", mountPath)
+		fmt.Printf("Storage with mount path [%s] has been disabled\n", mountPath)
 		return nil
 	},
 }

+var deleteStorageCmd = &cobra.Command{
+	Use:   "delete [id]",
+	Short: "Delete a storage by id",
+	RunE: func(cmd *cobra.Command, args []string) error {
+		if len(args) < 1 {
+			return fmt.Errorf("id is required")
+		}
+		id, err := strconv.Atoi(args[0])
+		if err != nil {
+			return fmt.Errorf("id must be a number")
+		}
+
+		if force, _ := cmd.Flags().GetBool("force"); force {
+			fmt.Printf("Are you sure you want to delete storage with id [%d]? [y/N]: ", id)
+			var confirm string
+			fmt.Scanln(&confirm)
+			if confirm != "y" && confirm != "Y" {
+				fmt.Println("Delete operation cancelled.")
+				return nil
+			}
+		}
+
+		Init()
+		defer Release()
+		err = db.DeleteStorageById(uint(id))
+		if err != nil {
+			return fmt.Errorf("failed to delete storage by id: %+v", err)
+		}
+		utils.Log.Infof("Storage with id [%d] have been deleted from CLI", id)
+		fmt.Printf("Storage with id [%d] have been deleted\n", id)
+		return nil
+	},
+}
@@ -152,6 +186,8 @@ func init() {
 	storageCmd.AddCommand(disableStorageCmd)
 	storageCmd.AddCommand(listStorageCmd)
 	storageCmd.PersistentFlags().IntVarP(&storageTableHeight, "height", "H", 10, "Table height")
+	storageCmd.AddCommand(deleteStorageCmd)
+	deleteStorageCmd.Flags().BoolP("force", "f", false, "Force delete without confirmation")
 	// Here you will define your flags and configuration settings.

 	// Cobra supports Persistent Flags which will work for this command
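For reference, a standalone sketch of the y/N confirmation prompt idea that the delete command reads with fmt.Scanln (the helper below is illustrative, not part of the OpenList codebase):

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// confirm prints a [y/N] prompt and returns true only for an explicit yes.
func confirm(prompt string) bool {
	fmt.Printf("%s [y/N]: ", prompt)
	line, err := bufio.NewReader(os.Stdin).ReadString('\n')
	if err != nil {
		return false
	}
	answer := strings.ToLower(strings.TrimSpace(line))
	return answer == "y" || answer == "yes"
}

func main() {
	if !confirm("Are you sure you want to delete storage with id [42]?") {
		fmt.Println("Delete operation cancelled.")
		return
	}
	fmt.Println("deleted")
}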
@@ -9,6 +9,7 @@ import (
 	sdk "github.com/OpenListTeam/115-sdk-go"
 	"github.com/OpenListTeam/OpenList/v4/internal/driver"
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
+	streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
 	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
 	"github.com/aliyun/aliyun-oss-go-sdk/oss"
 	"github.com/avast/retry-go"
@@ -69,9 +70,6 @@ func (d *Open115) singleUpload(ctx context.Context, tempF model.File, tokenResp
 // }

 func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress, tokenResp *sdk.UploadGetTokenResp, initResp *sdk.UploadInitResp) error {
-	fileSize := stream.GetSize()
-	chunkSize := calPartSize(fileSize)
-
 	ossClient, err := oss.New(tokenResp.Endpoint, tokenResp.AccessKeyId, tokenResp.AccessKeySecret, oss.SecurityToken(tokenResp.SecurityToken))
 	if err != nil {
 		return err
@@ -86,9 +84,15 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
 		return err
 	}

+	fileSize := stream.GetSize()
+	chunkSize := calPartSize(fileSize)
 	partNum := (stream.GetSize() + chunkSize - 1) / chunkSize
 	parts := make([]oss.UploadPart, partNum)
 	offset := int64(0)
+	ss, err := streamPkg.NewStreamSectionReader(stream, int(chunkSize))
+	if err != nil {
+		return err
+	}
 	for i := int64(1); i <= partNum; i++ {
 		if utils.IsCanceled(ctx) {
 			return ctx.Err()
@@ -98,10 +102,13 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
 		if i == partNum {
 			partSize = fileSize - (i-1)*chunkSize
 		}
-		rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
+		rd, err := ss.GetSectionReader(offset, partSize)
+		if err != nil {
+			return err
+		}
+		rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
 		err = retry.Do(func() error {
-			_ = rd.Reset()
-			rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
+			rd.Seek(0, io.SeekStart)
 			part, err := bucket.UploadPart(imur, rateLimitedRd, partSize, int(i))
 			if err != nil {
 				return err
@@ -112,6 +119,7 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
 			retry.Attempts(3),
 			retry.DelayType(retry.BackOffDelay),
 			retry.Delay(time.Second))
+		ss.RecycleSectionReader(rd)
 		if err != nil {
 			return err
 		}
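The refactor above replaces a re-readable wrapper with pooled section readers that can be rewound with Seek(0) between retry attempts. A self-contained sketch of the same retry-a-part pattern, using only the standard library plus retry-go (the file name and uploadPart stand-in are hypothetical; calPartSize and the OSS client are replaced by fixed values):

package main

import (
	"fmt"
	"io"
	"os"
	"time"

	"github.com/avast/retry-go"
)

// uploadPart is a stand-in for bucket.UploadPart: it just drains the reader.
func uploadPart(rd io.Reader) error {
	_, err := io.Copy(io.Discard, rd)
	return err
}

func main() {
	f, err := os.Open("testdata.bin") // hypothetical input file
	if err != nil {
		panic(err)
	}
	defer f.Close()
	st, _ := f.Stat()
	fileSize, chunkSize := st.Size(), int64(16<<20)
	partNum := (fileSize + chunkSize - 1) / chunkSize // ceiling division

	for i := int64(1); i <= partNum; i++ {
		partSize := chunkSize
		if i == partNum {
			partSize = fileSize - (i-1)*chunkSize
		}
		// A section reader is seekable, so each retry can rewind to the start.
		rd := io.NewSectionReader(f, (i-1)*chunkSize, partSize)
		err := retry.Do(func() error {
			rd.Seek(0, io.SeekStart) // rewind before every attempt
			return uploadPart(rd)
		}, retry.Attempts(3), retry.Delay(time.Second), retry.DelayType(retry.BackOffDelay))
		if err != nil {
			panic(err)
		}
		fmt.Printf("part %d/%d uploaded\n", i, partNum)
	}
}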
@@ -11,7 +11,8 @@ type Addition struct {
 	driver.RootID
 	//OrderBy        string `json:"order_by" type:"select" options:"file_id,file_name,size,update_at" default:"file_name"`
 	//OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
-	AccessToken string
+	AccessToken  string
+	UploadThread int `json:"UploadThread" type:"number" default:"3" help:"the threads of upload"`
 }

 var config = driver.Config{
@@ -22,6 +23,11 @@ var config = driver.Config{

 func init() {
 	op.RegisterDriver(func() driver.Driver {
-		return &Pan123{}
+		// New default options must be set when the driver is registered,
+		// so that they take effect for storages already in use.
+		return &Pan123{
+			Addition: Addition{
+				UploadThread: 3,
+			},
+		}
 	})
 }
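The point of the comment above: the registration factory returns a value pre-populated with defaults, so a field added later still gets a sane value for existing configurations. A generic sketch of that factory-with-defaults pattern (the registry below is illustrative, not OpenList's op package):

package main

import "fmt"

type Driver interface{ Name() string }

var factories []func() Driver

// Register stores a factory instead of an instance, so every new storage
// gets a fresh value with defaults already filled in.
func Register(f func() Driver) { factories = append(factories, f) }

type Pan struct{ UploadThread int }

func (p *Pan) Name() string { return "pan" }

func main() {
	Register(func() Driver {
		return &Pan{UploadThread: 3} // default set at registration time
	})
	d := factories[0]().(*Pan)
	fmt.Println(d.UploadThread) // 3
}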
|
@ -6,11 +6,16 @@ import (
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/drivers/base"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/stream"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
"github.com/avast/retry-go"
|
||||
"github.com/go-resty/resty/v2"
|
||||
)
|
||||
|
||||
@ -69,18 +74,15 @@ func (d *Pan123) completeS3(ctx context.Context, upReq *UploadResp, file model.F
|
||||
}
|
||||
|
||||
func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, up driver.UpdateProgress) error {
|
||||
tmpF, err := file.CacheFullInTempFile()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// fetch s3 pre signed urls
|
||||
size := file.GetSize()
|
||||
chunkSize := min(size, 16*utils.MB)
|
||||
chunkCount := int(size / chunkSize)
|
||||
chunkSize := int64(16 * utils.MB)
|
||||
chunkCount := 1
|
||||
if size > chunkSize {
|
||||
chunkCount = int((size + chunkSize - 1) / chunkSize)
|
||||
}
|
||||
lastChunkSize := size % chunkSize
|
||||
if lastChunkSize > 0 {
|
||||
chunkCount++
|
||||
} else {
|
||||
if lastChunkSize == 0 {
|
||||
lastChunkSize = chunkSize
|
||||
}
|
||||
// only 1 batch is allowed
|
||||
@ -90,73 +92,103 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
|
||||
batchSize = 10
|
||||
getS3UploadUrl = d.getS3PreSignedUrls
|
||||
}
|
||||
ss, err := stream.NewStreamSectionReader(file, int(chunkSize))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
thread := min(int(chunkCount), d.UploadThread)
|
||||
threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
|
||||
retry.Attempts(3),
|
||||
retry.Delay(time.Second),
|
||||
retry.DelayType(retry.BackOffDelay))
|
||||
for i := 1; i <= chunkCount; i += batchSize {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
if utils.IsCanceled(uploadCtx) {
|
||||
break
|
||||
}
|
||||
start := i
|
||||
end := min(i+batchSize, chunkCount+1)
|
||||
s3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, start, end)
|
||||
s3PreSignedUrls, err := getS3UploadUrl(uploadCtx, upReq, start, end)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// upload each chunk
|
||||
for j := start; j < end; j++ {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
for cur := start; cur < end; cur++ {
|
||||
if utils.IsCanceled(uploadCtx) {
|
||||
break
|
||||
}
|
||||
offset := int64(cur-1) * chunkSize
|
||||
curSize := chunkSize
|
||||
if j == chunkCount {
|
||||
if cur == chunkCount {
|
||||
curSize = lastChunkSize
|
||||
}
|
||||
err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.NewSectionReader(tmpF, chunkSize*int64(j-1), curSize), curSize, false, getS3UploadUrl)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
up(float64(j) * 100 / float64(chunkCount))
|
||||
var reader *stream.SectionReader
|
||||
var rateLimitedRd io.Reader
|
||||
threadG.GoWithLifecycle(errgroup.Lifecycle{
|
||||
Before: func(ctx context.Context) error {
|
||||
if reader == nil {
|
||||
var err error
|
||||
reader, err = ss.GetSectionReader(offset, curSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
Do: func(ctx context.Context) error {
|
||||
reader.Seek(0, io.SeekStart)
|
||||
uploadUrl := s3PreSignedUrls.Data.PreSignedUrls[strconv.Itoa(cur)]
|
||||
if uploadUrl == "" {
|
||||
return fmt.Errorf("upload url is empty, s3PreSignedUrls: %+v", s3PreSignedUrls)
|
||||
}
|
||||
reader.Seek(0, io.SeekStart)
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, rateLimitedRd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.ContentLength = curSize
|
||||
//req.Header.Set("Content-Length", strconv.FormatInt(curSize, 10))
|
||||
res, err := base.HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode == http.StatusForbidden {
|
||||
singleflight.AnyGroup.Do(fmt.Sprintf("Pan123.newUpload_%p", threadG), func() (any, error) {
|
||||
newS3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, cur, end)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s3PreSignedUrls.Data.PreSignedUrls = newS3PreSignedUrls.Data.PreSignedUrls
|
||||
return nil, nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return fmt.Errorf("upload s3 chunk %d failed, status code: %d", cur, res.StatusCode)
|
||||
}
|
||||
if res.StatusCode != http.StatusOK {
|
||||
body, err := io.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return fmt.Errorf("upload s3 chunk %d failed, status code: %d, body: %s", cur, res.StatusCode, body)
|
||||
}
|
||||
progress := 10.0 + 85.0*float64(threadG.Success())/float64(chunkCount)
|
||||
up(progress)
|
||||
return nil
|
||||
},
|
||||
After: func(err error) {
|
||||
ss.RecycleSectionReader(reader)
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
if err := threadG.Wait(); err != nil {
|
||||
return err
|
||||
}
|
||||
defer up(100)
|
||||
// complete s3 upload
|
||||
return d.completeS3(ctx, upReq, file, chunkCount > 1)
|
||||
}
|
||||
|
||||
func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSignedUrls *S3PreSignedURLs, cur, end int, reader *io.SectionReader, curSize int64, retry bool, getS3UploadUrl func(ctx context.Context, upReq *UploadResp, start int, end int) (*S3PreSignedURLs, error)) error {
|
||||
uploadUrl := s3PreSignedUrls.Data.PreSignedUrls[strconv.Itoa(cur)]
|
||||
if uploadUrl == "" {
|
||||
return fmt.Errorf("upload url is empty, s3PreSignedUrls: %+v", s3PreSignedUrls)
|
||||
}
|
||||
req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, reader))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req = req.WithContext(ctx)
|
||||
req.ContentLength = curSize
|
||||
//req.Header.Set("Content-Length", strconv.FormatInt(curSize, 10))
|
||||
res, err := base.HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode == http.StatusForbidden {
|
||||
if retry {
|
||||
return fmt.Errorf("upload s3 chunk %d failed, status code: %d", cur, res.StatusCode)
|
||||
}
|
||||
// refresh s3 pre signed urls
|
||||
newS3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, cur, end)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s3PreSignedUrls.Data.PreSignedUrls = newS3PreSignedUrls.Data.PreSignedUrls
|
||||
// retry
|
||||
reader.Seek(0, io.SeekStart)
|
||||
return d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, cur, end, reader, curSize, true, getS3UploadUrl)
|
||||
}
|
||||
if res.StatusCode != http.StatusOK {
|
||||
body, err := io.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return fmt.Errorf("upload s3 chunk %d failed, status code: %d, body: %s", cur, res.StatusCode, body)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
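When a pre-signed URL expires, many workers can hit a 403 at the same moment; the diff funnels the refresh through a singleflight group so only one request re-fetches the URLs. A sketch of that idea with golang.org/x/sync/singleflight (OpenList uses its own pkg/singleflight wrapper; the x/sync API shown here is the standard equivalent):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"

	"golang.org/x/sync/singleflight"
)

var g singleflight.Group
var refreshCalls atomic.Int32

// refreshURLs simulates re-fetching pre-signed URLs from the API.
func refreshURLs() (string, error) {
	refreshCalls.Add(1)
	return "https://example.com/new-presigned", nil
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ { // 8 workers all see a 403 at the same time
		wg.Add(1)
		go func() {
			defer wg.Done()
			// All callers share one in-flight refresh, keyed by the upload.
			url, _, _ := g.Do("refresh-upload-42", func() (any, error) {
				return refreshURLs()
			})
			_ = url
		}()
	}
	wg.Wait()
	fmt.Println("refresh calls:", refreshCalls.Load()) // typically 1
}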
@@ -2,7 +2,9 @@ package _123_open

 import (
 	"context"
 	"fmt"
 	"strconv"
 	"time"

 	"github.com/OpenListTeam/OpenList/v4/internal/driver"
 	"github.com/OpenListTeam/OpenList/v4/internal/errs"
@@ -95,6 +97,22 @@ func (d *Open123) Rename(ctx context.Context, srcObj model.Obj, newName string)
 }

+func (d *Open123) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
+	// Try to implement copy via upload + MD5 instant transfer (rapid upload)
+	// 1. Create the file
+	// parentFileID: id of the parent directory; use 0 when uploading to the root
+	parentFileId, err := strconv.ParseInt(dstDir.GetID(), 10, 64)
+	if err != nil {
+		return fmt.Errorf("parse parentFileID error: %v", err)
+	}
+	etag := srcObj.(File).Etag
+	createResp, err := d.create(parentFileId, srcObj.GetName(), etag, srcObj.GetSize(), 2, false)
+	if err != nil {
+		return err
+	}
+	// Did instant transfer succeed?
+	if createResp.Data.Reuse {
+		return nil
+	}
+	return errs.NotSupport
+}
@@ -104,27 +122,66 @@ func (d *Open123) Remove(ctx context.Context, obj model.Obj) error {
 	return d.trash(fileId)
 }

-func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
+func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
 	// 1. Create the file
 	// parentFileID: id of the parent directory; use 0 when uploading to the root
 	parentFileId, err := strconv.ParseInt(dstDir.GetID(), 10, 64)
 	if err != nil {
-		return fmt.Errorf("parse parentFileID error: %v", err)
+		return nil, fmt.Errorf("parse parentFileID error: %v", err)
 	}
 	// etag: the file's MD5
 	etag := file.GetHash().GetHash(utils.MD5)

 	if len(etag) < utils.MD5.Width {
 		cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
 		up = model.UpdateProgressWithRange(up, 50, 100)
 		_, etag, err = stream.CacheFullInTempFileAndHash(file, cacheFileProgress, utils.MD5)
 		if err != nil {
-			return err
+			return nil, err
 		}
 	}
 	createResp, err := d.create(parentFileId, file.GetName(), etag, file.GetSize(), 2, false)
 	if err != nil {
-		return err
+		return nil, err
 	}
 	// Did instant transfer succeed?
 	if createResp.Data.Reuse {
-		return nil
+		// A valid FileID is returned only when instant transfer succeeds; otherwise it is 0.
+		if createResp.Data.FileID != 0 {
+			return File{
+				FileName: file.GetName(),
+				Size:     file.GetSize(),
+				FileId:   createResp.Data.FileID,
+				Type:     2,
+				Etag:     etag,
+			}, nil
+		}
 	}

-	return d.Upload(ctx, file, createResp, up)
+	// 2. Upload the parts
+	err = d.Upload(ctx, file, createResp, up)
+	if err != nil {
+		return nil, err
+	}
+
+	// 3. Finish the upload
+	for range 60 {
+		uploadCompleteResp, err := d.complete(createResp.Data.PreuploadID)
+		// Unknown error codes (e.g. 20103) may be returned; the docs do not describe them.
+		if err == nil && uploadCompleteResp.Data.Completed && uploadCompleteResp.Data.FileID != 0 {
+			up(100)
+			return File{
+				FileName: file.GetName(),
+				Size:     file.GetSize(),
+				FileId:   uploadCompleteResp.Data.FileID,
+				Type:     2,
+				Etag:     etag,
+			}, nil
+		}
+		// If the API returns completed == false, poll again after one second for the final result.
+		time.Sleep(time.Second)
+	}
+	return nil, fmt.Errorf("upload complete timeout")
 }

 var _ driver.Driver = (*Open123)(nil)
+var _ driver.PutResult = (*Open123)(nil)
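The completion step polls up to 60 times at one-second intervals, because the 123 open API asks clients to re-check until completed is true. The same poll-with-deadline shape, reduced to a standalone sketch (checkDone stands in for d.complete):

package main

import (
	"errors"
	"fmt"
	"time"
)

// checkDone stands in for the upload_complete API call.
func checkDone(attempt int) (bool, error) {
	return attempt >= 3, nil // pretend the server finishes on the 3rd poll
}

// waitUploadComplete polls once per second, up to maxTries times.
func waitUploadComplete(maxTries int) error {
	for i := 1; i <= maxTries; i++ {
		done, err := checkDone(i)
		if err == nil && done {
			return nil
		}
		time.Sleep(time.Second)
	}
	return errors.New("upload complete timeout")
}

func main() {
	if err := waitUploadComplete(60); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("upload confirmed")
}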
@@ -73,7 +73,9 @@ func (f File) GetName() string {
 }

 func (f File) CreateTime() time.Time {
-	parsedTime, err := time.Parse("2006-01-02 15:04:05", f.CreateAt)
+	// The returned time has no time zone information; assume UTC+8.
+	loc := time.FixedZone("UTC+8", 8*60*60)
+	parsedTime, err := time.ParseInLocation("2006-01-02 15:04:05", f.CreateAt, loc)
 	if err != nil {
 		return time.Now()
 	}
@@ -81,7 +83,9 @@ func (f File) CreateTime() time.Time {
 }

 func (f File) ModTime() time.Time {
-	parsedTime, err := time.Parse("2006-01-02 15:04:05", f.UpdateAt)
+	// The returned time has no time zone information; assume UTC+8.
+	loc := time.FixedZone("UTC+8", 8*60*60)
+	parsedTime, err := time.ParseInLocation("2006-01-02 15:04:05", f.UpdateAt, loc)
 	if err != nil {
 		return time.Now()
 	}
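time.Parse interprets a zone-less timestamp as UTC, which shifts it by eight hours for this API; ParseInLocation pins it to the intended zone instead. A quick demonstration:

package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05"
	raw := "2025-01-02 12:00:00" // API timestamp with no zone info

	utc, _ := time.Parse(layout, raw) // silently treated as UTC
	cst := time.FixedZone("UTC+8", 8*60*60)
	local, _ := time.ParseInLocation(layout, raw, cst) // pinned to UTC+8

	fmt.Println(utc.Unix() - local.Unix()) // 28800: an 8-hour discrepancy
}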
@@ -154,52 +158,23 @@ type DownloadInfoResp struct {
 	} `json:"data"`
 }

+// Response of create file V2
 type UploadCreateResp struct {
 	BaseResp
 	Data struct {
-		FileID      int64  `json:"fileID"`
-		PreuploadID string `json:"preuploadID"`
-		Reuse       bool   `json:"reuse"`
-		SliceSize   int64  `json:"sliceSize"`
+		FileID      int64    `json:"fileID"`
+		PreuploadID string   `json:"preuploadID"`
+		Reuse       bool     `json:"reuse"`
+		SliceSize   int64    `json:"sliceSize"`
+		Servers     []string `json:"servers"`
 	} `json:"data"`
 }

-type UploadUrlResp struct {
-	BaseResp
-	Data struct {
-		PresignedURL string `json:"presignedURL"`
-	}
-}
-
+// Response of upload complete V2
 type UploadCompleteResp struct {
 	BaseResp
 	Data struct {
 		Async     bool  `json:"async"`
 		Completed bool  `json:"completed"`
 		FileID    int64 `json:"fileID"`
 	} `json:"data"`
 }

-type UploadAsyncResp struct {
-	BaseResp
-	Data struct {
-		Completed bool  `json:"completed"`
-		FileID    int64 `json:"fileID"`
-	} `json:"data"`
-}
-
-type UploadResp struct {
-	BaseResp
-	Data struct {
-		AccessKeyId     string `json:"AccessKeyId"`
-		Bucket          string `json:"Bucket"`
-		Key             string `json:"Key"`
-		SecretAccessKey string `json:"SecretAccessKey"`
-		SessionToken    string `json:"SessionToken"`
-		FileId          int64  `json:"FileId"`
-		Reuse           bool   `json:"Reuse"`
-		EndPoint        string `json:"EndPoint"`
-		StorageNode     string `json:"StorageNode"`
-		UploadId        string `json:"UploadId"`
-	} `json:"data"`
-}
@@ -1,21 +1,28 @@
 package _123_open

 import (
 	"bytes"
 	"context"
 	"encoding/json"
 	"fmt"
 	"io"
 	"mime/multipart"
 	"net/http"
 	"strconv"
 	"strings"
 	"time"

 	"github.com/OpenListTeam/OpenList/v4/drivers/base"
 	"github.com/OpenListTeam/OpenList/v4/internal/driver"
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
 	"github.com/OpenListTeam/OpenList/v4/internal/stream"
 	"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
-	"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
 	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
 	"github.com/avast/retry-go"
 	"github.com/go-resty/resty/v2"
 )

 // Create file, V2
 func (d *Open123) create(parentFileID int64, filename string, etag string, size int64, duplicate int, containDir bool) (*UploadCreateResp, error) {
 	var resp UploadCreateResp
 	_, err := d.Request(UploadCreate, http.MethodPost, func(req *resty.Request) {
@@ -34,21 +41,135 @@ func (d *Open123) create(parentFileID int64, filename string, etag string, size
 	return &resp, nil
 }

-func (d *Open123) url(preuploadID string, sliceNo int64) (string, error) {
-	// get upload url
-	var resp UploadUrlResp
-	_, err := d.Request(UploadUrl, http.MethodPost, func(req *resty.Request) {
-		req.SetBody(base.Json{
-			"preuploadId": preuploadID,
-			"sliceNo":     sliceNo,
-		})
-	}, &resp)
-	if err != nil {
-		return "", err
-	}
-	return resp.Data.PresignedURL, nil
-}
+// Upload the parts, V2
+func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createResp *UploadCreateResp, up driver.UpdateProgress) error {
+	uploadDomain := createResp.Data.Servers[0]
+	size := file.GetSize()
+	chunkSize := createResp.Data.SliceSize
+	uploadNums := (size + chunkSize - 1) / chunkSize
+	thread := min(int(uploadNums), d.UploadThread)
+	threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
+		retry.Attempts(3),
+		retry.Delay(time.Second),
+		retry.DelayType(retry.BackOffDelay))
+
+	ss, err := stream.NewStreamSectionReader(file, int(chunkSize))
+	if err != nil {
+		return err
+	}
+	for partIndex := range uploadNums {
+		if utils.IsCanceled(uploadCtx) {
+			break
+		}
+		partIndex := partIndex
+		partNumber := partIndex + 1 // part numbers start from 1
+		offset := partIndex * chunkSize
+		size := min(chunkSize, size-offset)
+		var reader *stream.SectionReader
+		var rateLimitedRd io.Reader
+		sliceMD5 := ""
+		threadG.GoWithLifecycle(errgroup.Lifecycle{
+			Before: func(ctx context.Context) error {
+				if reader == nil {
+					var err error
+					// one reader per part
+					reader, err = ss.GetSectionReader(offset, size)
+					if err != nil {
+						return err
+					}
+					// compute this part's MD5
+					sliceMD5, err = utils.HashReader(utils.MD5, reader)
+					if err != nil {
+						return err
+					}
+					rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
+				}
+				return nil
+			},
+			Do: func(ctx context.Context) error {
+				// Rewind the part reader: HashReader (or a previous failed attempt) has already read it to EOF.
+				reader.Seek(0, io.SeekStart)
+
+				// build the form data
+				var b bytes.Buffer
+				w := multipart.NewWriter(&b)
+				// add the form fields
+				err = w.WriteField("preuploadID", createResp.Data.PreuploadID)
+				if err != nil {
+					return err
+				}
+				err = w.WriteField("sliceNo", strconv.FormatInt(partNumber, 10))
+				if err != nil {
+					return err
+				}
+				err = w.WriteField("sliceMD5", sliceMD5)
+				if err != nil {
+					return err
+				}
+				// write the file content
+				fw, err := w.CreateFormFile("slice", fmt.Sprintf("%s.part%d", file.GetName(), partNumber))
+				if err != nil {
+					return err
+				}
+				_, err = utils.CopyWithBuffer(fw, rateLimitedRd)
+				if err != nil {
+					return err
+				}
+				err = w.Close()
+				if err != nil {
+					return err
+				}
+
+				// build the request and set headers
+				req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadDomain+"/upload/v2/file/slice", &b)
+				if err != nil {
+					return err
+				}
+
+				// set request headers
+				req.Header.Add("Authorization", "Bearer "+d.AccessToken)
+				req.Header.Add("Content-Type", w.FormDataContentType())
+				req.Header.Add("Platform", "open_platform")
+
+				res, err := base.HttpClient.Do(req)
+				if err != nil {
+					return err
+				}
+				defer res.Body.Close()
+				if res.StatusCode != 200 {
+					return fmt.Errorf("slice %d upload failed, status code: %d", partNumber, res.StatusCode)
+				}
+				var resp BaseResp
+				respBody, err := io.ReadAll(res.Body)
+				if err != nil {
+					return err
+				}
+				err = json.Unmarshal(respBody, &resp)
+				if err != nil {
+					return err
+				}
+				if resp.Code != 0 {
+					return fmt.Errorf("slice %d upload failed: %s", partNumber, resp.Message)
+				}
+
+				progress := 10.0 + 85.0*float64(threadG.Success())/float64(uploadNums)
+				up(progress)
+				return nil
+			},
+			After: func(err error) {
+				ss.RecycleSectionReader(reader)
+			},
+		})
+	}
+
+	if err := threadG.Wait(); err != nil {
+		return err
+	}
+
+	return nil
+}

 // Finish the upload
 func (d *Open123) complete(preuploadID string) (*UploadCompleteResp, error) {
 	var resp UploadCompleteResp
 	_, err := d.Request(UploadComplete, http.MethodPost, func(req *resty.Request) {
@@ -61,91 +182,3 @@ func (d *Open123) complete(preuploadID string) (*UploadCompleteResp, error) {
 	}
 	return &resp, nil
 }
-
-func (d *Open123) async(preuploadID string) (*UploadAsyncResp, error) {
-	var resp UploadAsyncResp
-	_, err := d.Request(UploadAsync, http.MethodPost, func(req *resty.Request) {
-		req.SetBody(base.Json{
-			"preuploadID": preuploadID,
-		})
-	}, &resp)
-	if err != nil {
-		return nil, err
-	}
-	return &resp, nil
-}
-
-func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createResp *UploadCreateResp, up driver.UpdateProgress) error {
-	size := file.GetSize()
-	chunkSize := createResp.Data.SliceSize
-	uploadNums := (size + chunkSize - 1) / chunkSize
-	threadG, uploadCtx := errgroup.NewGroupWithContext(ctx, d.UploadThread,
-		retry.Attempts(3),
-		retry.Delay(time.Second),
-		retry.DelayType(retry.BackOffDelay))
-
-	for partIndex := int64(0); partIndex < uploadNums; partIndex++ {
-		if utils.IsCanceled(uploadCtx) {
-			return ctx.Err()
-		}
-		partIndex := partIndex
-		partNumber := partIndex + 1 // part numbers start from 1
-		offset := partIndex * chunkSize
-		size := min(chunkSize, size-offset)
-		limitedReader, err := file.RangeRead(http_range.Range{
-			Start:  offset,
-			Length: size})
-		if err != nil {
-			return err
-		}
-		limitedReader = driver.NewLimitedUploadStream(ctx, limitedReader)
-
-		threadG.Go(func(ctx context.Context) error {
-			uploadPartUrl, err := d.url(createResp.Data.PreuploadID, partNumber)
-			if err != nil {
-				return err
-			}
-
-			req, err := http.NewRequestWithContext(ctx, "PUT", uploadPartUrl, limitedReader)
-			if err != nil {
-				return err
-			}
-			req = req.WithContext(ctx)
-			req.ContentLength = size
-
-			res, err := base.HttpClient.Do(req)
-			if err != nil {
-				return err
-			}
-			_ = res.Body.Close()
-
-			progress := 10.0 + 85.0*float64(threadG.Success())/float64(uploadNums)
-			up(progress)
-			return nil
-		})
-	}
-
-	if err := threadG.Wait(); err != nil {
-		return err
-	}
-
-	uploadCompleteResp, err := d.complete(createResp.Data.PreuploadID)
-	if err != nil {
-		return err
-	}
-	if uploadCompleteResp.Data.Async == false || uploadCompleteResp.Data.Completed {
-		return nil
-	}
-
-	for {
-		uploadAsyncResp, err := d.async(createResp.Data.PreuploadID)
-		if err != nil {
-			return err
-		}
-		if uploadAsyncResp.Data.Completed {
-			break
-		}
-	}
-	up(100)
-	return nil
-}
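The new slice upload posts multipart/form-data (text fields plus the chunk as a file part) instead of a bare PUT to a pre-signed URL. A minimal standalone sketch of building such a request with mime/multipart (the endpoint and field names follow the diff; the host and token are placeholders):

package main

import (
	"bytes"
	"fmt"
	"mime/multipart"
	"net/http"
	"strings"
)

// buildSliceRequest assembles a multipart/form-data upload for one chunk.
func buildSliceRequest(preuploadID string, sliceNo int, sliceMD5 string, chunk []byte) (*http.Request, error) {
	var b bytes.Buffer
	w := multipart.NewWriter(&b)
	w.WriteField("preuploadID", preuploadID)
	w.WriteField("sliceNo", fmt.Sprint(sliceNo))
	w.WriteField("sliceMD5", sliceMD5)
	fw, err := w.CreateFormFile("slice", fmt.Sprintf("file.part%d", sliceNo))
	if err != nil {
		return nil, err
	}
	fw.Write(chunk)
	if err := w.Close(); err != nil { // Close writes the trailing boundary
		return nil, err
	}
	req, err := http.NewRequest(http.MethodPost, "https://upload.example.com/upload/v2/file/slice", &b)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", w.FormDataContentType()) // includes the boundary
	req.Header.Set("Authorization", "Bearer "+"YOUR_TOKEN")  // placeholder
	return req, nil
}

func main() {
	req, err := buildSliceRequest("pre-123", 1, "d41d8cd98f00b204e9800998ecf8427e", []byte("chunk data"))
	if err != nil {
		panic(err)
	}
	ct := req.Header.Get("Content-Type")
	fmt.Println(strings.HasPrefix(ct, "multipart/form-data; boundary=")) // true
}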
@@ -19,16 +19,14 @@ var ( // QPS limits differ depending on how the AccessToken was obtained; modularized as below for easy adjustment
 	AccessToken    = InitApiInfo(Api+"/api/v1/access_token", 1)
 	RefreshToken   = InitApiInfo(Api+"/api/v1/oauth2/access_token", 1)
 	UserInfo       = InitApiInfo(Api+"/api/v1/user/info", 1)
-	FileList       = InitApiInfo(Api+"/api/v2/file/list", 4)
+	FileList       = InitApiInfo(Api+"/api/v2/file/list", 3)
 	DownloadInfo   = InitApiInfo(Api+"/api/v1/file/download_info", 0)
 	Mkdir          = InitApiInfo(Api+"/upload/v1/file/mkdir", 2)
 	Move           = InitApiInfo(Api+"/api/v1/file/move", 1)
 	Rename         = InitApiInfo(Api+"/api/v1/file/name", 1)
 	Trash          = InitApiInfo(Api+"/api/v1/file/trash", 2)
-	UploadCreate   = InitApiInfo(Api+"/upload/v1/file/create", 2)
-	UploadUrl      = InitApiInfo(Api+"/upload/v1/file/get_upload_url", 0)
-	UploadComplete = InitApiInfo(Api+"/upload/v1/file/upload_complete", 0)
-	UploadAsync    = InitApiInfo(Api+"/upload/v1/file/upload_async_result", 1)
+	UploadCreate   = InitApiInfo(Api+"/upload/v2/file/create", 2)
+	UploadComplete = InitApiInfo(Api+"/upload/v2/file/upload_complete", 0)
 )

 func (d *Open123) Request(apiInfo *ApiInfo, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
@@ -531,12 +531,10 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 	}

 	size := stream.GetSize()
-	var partSize = d.getPartSize(size)
-	part := size / partSize
-	if size%partSize > 0 {
-		part++
-	} else if part == 0 {
-		part = 1
+	partSize := d.getPartSize(size)
+	part := int64(1)
+	if size > partSize {
+		part = (size + partSize - 1) / partSize
 	}
 	partInfos := make([]PartInfo, 0, part)
 	for i := int64(0); i < part; i++ {
@@ -638,11 +636,10 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 		// Update Progress
 		r := io.TeeReader(limitReader, p)

-		req, err := http.NewRequest("PUT", uploadPartInfo.UploadUrl, r)
+		req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadPartInfo.UploadUrl, r)
 		if err != nil {
 			return err
 		}
-		req = req.WithContext(ctx)
 		req.Header.Set("Content-Type", "application/octet-stream")
 		req.Header.Set("Content-Length", fmt.Sprint(partSize))
 		req.Header.Set("Origin", "https://yun.139.com")
@@ -788,12 +785,10 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 	size := stream.GetSize()
 	// Progress
 	p := driver.NewProgress(size, up)
-	var partSize = d.getPartSize(size)
-	part := size / partSize
-	if size%partSize > 0 {
-		part++
-	} else if part == 0 {
-		part = 1
+	partSize := d.getPartSize(size)
+	part := int64(1)
+	if size > partSize {
+		part = (size + partSize - 1) / partSize
 	}
 	rateLimited := driver.NewLimitedUploadStream(ctx, stream)
 	for i := int64(0); i < part; i++ {
@@ -807,12 +802,10 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 		limitReader := io.LimitReader(rateLimited, byteSize)
 		// Update Progress
 		r := io.TeeReader(limitReader, p)
-		req, err := http.NewRequest("POST", resp.Data.UploadResult.RedirectionURL, r)
+		req, err := http.NewRequestWithContext(ctx, http.MethodPost, resp.Data.UploadResult.RedirectionURL, r)
 		if err != nil {
 			return err
 		}
-
-		req = req.WithContext(ctx)
 		req.Header.Set("Content-Type", "text/plain;name="+unicode(stream.GetName()))
 		req.Header.Set("contentSize", strconv.FormatInt(size, 10))
 		req.Header.Set("range", fmt.Sprintf("bytes=%d-%d", start, start+byteSize-1))
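Several drivers in this change set replace the divide-then-conditionally-increment part count with the standard ceiling-division idiom, keeping an explicit part = 1 default for the zero-size edge case. A quick check of the formulation:

package main

import "fmt"

func main() {
	const partSize = int64(8)
	for _, size := range []int64{0, 1, 8, 9, 16, 17} {
		// ceiling division: number of partSize chunks needed to cover size
		part := int64(1) // at least one part, even for an empty stream
		if size > partSize {
			part = (size + partSize - 1) / partSize
		}
		fmt.Printf("size=%2d -> %d part(s)\n", size, part)
	}
	// Output:
	// size= 0 -> 1 part(s)
	// size= 1 -> 1 part(s)
	// size= 8 -> 1 part(s)
	// size= 9 -> 2 part(s)
	// size=16 -> 2 part(s)
	// size=17 -> 3 part(s)
}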
@@ -365,11 +365,10 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
 	log.Debugf("uploadData: %+v", uploadData)
 	requestURL := uploadData.RequestURL
 	uploadHeaders := strings.Split(decodeURIComponent(uploadData.RequestHeader), "&")
-	req, err := http.NewRequest(http.MethodPut, requestURL, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
+	req, err := http.NewRequestWithContext(ctx, http.MethodPut, requestURL, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
 	if err != nil {
 		return err
 	}
-	req = req.WithContext(ctx)
 	for _, v := range uploadHeaders {
 		i := strings.Index(v, "=")
 		req.Header.Set(v[0:i], v[i+1:])
@@ -7,6 +7,7 @@ import (
 	"encoding/hex"
 	"encoding/xml"
 	"fmt"
+	"hash"
 	"io"
 	"net/http"
 	"net/http/cookiejar"
@@ -472,7 +473,7 @@ func (y *Cloud189PC) refreshSession() (err error) {
 // Files with size 0 cannot be uploaded
 func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
 	size := file.GetSize()
-	sliceSize := partSize(size)
+	sliceSize := min(size, partSize(size))

 	params := Params{
 		"parentFolderId": dstDir.GetID(),
@@ -499,65 +500,99 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
 	if err != nil {
 		return nil, err
 	}
+	ss, err := stream.NewStreamSectionReader(file, int(sliceSize))
+	if err != nil {
+		return nil, err
+	}

-	threadG, upCtx := errgroup.NewGroupWithContext(ctx, y.uploadThread,
+	threadG, upCtx := errgroup.NewOrderedGroupWithContext(ctx, y.uploadThread,
 		retry.Attempts(3),
 		retry.Delay(time.Second),
 		retry.DelayType(retry.BackOffDelay))

-	count := int(size / sliceSize)
+	count := 1
+	if size > sliceSize {
+		count = int((size + sliceSize - 1) / sliceSize)
+	}
 	lastPartSize := size % sliceSize
-	if lastPartSize > 0 {
-		count++
-	} else {
+	if lastPartSize == 0 {
 		lastPartSize = sliceSize
 	}
-	fileMd5 := utils.MD5.NewFunc()
 	silceMd5 := utils.MD5.NewFunc()
+	var writers io.Writer = silceMd5

-	silceMd5Hexs := make([]string, 0, count)
-	teeReader := io.TeeReader(file, io.MultiWriter(fileMd5, silceMd5))
-	byteSize := sliceSize
+	fileMd5Hex := file.GetHash().GetHash(utils.MD5)
+	var fileMd5 hash.Hash
+	if len(fileMd5Hex) != utils.MD5.Width {
+		fileMd5 = utils.MD5.NewFunc()
+		writers = io.MultiWriter(silceMd5, fileMd5)
+	}
+	silceMd5Hexs := make([]string, 0, count)
 	for i := 1; i <= count; i++ {
 		if utils.IsCanceled(upCtx) {
 			break
 		}
+		offset := int64((i)-1) * sliceSize
+		size := sliceSize
 		if i == count {
-			byteSize = lastPartSize
+			size = lastPartSize
 		}
-		byteData := make([]byte, byteSize)
-		// read the chunk
-		silceMd5.Reset()
-		if _, err := io.ReadFull(teeReader, byteData); err != io.EOF && err != nil {
-			return nil, err
-		}
-
-		// compute the chunk MD5 and hex/base64-encode it
-		md5Bytes := silceMd5.Sum(nil)
-		silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Bytes)))
-		partInfo := fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes))
-
-		threadG.Go(func(ctx context.Context) error {
-			uploadUrls, err := y.GetMultiUploadUrls(ctx, isFamily, initMultiUpload.Data.UploadFileID, partInfo)
-			if err != nil {
-				return err
-			}
-
-			// step.4 upload the slice
-			uploadUrl := uploadUrls[0]
-			_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false,
-				driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)), isFamily)
-			if err != nil {
-				return err
-			}
-			up(float64(threadG.Success()) * 100 / float64(count))
-			return nil
-		})
+		partInfo := ""
+		var reader *stream.SectionReader
+		var rateLimitedRd io.Reader
+		threadG.GoWithLifecycle(errgroup.Lifecycle{
+			Before: func(ctx context.Context) error {
+				if reader == nil {
+					var err error
+					reader, err = ss.GetSectionReader(offset, size)
+					if err != nil {
+						return err
+					}
+					silceMd5.Reset()
+					w, err := utils.CopyWithBuffer(writers, reader)
+					if w != size {
+						return fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", size, w, err)
+					}
+					// compute the chunk MD5 and hex/base64-encode it
+					md5Bytes := silceMd5.Sum(nil)
+					silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Bytes)))
+					partInfo = fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes))
+					rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
+				}
+				return nil
+			},
+			Do: func(ctx context.Context) error {
+				reader.Seek(0, io.SeekStart)
+				uploadUrls, err := y.GetMultiUploadUrls(ctx, isFamily, initMultiUpload.Data.UploadFileID, partInfo)
+				if err != nil {
+					return err
+				}
+
+				// step.4 upload the slice
+				uploadUrl := uploadUrls[0]
+				_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false,
+					driver.NewLimitedUploadStream(ctx, rateLimitedRd), isFamily)
+				if err != nil {
+					return err
+				}
+				up(float64(threadG.Success()) * 100 / float64(count))
+				return nil
+			},
+			After: func(err error) {
+				ss.RecycleSectionReader(reader)
+			},
+		},
+		)
 	}
 	if err = threadG.Wait(); err != nil {
 		return nil, err
 	}

-	fileMd5Hex := strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
+	if fileMd5 != nil {
+		fileMd5Hex = strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
+	}
 	sliceMd5Hex := fileMd5Hex
 	if file.GetSize() > sliceSize {
 		sliceMd5Hex = strings.ToUpper(utils.GetMD5EncodeStr(strings.Join(silceMd5Hexs, "\n")))
@@ -620,11 +655,12 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
 		cache = tmpF
 	}
 	sliceSize := partSize(size)
-	count := int(size / sliceSize)
+	count := 1
+	if size > sliceSize {
+		count = int((size + sliceSize - 1) / sliceSize)
+	}
 	lastSliceSize := size % sliceSize
-	if lastSliceSize > 0 {
-		count++
-	} else {
+	if lastSliceSize == 0 {
 		lastSliceSize = sliceSize
 	}
@@ -738,7 +774,8 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
 	}

 	// step.4 upload the slice
-	_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, io.NewSectionReader(cache, offset, byteSize), isFamily)
+	rateLimitedRd := driver.NewLimitedUploadStream(ctx, io.NewSectionReader(cache, offset, byteSize))
+	_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, rateLimitedRd, isFamily)
 	if err != nil {
 		return err
 	}
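The whole-file hash above is computed incrementally while each slice is read, by fanning the bytes into the per-slice hasher and, when the stream's MD5 is not already known, the file hasher via io.MultiWriter. A standalone sketch of that fan-out:

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

func main() {
	data := strings.NewReader("some file contents split into slices")
	const sliceSize = 10

	fileMd5 := md5.New()
	sliceMd5 := md5.New()
	var sliceHexs []string

	buf := make([]byte, sliceSize)
	for {
		n, err := io.ReadFull(data, buf)
		if n > 0 {
			sliceMd5.Reset()
			// One write feeds both hashers: per-slice and whole-file.
			io.MultiWriter(sliceMd5, fileMd5).Write(buf[:n])
			sliceHexs = append(sliceHexs, hex.EncodeToString(sliceMd5.Sum(nil)))
		}
		if err != nil { // io.EOF or io.ErrUnexpectedEOF ends the loop
			break
		}
	}
	fmt.Println("slices:", len(sliceHexs))
	fmt.Println("file md5:", hex.EncodeToString(fileMd5.Sum(nil)))
}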
@@ -78,10 +78,18 @@ func (d *Alias) Get(ctx context.Context, path string) (model.Obj, error) {
 		return nil, errs.ObjectNotFound
 	}
 	for _, dst := range dsts {
-		obj, err := d.get(ctx, path, dst, sub)
-		if err == nil {
-			return obj, nil
+		obj, err := fs.Get(ctx, stdpath.Join(dst, sub), &fs.GetArgs{NoLog: true})
+		if err != nil {
+			continue
 		}
+		return &model.Object{
+			Path:     path,
+			Name:     obj.GetName(),
+			Size:     obj.GetSize(),
+			Modified: obj.ModTime(),
+			IsFolder: obj.IsDir(),
+			HashInfo: obj.GetHash(),
+		}, nil
 	}
 	return nil, errs.ObjectNotFound
 }
@@ -99,7 +107,27 @@ func (d *Alias) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
 	var objs []model.Obj
 	fsArgs := &fs.ListArgs{NoLog: true, Refresh: args.Refresh}
 	for _, dst := range dsts {
-		tmp, err := d.list(ctx, dst, sub, fsArgs)
+		tmp, err := fs.List(ctx, stdpath.Join(dst, sub), fsArgs)
+		if err == nil {
+			tmp, err = utils.SliceConvert(tmp, func(obj model.Obj) (model.Obj, error) {
+				thumb, ok := model.GetThumb(obj)
+				objRes := model.Object{
+					Name:     obj.GetName(),
+					Size:     obj.GetSize(),
+					Modified: obj.ModTime(),
+					IsFolder: obj.IsDir(),
+				}
+				if !ok {
+					return &objRes, nil
+				}
+				return &model.ObjThumb{
+					Object: objRes,
+					Thumbnail: model.Thumbnail{
+						Thumbnail: thumb,
+					},
+				}, nil
+			})
+		}
 		if err == nil {
 			objs = append(objs, tmp...)
 		}
@@ -113,43 +141,50 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
 	if !ok {
 		return nil, errs.ObjectNotFound
 	}
+	// proxy || ftp,s3
+	if common.GetApiUrl(ctx) == "" {
+		args.Redirect = false
+	}
 	for _, dst := range dsts {
 		reqPath := stdpath.Join(dst, sub)
-		link, file, err := d.link(ctx, reqPath, args)
+		link, fi, err := d.link(ctx, reqPath, args)
 		if err != nil {
 			continue
 		}
-		var resultLink *model.Link
-		if link != nil {
-			resultLink = &model.Link{
-				URL:           link.URL,
-				Header:        link.Header,
-				RangeReader:   link.RangeReader,
-				SyncClosers:   utils.NewSyncClosers(link),
-				ContentLength: link.ContentLength,
-			}
-			if link.MFile != nil {
-				resultLink.RangeReader = &model.FileRangeReader{
-					RangeReaderIF: stream.GetRangeReaderFromMFile(file.GetSize(), link.MFile),
-				}
-			}
-		} else {
-			resultLink = &model.Link{
+		if link == nil {
+			// redirect, but it must go through the proxy
+			return &model.Link{
 				URL: fmt.Sprintf("%s/p%s?sign=%s",
 					common.GetApiUrl(ctx),
 					utils.EncodePath(reqPath, true),
 					sign.Sign(reqPath)),
-			}
+			}, nil
 		}
-		if !args.Redirect {
-			if d.DownloadConcurrency > 0 {
-				resultLink.Concurrency = d.DownloadConcurrency
-			}
-			if d.DownloadPartSize > 0 {
-				resultLink.PartSize = d.DownloadPartSize * utils.KB
-			}
+		if args.Redirect {
+			return link, nil
 		}
+
+		resultLink := &model.Link{
+			URL:           link.URL,
+			Header:        link.Header,
+			RangeReader:   link.RangeReader,
+			MFile:         link.MFile,
+			Concurrency:   link.Concurrency,
+			PartSize:      link.PartSize,
+			ContentLength: link.ContentLength,
+			SyncClosers:   utils.NewSyncClosers(link),
+		}
+		if resultLink.ContentLength == 0 {
+			resultLink.ContentLength = fi.GetSize()
+		}
+		if resultLink.MFile != nil {
+			return resultLink, nil
+		}
+		if d.DownloadConcurrency > 0 {
+			resultLink.Concurrency = d.DownloadConcurrency
+		}
+		if d.DownloadPartSize > 0 {
+			resultLink.PartSize = d.DownloadPartSize * utils.KB
+		}
 		return resultLink, nil
 	}
@@ -54,55 +54,12 @@ func (d *Alias) getRootAndPath(path string) (string, string) {
 	return parts[0], parts[1]
 }

-func (d *Alias) get(ctx context.Context, path string, dst, sub string) (model.Obj, error) {
-	obj, err := fs.Get(ctx, stdpath.Join(dst, sub), &fs.GetArgs{NoLog: true})
-	if err != nil {
-		return nil, err
-	}
-	return &model.Object{
-		Path:     path,
-		Name:     obj.GetName(),
-		Size:     obj.GetSize(),
-		Modified: obj.ModTime(),
-		IsFolder: obj.IsDir(),
-		HashInfo: obj.GetHash(),
-	}, nil
-}
-
-func (d *Alias) list(ctx context.Context, dst, sub string, args *fs.ListArgs) ([]model.Obj, error) {
-	objs, err := fs.List(ctx, stdpath.Join(dst, sub), args)
-	// the obj must implement the model.SetPath interface
-	// return objs, err
-	if err != nil {
-		return nil, err
-	}
-	return utils.SliceConvert(objs, func(obj model.Obj) (model.Obj, error) {
-		thumb, ok := model.GetThumb(obj)
-		objRes := model.Object{
-			Name:     obj.GetName(),
-			Size:     obj.GetSize(),
-			Modified: obj.ModTime(),
-			IsFolder: obj.IsDir(),
-		}
-		if !ok {
-			return &objRes, nil
-		}
-		return &model.ObjThumb{
-			Object: objRes,
-			Thumbnail: model.Thumbnail{
-				Thumbnail: thumb,
-			},
-		}, nil
-	})
-}
-
 func (d *Alias) link(ctx context.Context, reqPath string, args model.LinkArgs) (*model.Link, model.Obj, error) {
 	storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
 	if err != nil {
 		return nil, nil, err
 	}
-	// proxy || ftp,s3
-	if !args.Redirect || len(common.GetApiUrl(ctx)) == 0 {
+	if !args.Redirect {
 		return op.Link(ctx, storage, reqActualPath, args)
 	}
 	obj, err := fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
@@ -297,11 +297,10 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
 		if d.InternalUpload {
 			url = partInfo.InternalUploadUrl
 		}
-		req, err := http.NewRequest("PUT", url, io.LimitReader(rateLimited, DEFAULT))
+		req, err := http.NewRequestWithContext(ctx, http.MethodPut, url, io.LimitReader(rateLimited, DEFAULT))
 		if err != nil {
 			return err
 		}
-		req = req.WithContext(ctx)
 		res, err := base.HttpClient.Do(req)
 		if err != nil {
 			return err
@@ -3,7 +3,6 @@ package aliyundrive_open

 import (
 	"context"
 	"errors"
 	"fmt"
 	"net/http"
 	"path/filepath"
 	"time"
@@ -13,7 +12,6 @@ import (
 	"github.com/OpenListTeam/OpenList/v4/internal/errs"
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
 	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
-	"github.com/OpenListTeam/rateg"
 	"github.com/go-resty/resty/v2"
 	log "github.com/sirupsen/logrus"
 )
@@ -24,9 +22,8 @@ type AliyundriveOpen struct {

 	DriveId string

-	limitList func(ctx context.Context, data base.Json) (*Files, error)
-	limitLink func(ctx context.Context, file model.Obj) (*model.Link, error)
-	ref       *AliyundriveOpen
+	limiter *limiter
+	ref     *AliyundriveOpen
 }

 func (d *AliyundriveOpen) Config() driver.Config {
@@ -38,25 +35,23 @@ func (d *AliyundriveOpen) GetAddition() driver.Additional {
 }

 func (d *AliyundriveOpen) Init(ctx context.Context) error {
+	d.limiter = getLimiterForUser(globalLimiterUserID) // First create a globally shared limiter to limit the initial requests.
 	if d.LIVPDownloadFormat == "" {
 		d.LIVPDownloadFormat = "jpeg"
 	}
 	if d.DriveType == "" {
 		d.DriveType = "default"
 	}
-	res, err := d.request("/adrive/v1.0/user/getDriveInfo", http.MethodPost, nil)
+	res, err := d.request(ctx, limiterOther, "/adrive/v1.0/user/getDriveInfo", http.MethodPost, nil)
 	if err != nil {
+		d.limiter.free()
+		d.limiter = nil
 		return err
 	}
 	d.DriveId = utils.Json.Get(res, d.DriveType+"_drive_id").ToString()
-	d.limitList = rateg.LimitFnCtx(d.list, rateg.LimitFnOption{
-		Limit:  4,
-		Bucket: 1,
-	})
-	d.limitLink = rateg.LimitFnCtx(d.link, rateg.LimitFnOption{
-		Limit:  1,
-		Bucket: 1,
-	})
+	userid := utils.Json.Get(res, "user_id").ToString()
+	d.limiter.free()
+	d.limiter = getLimiterForUser(userid) // Allocate a corresponding limiter for each user.
 	return nil
 }
@@ -70,6 +65,8 @@ func (d *AliyundriveOpen) InitReference(storage driver.Driver) error {
 }

 func (d *AliyundriveOpen) Drop(ctx context.Context) error {
+	d.limiter.free()
+	d.limiter = nil
 	d.ref = nil
 	return nil
 }
@@ -87,9 +84,6 @@ func (d *AliyundriveOpen) GetRoot(ctx context.Context) (model.Obj, error) {
 }

 func (d *AliyundriveOpen) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
-	if d.limitList == nil {
-		return nil, fmt.Errorf("driver not init")
-	}
 	files, err := d.getFiles(ctx, dir.GetID())
 	if err != nil {
 		return nil, err
@@ -107,8 +101,8 @@ func (d *AliyundriveOpen) List(ctx context.Context, dir model.Obj, args model.Li
 	return objs, err
 }

-func (d *AliyundriveOpen) link(ctx context.Context, file model.Obj) (*model.Link, error) {
-	res, err := d.request("/adrive/v1.0/openFile/getDownloadUrl", http.MethodPost, func(req *resty.Request) {
+func (d *AliyundriveOpen) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
+	res, err := d.request(ctx, limiterLink, "/adrive/v1.0/openFile/getDownloadUrl", http.MethodPost, func(req *resty.Request) {
 		req.SetBody(base.Json{
 			"drive_id": d.DriveId,
 			"file_id":  file.GetID(),
@@ -132,17 +126,10 @@ func (d *AliyundriveOpen) link(ctx context.Context, file model.Obj) (*model.Link
 	}, nil
 }

-func (d *AliyundriveOpen) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
-	if d.limitLink == nil {
-		return nil, fmt.Errorf("driver not init")
-	}
-	return d.limitLink(ctx, file)
-}
-
 func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
 	nowTime, _ := getNowTime()
 	newDir := File{CreatedAt: nowTime, UpdatedAt: nowTime}
-	_, err := d.request("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
+	_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
 		req.SetBody(base.Json{
 			"drive_id":       d.DriveId,
 			"parent_file_id": parentDir.GetID(),
@@ -168,7 +155,7 @@ func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirN

 func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
 	var resp MoveOrCopyResp
-	_, err := d.request("/adrive/v1.0/openFile/move", http.MethodPost, func(req *resty.Request) {
+	_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/move", http.MethodPost, func(req *resty.Request) {
 		req.SetBody(base.Json{
 			"drive_id": d.DriveId,
 			"file_id":  srcObj.GetID(),
@@ -198,7 +185,7 @@ func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (m

 func (d *AliyundriveOpen) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
 	var newFile File
-	_, err := d.request("/adrive/v1.0/openFile/update", http.MethodPost, func(req *resty.Request) {
+	_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/update", http.MethodPost, func(req *resty.Request) {
 		req.SetBody(base.Json{
 			"drive_id": d.DriveId,
 			"file_id":  srcObj.GetID(),
@@ -230,7 +217,7 @@ func (d *AliyundriveOpen) Rename(ctx context.Context, srcObj model.Obj, newName

 func (d *AliyundriveOpen) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
 	var resp MoveOrCopyResp
-	_, err := d.request("/adrive/v1.0/openFile/copy", http.MethodPost, func(req *resty.Request) {
+	_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/copy", http.MethodPost, func(req *resty.Request) {
 		req.SetBody(base.Json{
 			"drive_id": d.DriveId,
 			"file_id":  srcObj.GetID(),
@@ -256,7 +243,7 @@ func (d *AliyundriveOpen) Remove(ctx context.Context, obj model.Obj) error {
 	if d.RemoveWay == "delete" {
 		uri = "/adrive/v1.0/openFile/delete"
 	}
-	_, err := d.request(uri, http.MethodPost, func(req *resty.Request) {
+	_, err := d.request(ctx, limiterOther, uri, http.MethodPost, func(req *resty.Request) {
 		req.SetBody(base.Json{
 			"drive_id": d.DriveId,
 			"file_id":  obj.GetID(),
@@ -295,7 +282,7 @@ func (d *AliyundriveOpen) Other(ctx context.Context, args model.OtherArgs) (inte
 	default:
 		return nil, errs.NotSupport
 	}
-	_, err := d.request(uri, http.MethodPost, func(req *resty.Request) {
+	_, err := d.request(ctx, limiterOther, uri, http.MethodPost, func(req *resty.Request) {
 		req.SetBody(data).SetResult(&resp)
 	})
 	if err != nil {
drivers/aliyundrive_open/limiter.go (new file)
@@ -0,0 +1,96 @@
package aliyundrive_open

import (
"context"
"fmt"
"sync"

"golang.org/x/time/rate"
)

// See document https://www.yuque.com/aliyundrive/zpfszx/mqocg38hlxzc5vcd
// See issue https://github.com/OpenListTeam/OpenList/issues/724
// We got limit per user per app, so the limiter should be global.

type limiterType int

const (
limiterList limiterType = iota
limiterLink
limiterOther
)

const (
listRateLimit = 3.9 // 4 per second in document, but we use 3.9 per second to be safe
linkRateLimit = 0.9 // 1 per second in document, but we use 0.9 per second to be safe
otherRateLimit = 14.9 // 15 per second in document, but we use 14.9 per second to be safe
globalLimiterUserID = "" // Global limiter user ID, used to limit the initial requests.
)

type limiter struct {
usedBy int
list *rate.Limiter
link *rate.Limiter
other *rate.Limiter
}

var limiters = make(map[string]*limiter)
var limitersLock = &sync.Mutex{}

func getLimiterForUser(userid string) *limiter {
limitersLock.Lock()
defer limitersLock.Unlock()
defer func() {
// Clean up limiters that are no longer used.
for id, lim := range limiters {
if lim.usedBy <= 0 && id != globalLimiterUserID { // Do not delete the global limiter.
delete(limiters, id)
}
}
}()
if lim, ok := limiters[userid]; ok {
lim.usedBy++
return lim
}
lim := &limiter{
usedBy: 1,
list: rate.NewLimiter(rate.Limit(listRateLimit), 1),
link: rate.NewLimiter(rate.Limit(linkRateLimit), 1),
other: rate.NewLimiter(rate.Limit(otherRateLimit), 1),
}
limiters[userid] = lim
return lim
}

func (l *limiter) wait(ctx context.Context, typ limiterType) error {
if l == nil {
return fmt.Errorf("driver not init")
}
switch typ {
case limiterList:
return l.list.Wait(ctx)
case limiterLink:
return l.link.Wait(ctx)
case limiterOther:
return l.other.Wait(ctx)
default:
return fmt.Errorf("unknown limiter type")
}
}
func (l *limiter) free() {
if l == nil {
return
}
limitersLock.Lock()
defer limitersLock.Unlock()
l.usedBy--
}
func (d *AliyundriveOpen) wait(ctx context.Context, typ limiterType) error {
if d == nil {
return fmt.Errorf("driver not init")
}
if d.ref != nil {
return d.ref.wait(ctx, typ) // If this is a reference driver, wait on the reference driver.
}
return d.limiter.wait(ctx, typ)
}
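The file above keys one reference-counted set of token buckets per account, so several driver instances bound to the same user ID draw from a single quota. A minimal, self-contained sketch of the same acquire/release idea, assuming only golang.org/x/time/rate (the names below are illustrative, not the driver's actual API):

package main

import (
    "context"
    "fmt"
    "sync"
    "time"

    "golang.org/x/time/rate"
)

// bucket is shared by every client that presents the same key.
type bucket struct {
    refs int
    lim  *rate.Limiter
}

var (
    mu      sync.Mutex
    buckets = map[string]*bucket{}
)

// acquire returns the bucket for key, creating it on first use.
func acquire(key string, perSec float64) *bucket {
    mu.Lock()
    defer mu.Unlock()
    if b, ok := buckets[key]; ok {
        b.refs++
        return b
    }
    b := &bucket{refs: 1, lim: rate.NewLimiter(rate.Limit(perSec), 1)}
    buckets[key] = b
    return b
}

// release drops one reference and deletes the bucket once unused.
func release(key string) {
    mu.Lock()
    defer mu.Unlock()
    if b, ok := buckets[key]; ok {
        if b.refs--; b.refs <= 0 {
            delete(buckets, key)
        }
    }
}

func main() {
    b := acquire("user-123", 1) // roughly one request per second
    defer release("user-123")
    for i := 0; i < 3; i++ {
        _ = b.lim.Wait(context.Background()) // blocks until a token is free
        fmt.Println(time.Now().Format("15:04:05.000"), "request", i)
    }
}

A burst of 1 matches the driver's choice: calls are strictly serialized at just under the documented rate rather than allowed to arrive in bursts.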
@@ -50,10 +50,10 @@ func calPartSize(fileSize int64) int64 {
return partSize
}

func (d *AliyundriveOpen) getUploadUrl(count int, fileId, uploadId string) ([]PartInfo, error) {
func (d *AliyundriveOpen) getUploadUrl(ctx context.Context, count int, fileId, uploadId string) ([]PartInfo, error) {
partInfoList := makePartInfos(count)
var resp CreateResp
_, err := d.request("/adrive/v1.0/openFile/getUploadUrl", http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/getUploadUrl", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"file_id": fileId,
@@ -69,7 +69,7 @@ func (d *AliyundriveOpen) uploadPart(ctx context.Context, r io.Reader, partInfo
if d.InternalUpload {
uploadUrl = strings.ReplaceAll(uploadUrl, "https://cn-beijing-data.aliyundrive.net/", "http://ccp-bj29-bj-1592982087.oss-cn-beijing-internal.aliyuncs.com/")
}
req, err := http.NewRequestWithContext(ctx, "PUT", uploadUrl, r)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, r)
if err != nil {
return err
}
@@ -84,10 +84,10 @@ func (d *AliyundriveOpen) uploadPart(ctx context.Context, r io.Reader, partInfo
return nil
}

func (d *AliyundriveOpen) completeUpload(fileId, uploadId string) (model.Obj, error) {
func (d *AliyundriveOpen) completeUpload(ctx context.Context, fileId, uploadId string) (model.Obj, error) {
// 3. complete
var newFile File
_, err := d.request("/adrive/v1.0/openFile/complete", http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/complete", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"file_id": fileId,
@@ -137,11 +137,8 @@ func (d *AliyundriveOpen) calProofCode(stream model.FileStreamer) (string, error
}
buf := make([]byte, length)
n, err := io.ReadFull(reader, buf)
if err == io.ErrUnexpectedEOF {
return "", fmt.Errorf("can't read data, expected=%d, got=%d", len(buf), n)
}
if err != nil {
return "", err
if n != int(length) {
return "", fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", length, n, err)
}
return base64.StdEncoding.EncodeToString(buf), nil
}
@@ -183,7 +180,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
createData["pre_hash"] = hash
}
var createResp CreateResp
_, err, e := d.requestReturnErrResp("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
_, err, e := d.requestReturnErrResp(ctx, limiterOther, "/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
req.SetBody(createData).SetResult(&createResp)
})
if err != nil {
@@ -210,7 +207,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
if err != nil {
return nil, fmt.Errorf("cal proof code error: %s", err.Error())
}
_, err = d.request("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
_, err = d.request(ctx, limiterOther, "/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
req.SetBody(createData).SetResult(&createResp)
})
if err != nil {
@@ -225,13 +222,17 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
preTime := time.Now()
var offset, length int64 = 0, partSize
//var length
ss, err := streamPkg.NewStreamSectionReader(stream, int(partSize))
if err != nil {
return nil, err
}
for i := 0; i < len(createResp.PartInfoList); i++ {
if utils.IsCanceled(ctx) {
return nil, ctx.Err()
}
// refresh upload url if 50 minutes passed
if time.Since(preTime) > 50*time.Minute {
createResp.PartInfoList, err = d.getUploadUrl(count, createResp.FileId, createResp.UploadId)
createResp.PartInfoList, err = d.getUploadUrl(ctx, count, createResp.FileId, createResp.UploadId)
if err != nil {
return nil, err
}
@@ -240,22 +241,19 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
if remain := stream.GetSize() - offset; length > remain {
length = remain
}
rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
if rapidUpload {
srd, err := stream.RangeRead(http_range.Range{Start: offset, Length: length})
if err != nil {
return nil, err
}
rd = utils.NewMultiReadable(srd)
rd, err := ss.GetSectionReader(offset, length)
if err != nil {
return nil, err
}
rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
err = retry.Do(func() error {
_ = rd.Reset()
rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
rd.Seek(0, io.SeekStart)
return d.uploadPart(ctx, rateLimitedRd, createResp.PartInfoList[i])
},
retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second))
ss.RecycleSectionReader(rd)
if err != nil {
return nil, err
}
@@ -268,5 +266,5 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m

log.Debugf("[aliyundrive_open] create file success, resp: %+v", createResp)
// 3. complete
return d.completeUpload(createResp.FileId, createResp.UploadId)
return d.completeUpload(ctx, createResp.FileId, createResp.UploadId)
}
@@ -19,7 +19,7 @@ import (

// do others that not defined in Driver interface

func (d *AliyundriveOpen) _refreshToken() (string, string, error) {
func (d *AliyundriveOpen) _refreshToken(ctx context.Context) (string, string, error) {
if d.UseOnlineAPI && d.APIAddress != "" {
u := d.APIAddress
var resp struct {
@@ -27,14 +27,17 @@ func (d *AliyundriveOpen) _refreshToken() (string, string, error) {
AccessToken string `json:"access_token"`
ErrorMessage string `json:"text"`
}

// Set driver_txt according to the AlipanType option
driverTxt := "alicloud_qr"
if d.AlipanType == "alipanTV" {
driverTxt = "alicloud_tv"
}

_, err := base.RestyClient.R().
err := d.wait(ctx, limiterOther)
if err != nil {
return "", "", err
}
_, err = base.RestyClient.R().
SetHeader("User-Agent", "Mozilla/5.0 (Macintosh; Apple macOS 15_5) AppleWebKit/537.36 (KHTML, like Gecko) Safari/537.36 Chrome/138.0.0.0 Openlist/425.6.30").
SetResult(&resp).
SetQueryParams(map[string]string{
@@ -54,11 +57,14 @@ func (d *AliyundriveOpen) _refreshToken() (string, string, error) {
}
return resp.RefreshToken, resp.AccessToken, nil
}

// Local refresh logic; client_id and client_secret are required
if d.ClientID == "" || d.ClientSecret == "" {
return "", "", fmt.Errorf("empty ClientID or ClientSecret")
}
err := d.wait(ctx, limiterOther)
if err != nil {
return "", "", err
}
url := API_URL + "/oauth/access_token"
//var resp base.TokenResp
var e ErrResp
@@ -110,18 +116,18 @@ func getSub(token string) (string, error) {
return utils.Json.Get(bs, "sub").ToString(), nil
}

func (d *AliyundriveOpen) refreshToken() error {
func (d *AliyundriveOpen) refreshToken(ctx context.Context) error {
if d.ref != nil {
return d.ref.refreshToken()
return d.ref.refreshToken(ctx)
}
refresh, access, err := d._refreshToken()
refresh, access, err := d._refreshToken(ctx)
for i := 0; i < 3; i++ {
if err == nil {
break
} else {
log.Errorf("[ali_open] failed to refresh token: %s", err)
}
refresh, access, err = d._refreshToken()
refresh, access, err = d._refreshToken(ctx)
}
if err != nil {
return err
@@ -132,12 +138,12 @@ func (d *AliyundriveOpen) refreshToken() error {
return nil
}

func (d *AliyundriveOpen) request(uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error) {
b, err, _ := d.requestReturnErrResp(uri, method, callback, retry...)
func (d *AliyundriveOpen) request(ctx context.Context, limitTy limiterType, uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error) {
b, err, _ := d.requestReturnErrResp(ctx, limitTy, uri, method, callback, retry...)
return b, err
}

func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error, *ErrResp) {
func (d *AliyundriveOpen) requestReturnErrResp(ctx context.Context, limitTy limiterType, uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error, *ErrResp) {
req := base.RestyClient.R()
// TODO check whether access_token is expired
req.SetHeader("Authorization", "Bearer "+d.getAccessToken())
@@ -149,6 +155,10 @@ func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base
}
var e ErrResp
req.SetError(&e)
err := d.wait(ctx, limitTy)
if err != nil {
return nil, err, nil
}
res, err := req.Execute(method, API_URL+uri)
if err != nil {
if res != nil {
@@ -159,11 +169,11 @@ func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base
isRetry := len(retry) > 0 && retry[0]
if e.Code != "" {
if !isRetry && (utils.SliceContains([]string{"AccessTokenInvalid", "AccessTokenExpired", "I400JD"}, e.Code) || d.getAccessToken() == "") {
err = d.refreshToken()
err = d.refreshToken(ctx)
if err != nil {
return nil, err, nil
}
return d.requestReturnErrResp(uri, method, callback, true)
return d.requestReturnErrResp(ctx, limitTy, uri, method, callback, true)
}
return nil, fmt.Errorf("%s:%s", e.Code, e.Message), &e
}
@@ -172,7 +182,7 @@ func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base

func (d *AliyundriveOpen) list(ctx context.Context, data base.Json) (*Files, error) {
var resp Files
_, err := d.request("/adrive/v1.0/openFile/list", http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterList, "/adrive/v1.0/openFile/list", http.MethodPost, func(req *resty.Request) {
req.SetBody(data).SetResult(&resp)
})
if err != nil {
@@ -201,7 +211,7 @@ func (d *AliyundriveOpen) getFiles(ctx context.Context, fileId string) ([]File,
//"video_thumbnail_width": 480,
//"image_thumbnail_width": 480,
}
resp, err := d.limitList(ctx, data)
resp, err := d.list(ctx, data)
if err != nil {
return nil, err
}
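Every call in util.go now follows one shape: block on the shared limiter, execute, and on an expired-token code refresh once and retry behind a guard flag so a failing refresh cannot loop. A condensed sketch of that control flow, assuming golang.org/x/time/rate (doRequest, isAuthErr and refresh are illustrative stand-ins, not the driver's API):

package example

import (
    "context"

    "golang.org/x/time/rate"
)

// doRequest waits for a rate-limit token, runs fn, and retries exactly once
// after refreshing credentials when fn reports an authentication failure.
func doRequest(ctx context.Context, lim *rate.Limiter,
    refresh func(context.Context) error,
    fn func() error, isAuthErr func(error) bool) error {
    for retried := false; ; retried = true {
        if err := lim.Wait(ctx); err != nil {
            return err // context cancelled while waiting
        }
        err := fn()
        if err == nil || !isAuthErr(err) || retried {
            return err
        }
        if rerr := refresh(ctx); rerr != nil {
            return rerr
        }
    }
}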
@@ -2,7 +2,6 @@ package aliyundrive_share

import (
"context"
"fmt"
"net/http"
"time"

@@ -12,7 +11,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/cron"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/rateg"
"github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
)
@@ -25,8 +23,7 @@ type AliyundriveShare struct {
DriveId string
cron *cron.Cron

limitList func(ctx context.Context, dir model.Obj) ([]model.Obj, error)
limitLink func(ctx context.Context, file model.Obj) (*model.Link, error)
limiter *limiter
}

func (d *AliyundriveShare) Config() driver.Config {
@@ -38,29 +35,26 @@ func (d *AliyundriveShare) GetAddition() driver.Additional {
}

func (d *AliyundriveShare) Init(ctx context.Context) error {
err := d.refreshToken()
d.limiter = getLimiter()
err := d.refreshToken(ctx)
if err != nil {
d.limiter.free()
d.limiter = nil
return err
}
err = d.getShareToken()
err = d.getShareToken(ctx)
if err != nil {
d.limiter.free()
d.limiter = nil
return err
}
d.cron = cron.NewCron(time.Hour * 2)
d.cron.Do(func() {
err := d.refreshToken()
err := d.refreshToken(ctx)
if err != nil {
log.Errorf("%+v", err)
}
})
d.limitList = rateg.LimitFnCtx(d.list, rateg.LimitFnOption{
Limit: 4,
Bucket: 1,
})
d.limitLink = rateg.LimitFnCtx(d.link, rateg.LimitFnOption{
Limit: 1,
Bucket: 1,
})
return nil
}

@@ -68,19 +62,14 @@ func (d *AliyundriveShare) Drop(ctx context.Context) error {
if d.cron != nil {
d.cron.Stop()
}
d.limiter.free()
d.limiter = nil
d.DriveId = ""
return nil
}

func (d *AliyundriveShare) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
if d.limitList == nil {
return nil, fmt.Errorf("driver not init")
}
return d.limitList(ctx, dir)
}

func (d *AliyundriveShare) list(ctx context.Context, dir model.Obj) ([]model.Obj, error) {
files, err := d.getFiles(dir.GetID())
files, err := d.getFiles(ctx, dir.GetID())
if err != nil {
return nil, err
}
@@ -90,13 +79,6 @@ func (d *AliyundriveShare) list(ctx context.Context, dir model.Obj) ([]model.Obj
}

func (d *AliyundriveShare) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if d.limitLink == nil {
return nil, fmt.Errorf("driver not init")
}
return d.limitLink(ctx, file)
}

func (d *AliyundriveShare) link(ctx context.Context, file model.Obj) (*model.Link, error) {
data := base.Json{
"drive_id": d.DriveId,
"file_id": file.GetID(),
@@ -105,7 +87,7 @@ func (d *AliyundriveShare) link(ctx context.Context, file model.Obj) (*model.Lin
"share_id": d.ShareId,
}
var resp ShareLinkResp
_, err := d.request("https://api.alipan.com/v2/file/get_share_link_download_url", http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterLink, "https://api.alipan.com/v2/file/get_share_link_download_url", http.MethodPost, func(req *resty.Request) {
req.SetHeader(CanaryHeaderKey, CanaryHeaderValue).SetBody(data).SetResult(&resp)
})
if err != nil {
@@ -135,7 +117,7 @@ func (d *AliyundriveShare) Other(ctx context.Context, args model.OtherArgs) (int
default:
return nil, errs.NotSupport
}
_, err := d.request(url, http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterOther, url, http.MethodPost, func(req *resty.Request) {
req.SetBody(data).SetResult(&resp)
})
if err != nil {
drivers/aliyundrive_share/limiter.go (new file)
@@ -0,0 +1,67 @@
package aliyundrive_share

import (
"context"
"fmt"

"golang.org/x/time/rate"
)

// See issue https://github.com/OpenListTeam/OpenList/issues/724
// Seems there is no limit per user.

type limiterType int

const (
limiterList limiterType = iota
limiterLink
limiterOther
)

const (
listRateLimit = 3.9 // 4 per second in document, but we use 3.9 per second to be safe
linkRateLimit = 0.9 // 1 per second in document, but we use 0.9 per second to be safe
otherRateLimit = 14.9 // 15 per second in document, but we use 14.9 per second to be safe
)

type limiter struct {
list *rate.Limiter
link *rate.Limiter
other *rate.Limiter
}

func getLimiter() *limiter {
return &limiter{
list: rate.NewLimiter(rate.Limit(listRateLimit), 1),
link: rate.NewLimiter(rate.Limit(linkRateLimit), 1),
other: rate.NewLimiter(rate.Limit(otherRateLimit), 1),
}
}

func (l *limiter) wait(ctx context.Context, typ limiterType) error {
if l == nil {
return fmt.Errorf("driver not init")
}
switch typ {
case limiterList:
return l.list.Wait(ctx)
case limiterLink:
return l.link.Wait(ctx)
case limiterOther:
return l.other.Wait(ctx)
default:
return fmt.Errorf("unknown limiter type")
}
}
func (l *limiter) free() {

}
func (d *AliyundriveShare) wait(ctx context.Context, typ limiterType) error {
if d == nil {
return fmt.Errorf("driver not init")
}
//if d.ref != nil {
// return d.ref.wait(ctx, typ) // If this is a reference driver, wait on the reference driver.
//}
return d.limiter.wait(ctx, typ)
}
@@ -1,6 +1,7 @@
package aliyundrive_share

import (
"context"
"errors"
"fmt"

@@ -15,11 +16,15 @@ const (
CanaryHeaderValue = "client=web,app=share,version=v2.3.1"
)

func (d *AliyundriveShare) refreshToken() error {
func (d *AliyundriveShare) refreshToken(ctx context.Context) error {
err := d.wait(ctx, limiterOther)
if err != nil {
return err
}
url := "https://auth.alipan.com/v2/account/token"
var resp base.TokenResp
var e ErrorResp
_, err := base.RestyClient.R().
_, err = base.RestyClient.R().
SetBody(base.Json{"refresh_token": d.RefreshToken, "grant_type": "refresh_token"}).
SetResult(&resp).
SetError(&e).
@@ -36,7 +41,11 @@ func (d *AliyundriveShare) refreshToken() error {
}

// do others that not defined in Driver interface
func (d *AliyundriveShare) getShareToken() error {
func (d *AliyundriveShare) getShareToken(ctx context.Context) error {
err := d.wait(ctx, limiterOther)
if err != nil {
return err
}
data := base.Json{
"share_id": d.ShareId,
}
@@ -45,7 +54,7 @@ func (d *AliyundriveShare) getShareToken() error {
}
var e ErrorResp
var resp ShareTokenResp
_, err := base.RestyClient.R().
_, err = base.RestyClient.R().
SetResult(&resp).SetError(&e).SetBody(data).
Post("https://api.alipan.com/v2/share_link/get_share_token")
if err != nil {
@@ -58,7 +67,7 @@ func (d *AliyundriveShare) getShareToken() error {
return nil
}

func (d *AliyundriveShare) request(url, method string, callback base.ReqCallback) ([]byte, error) {
func (d *AliyundriveShare) request(ctx context.Context, limitTy limiterType, url, method string, callback base.ReqCallback) ([]byte, error) {
var e ErrorResp
req := base.RestyClient.R().
SetError(&e).
@@ -71,6 +80,10 @@ func (d *AliyundriveShare) request(url, method string, callback base.ReqCallback
} else {
req.SetBody("{}")
}
err := d.wait(ctx, limitTy)
if err != nil {
return nil, err
}
resp, err := req.Execute(method, url)
if err != nil {
return nil, err
@@ -78,14 +91,14 @@ func (d *AliyundriveShare) request(url, method string, callback base.ReqCallback
if e.Code != "" {
if e.Code == "AccessTokenInvalid" || e.Code == "ShareLinkTokenInvalid" {
if e.Code == "AccessTokenInvalid" {
err = d.refreshToken()
err = d.refreshToken(ctx)
} else {
err = d.getShareToken()
err = d.getShareToken(ctx)
}
if err != nil {
return nil, err
}
return d.request(url, method, callback)
return d.request(ctx, limitTy, url, method, callback)
} else {
return nil, errors.New(e.Code + ": " + e.Message)
}
@@ -93,7 +106,7 @@ func (d *AliyundriveShare) request(url, method string, callback base.ReqCallback
return resp.Body(), nil
}

func (d *AliyundriveShare) getFiles(fileId string) ([]File, error) {
func (d *AliyundriveShare) getFiles(ctx context.Context, fileId string) ([]File, error) {
files := make([]File, 0)
data := base.Json{
"image_thumbnail_process": "image/resize,w_160/format,jpeg",
@@ -110,6 +123,10 @@ func (d *AliyundriveShare) getFiles(fileId string) ([]File, error) {
if data["marker"] == "first" {
data["marker"] = ""
}
err := d.wait(ctx, limiterList)
if err != nil {
return nil, err
}
var e ErrorResp
var resp ListResp
res, err := base.RestyClient.R().
@@ -123,11 +140,11 @@ func (d *AliyundriveShare) getFiles(fileId string) ([]File, error) {
log.Debugf("aliyundrive share get files: %s", res.String())
if e.Code != "" {
if e.Code == "AccessTokenInvalid" || e.Code == "ShareLinkTokenInvalid" {
err = d.getShareToken()
err = d.getShareToken(ctx)
if err != nil {
return nil, err
}
return d.getFiles(fileId)
return d.getFiles(ctx, fileId)
}
return nil, errors.New(e.Message)
}
@@ -203,11 +203,12 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F

streamSize := stream.GetSize()
sliceSize := d.getSliceSize(streamSize)
count := int(streamSize / sliceSize)
count := 1
if streamSize > sliceSize {
count = int((streamSize + sliceSize - 1) / sliceSize)
}
lastBlockSize := streamSize % sliceSize
if lastBlockSize > 0 {
count++
} else {
if lastBlockSize == 0 {
lastBlockSize = sliceSize
}
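This Baidu fix, and the BaiduPhoto one below, replace "floor-divide, then bump the count when there is a remainder" with a single ceiling division, which also pins the count to at least 1 for streams no larger than one slice. The arithmetic in isolation (chunkCount and lastChunkSize are hypothetical helpers):

package example

// chunkCount returns how many sliceSize-byte parts cover size bytes.
// (size + sliceSize - 1) / sliceSize is integer ceiling division; anything
// that fits in one slice, including an empty stream, counts as one part.
func chunkCount(size, sliceSize int64) int64 {
    if size <= sliceSize {
        return 1
    }
    return (size + sliceSize - 1) / sliceSize
}

// lastChunkSize returns the size of the final part; a stream that divides
// evenly gets a full final part rather than a zero-length one.
func lastChunkSize(size, sliceSize int64) int64 {
    if r := size % sliceSize; r > 0 {
        return r
    }
    return sliceSize
}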
@@ -262,11 +262,12 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil

// Calculate the required values
streamSize := stream.GetSize()
count := int(streamSize / DEFAULT)
count := 1
if streamSize > DEFAULT {
count = int((streamSize + DEFAULT - 1) / DEFAULT)
}
lastBlockSize := streamSize % DEFAULT
if lastBlockSize > 0 {
count++
} else {
if lastBlockSize == 0 {
lastBlockSize = DEFAULT
}
@@ -255,7 +255,7 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStr
},
UpdateProgress: up,
})
req, err := http.NewRequestWithContext(ctx, "POST", "https://pan-yz.chaoxing.com/upload", r)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, "https://pan-yz.chaoxing.com/upload", r)
if err != nil {
return err
}
@@ -167,7 +167,7 @@ func (d *ChaoXing) Login() (string, error) {
return "", err
}
// Create the request
req, err := http.NewRequest("POST", "https://passport2.chaoxing.com/fanyalogin", body)
req, err := http.NewRequest(http.MethodPost, "https://passport2.chaoxing.com/fanyalogin", body)
if err != nil {
return "", err
}
@@ -18,6 +18,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/setting"
streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/cookie"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
@@ -241,23 +242,26 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
var finish int64 = 0
var chunk int = 0
DEFAULT := int64(u.ChunkSize)
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT))
if err != nil {
return err
}
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := stream.GetSize() - finish
byteSize := min(left, DEFAULT)
err := retry.Do(
utils.Log.Debugf("[Cloudreve-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
rd, err := ss.GetSectionReader(finish, byteSize)
if err != nil {
return err
}
err = retry.Do(
func() error {
utils.Log.Debugf("[Cloudreve-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadUrl+"?chunk="+strconv.Itoa(chunk),
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
driver.NewLimitedUploadStream(ctx, rd))
if err != nil {
return err
}
@@ -290,6 +294,7 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.RecycleSectionReader(rd)
if err != nil {
return err
}
@@ -304,23 +309,25 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
uploadUrl := u.UploadURLs[0]
var finish int64 = 0
DEFAULT := int64(u.ChunkSize)
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT))
if err != nil {
return err
}
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := stream.GetSize() - finish
byteSize := min(left, DEFAULT)
err := retry.Do(
utils.Log.Debugf("[Cloudreve-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
rd, err := ss.GetSectionReader(finish, byteSize)
if err != nil {
return err
}
err = retry.Do(
func() error {
utils.Log.Debugf("[Cloudreve-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl,
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, rd))
if err != nil {
return err
}
@@ -346,6 +353,7 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.RecycleSectionReader(rd)
if err != nil {
return err
}
@@ -363,23 +371,26 @@ func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u Uploa
var chunk int = 0
var etags []string
DEFAULT := int64(u.ChunkSize)
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT))
if err != nil {
return err
}
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := stream.GetSize() - finish
byteSize := min(left, DEFAULT)
err := retry.Do(
utils.Log.Debugf("[Cloudreve-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
rd, err := ss.GetSectionReader(finish, byteSize)
if err != nil {
return err
}
err = retry.Do(
func() error {
utils.Log.Debugf("[Cloudreve-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, u.UploadURLs[chunk],
driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
driver.NewLimitedUploadStream(ctx, rd))
if err != nil {
return err
}
@@ -404,6 +415,7 @@ func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u Uploa
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.RecycleSectionReader(rd)
if err != nil {
return err
}
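All three Cloudreve uploaders (and the CloudreveV4 ones below) stop buffering each chunk into a fresh []byte and instead borrow a reusable section reader, rewinding it to the start before every retry attempt so a failed attempt re-sends exactly the same bytes. A reduced sketch of the loop, using a plain in-memory chunk in place of the project's stream.StreamSectionReader (uploadChunks and the URL shape are illustrative):

package example

import (
    "bytes"
    "context"
    "fmt"
    "io"
    "net/http"

    "github.com/avast/retry-go"
)

// uploadChunks PUTs a stream to url in fixed-size parts, re-seeking the
// chunk reader before each attempt so retries resend identical bytes.
func uploadChunks(ctx context.Context, r io.Reader, size, chunkSize int64, url string) error {
    buf := make([]byte, chunkSize) // reused for every chunk
    for off := int64(0); off < size; off += chunkSize {
        n := min(chunkSize, size-off)
        if _, err := io.ReadFull(r, buf[:n]); err != nil {
            return err
        }
        rd := bytes.NewReader(buf[:n]) // seekable view of this chunk
        err := retry.Do(func() error {
            rd.Seek(0, io.SeekStart) // rewind before every attempt
            req, err := http.NewRequestWithContext(ctx, http.MethodPut,
                fmt.Sprintf("%s?offset=%d", url, off), rd)
            if err != nil {
                return err
            }
            req.ContentLength = n
            resp, err := http.DefaultClient.Do(req)
            if err != nil {
                return err
            }
            defer resp.Body.Close()
            if resp.StatusCode >= 300 {
                return fmt.Errorf("unexpected status %s", resp.Status)
            }
            return nil
        }, retry.Attempts(3), retry.DelayType(retry.BackOffDelay))
        if err != nil {
            return err
        }
    }
    return nil
}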
@@ -19,6 +19,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/setting"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
@@ -256,23 +257,26 @@ func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u F
var finish int64 = 0
var chunk int = 0
DEFAULT := int64(u.ChunkSize)
ss, err := stream.NewStreamSectionReader(file, int(DEFAULT))
if err != nil {
return err
}
for finish < file.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := file.GetSize() - finish
byteSize := min(left, DEFAULT)
err := retry.Do(
utils.Log.Debugf("[CloudreveV4-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
rd, err := ss.GetSectionReader(finish, byteSize)
if err != nil {
return err
}
err = retry.Do(
func() error {
utils.Log.Debugf("[CloudreveV4-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(file, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadUrl+"?chunk="+strconv.Itoa(chunk),
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
driver.NewLimitedUploadStream(ctx, rd))
if err != nil {
return err
}
@@ -305,6 +309,7 @@ func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u F
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.RecycleSectionReader(rd)
if err != nil {
return err
}
@@ -319,23 +324,25 @@ func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u
uploadUrl := u.UploadUrls[0]
var finish int64 = 0
DEFAULT := int64(u.ChunkSize)
ss, err := stream.NewStreamSectionReader(file, int(DEFAULT))
if err != nil {
return err
}
for finish < file.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := file.GetSize() - finish
byteSize := min(left, DEFAULT)
err := retry.Do(
utils.Log.Debugf("[CloudreveV4-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
rd, err := ss.GetSectionReader(finish, byteSize)
if err != nil {
return err
}
err = retry.Do(
func() error {
utils.Log.Debugf("[CloudreveV4-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(file, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl,
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, rd))
if err != nil {
return err
}
@@ -362,6 +369,7 @@ func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.RecycleSectionReader(rd)
if err != nil {
return err
}
@@ -379,23 +387,26 @@ func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileU
var chunk int = 0
var etags []string
DEFAULT := int64(u.ChunkSize)
ss, err := stream.NewStreamSectionReader(file, int(DEFAULT))
if err != nil {
return err
}
for finish < file.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := file.GetSize() - finish
byteSize := min(left, DEFAULT)
err := retry.Do(
utils.Log.Debugf("[CloudreveV4-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
rd, err := ss.GetSectionReader(finish, byteSize)
if err != nil {
return err
}
err = retry.Do(
func() error {
utils.Log.Debugf("[CloudreveV4-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(file, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, u.UploadUrls[chunk],
driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
driver.NewLimitedUploadStream(ctx, rd))
if err != nil {
return err
}
@@ -421,6 +432,7 @@ func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileU
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.RecycleSectionReader(rd)
if err != nil {
return err
}
@@ -292,10 +292,10 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (

if offset == 0 && limit > 0 {
fileHeader = make([]byte, fileHeaderSize)
n, _ := io.ReadFull(remoteReader, fileHeader)
n, err := io.ReadFull(remoteReader, fileHeader)
if n != fileHeaderSize {
fileHeader = nil
return nil, fmt.Errorf("can't read data, expected=%d, got=%d", fileHeaderSize, n)
return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", fileHeaderSize, n, err)
}
if limit <= fileHeaderSize {
remoteReader.Close()
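The crypt change stops discarding the error from io.ReadFull, so a short read now reports both the byte counts and the underlying cause. The same pattern in isolation (readHeader is a hypothetical helper):

package example

import (
    "fmt"
    "io"
)

// readHeader reads exactly size bytes, wrapping the underlying error so a
// short read explains how much arrived and why reading stopped.
func readHeader(r io.Reader, size int) ([]byte, error) {
    buf := make([]byte, size)
    n, err := io.ReadFull(r, buf) // err is io.ErrUnexpectedEOF on a short read
    if n != size {
        return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", size, n, err)
    }
    return buf, nil
}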
@@ -236,7 +236,7 @@ func (d *Doubao) Put(ctx context.Context, dstDir model.Obj, file model.FileStrea

// Choose the upload method based on file size
if file.GetSize() <= 1*utils.MB { // under 1MB, use the simple upload mode
return d.Upload(&uploadConfig, dstDir, file, up, dataType)
return d.Upload(ctx, &uploadConfig, dstDir, file, up, dataType)
}
// large files use multipart upload
return d.UploadByMultipart(ctx, &uploadConfig, file.GetSize(), dstDir, file, up, dataType)
@@ -24,6 +24,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
@@ -447,41 +448,66 @@ func (d *Doubao) uploadNode(uploadConfig *UploadConfig, dir model.Obj, file mode
}

// Upload implements the simple (non-multipart) upload
func (d *Doubao) Upload(config *UploadConfig, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, dataType string) (model.Obj, error) {
data, err := io.ReadAll(file)
func (d *Doubao) Upload(ctx context.Context, config *UploadConfig, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, dataType string) (model.Obj, error) {
ss, err := stream.NewStreamSectionReader(file, int(file.GetSize()))
if err != nil {
return nil, err
}
reader, err := ss.GetSectionReader(0, file.GetSize())
if err != nil {
return nil, err
}

// Calculate CRC32
crc32Hash := crc32.NewIEEE()
crc32Hash.Write(data)
w, err := utils.CopyWithBuffer(crc32Hash, reader)
if w != file.GetSize() {
return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", file.GetSize(), w, err)
}
crc32Value := hex.EncodeToString(crc32Hash.Sum(nil))

// Build the request path
uploadNode := config.InnerUploadAddress.UploadNodes[0]
storeInfo := uploadNode.StoreInfos[0]
uploadUrl := fmt.Sprintf("https://%s/upload/v1/%s", uploadNode.UploadHost, storeInfo.StoreURI)

uploadResp := UploadResp{}

if _, err = d.uploadRequest(uploadUrl, http.MethodPost, storeInfo, func(req *resty.Request) {
req.SetHeaders(map[string]string{
"Content-Type": "application/octet-stream",
"Content-Crc32": crc32Value,
"Content-Length": fmt.Sprintf("%d", len(data)),
"Content-Disposition": fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI)),
})

req.SetBody(data)
}, &uploadResp); err != nil {
rateLimitedRd := driver.NewLimitedUploadStream(ctx, reader)
err = d._retryOperation("Upload", func() error {
reader.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadUrl, rateLimitedRd)
if err != nil {
return err
}
req.Header = map[string][]string{
"Referer": {BaseURL + "/"},
"Origin": {BaseURL},
"User-Agent": {UserAgent},
"X-Storage-U": {d.UserId},
"Authorization": {storeInfo.Auth},
"Content-Type": {"application/octet-stream"},
"Content-Crc32": {crc32Value},
"Content-Length": {fmt.Sprintf("%d", file.GetSize())},
"Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))},
}
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
bytes, _ := io.ReadAll(res.Body)
resp := UploadResp{}
utils.Json.Unmarshal(bytes, &resp)
if resp.Code != 2000 {
return fmt.Errorf("upload part failed: %s", resp.Message)
} else if resp.Data.Crc32 != crc32Value {
return fmt.Errorf("upload part failed: crc32 mismatch, expected %s, got %s", crc32Value, resp.Data.Crc32)
}
return nil
})
ss.RecycleSectionReader(reader)
if err != nil {
return nil, err
}

if uploadResp.Code != 2000 {
return nil, fmt.Errorf("upload failed: %s", uploadResp.Message)
}

uploadNodeResp, err := d.uploadNode(config, dstDir, file, dataType)
if err != nil {
return nil, err
@@ -519,65 +545,104 @@ func (d *Doubao) UploadByMultipart(ctx context.Context, config *UploadConfig, fi
totalParts := (fileSize + chunkSize - 1) / chunkSize
// Create the part info list
parts := make([]UploadPart, totalParts)
// Cache the file
tempFile, err := file.CacheFullInTempFile()

// Use stream.NewStreamSectionReader instead of caching to a temp file
ss, err := stream.NewStreamSectionReader(file, int(chunkSize))
if err != nil {
return nil, fmt.Errorf("failed to cache file: %w", err)
return nil, err
}
up(10.0) // update progress
// Set up parallel uploads
threadG, uploadCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
retry.Attempts(1),
thread := min(int(totalParts), d.uploadThread)
threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
retry.Attempts(MaxRetryAttempts),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
retry.DelayType(retry.BackOffDelay),
retry.MaxJitter(200*time.Millisecond),
)

var partsMutex sync.Mutex
// Upload all parts in parallel
for partIndex := int64(0); partIndex < totalParts; partIndex++ {
hash := crc32.NewIEEE()
for partIndex := range totalParts {
if utils.IsCanceled(uploadCtx) {
break
}
partIndex := partIndex
partNumber := partIndex + 1 // part numbers start at 1

threadG.Go(func(ctx context.Context) error {
// Calculate this part's size and offset
offset := partIndex * chunkSize
size := chunkSize
if partIndex == totalParts-1 {
size = fileSize - offset
}

limitedReader := driver.NewLimitedUploadStream(ctx, io.NewSectionReader(tempFile, offset, size))
// Read the data into memory
data, err := io.ReadAll(limitedReader)
if err != nil {
return fmt.Errorf("failed to read part %d: %w", partNumber, err)
}
// Calculate CRC32
crc32Value := calculateCRC32(data)
// Upload the part with _retryOperation
var uploadPart UploadPart
if err = d._retryOperation(fmt.Sprintf("Upload part %d", partNumber), func() error {
var err error
uploadPart, err = d.uploadPart(config, uploadUrl, uploadID, partNumber, data, crc32Value)
return err
}); err != nil {
return fmt.Errorf("part %d upload failed: %w", partNumber, err)
}
// Record the successfully uploaded part
partsMutex.Lock()
parts[partIndex] = UploadPart{
PartNumber: strconv.FormatInt(partNumber, 10),
Etag: uploadPart.Etag,
Crc32: crc32Value,
}
partsMutex.Unlock()
// Update progress
progress := 10.0 + 90.0*float64(threadG.Success()+1)/float64(totalParts)
up(math.Min(progress, 95.0))

return nil
// Calculate this part's size and offset
offset := partIndex * chunkSize
size := chunkSize
if partIndex == totalParts-1 {
size = fileSize - offset
}
var reader *stream.SectionReader
var rateLimitedRd io.Reader
crc32Value := ""
threadG.GoWithLifecycle(errgroup.Lifecycle{
Before: func(ctx context.Context) error {
if reader == nil {
var err error
reader, err = ss.GetSectionReader(offset, size)
if err != nil {
return err
}
hash.Reset()
w, err := utils.CopyWithBuffer(hash, reader)
if w != size {
return fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", size, w, err)
}
crc32Value = hex.EncodeToString(hash.Sum(nil))
rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
}
return nil
},
Do: func(ctx context.Context) error {
reader.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("%s?uploadid=%s&part_number=%d&phase=transfer", uploadUrl, uploadID, partNumber), rateLimitedRd)
if err != nil {
return err
}
req.Header = map[string][]string{
"Referer": {BaseURL + "/"},
"Origin": {BaseURL},
"User-Agent": {UserAgent},
"X-Storage-U": {d.UserId},
"Authorization": {storeInfo.Auth},
"Content-Type": {"application/octet-stream"},
"Content-Crc32": {crc32Value},
"Content-Length": {fmt.Sprintf("%d", size)},
"Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))},
}
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
bytes, _ := io.ReadAll(res.Body)
uploadResp := UploadResp{}
utils.Json.Unmarshal(bytes, &uploadResp)
if uploadResp.Code != 2000 {
return fmt.Errorf("upload part failed: %s", uploadResp.Message)
} else if uploadResp.Data.Crc32 != crc32Value {
return fmt.Errorf("upload part failed: crc32 mismatch, expected %s, got %s", crc32Value, uploadResp.Data.Crc32)
}
// Record the successfully uploaded part
partsMutex.Lock()
parts[partIndex] = UploadPart{
PartNumber: strconv.FormatInt(partNumber, 10),
Etag: uploadResp.Data.Etag,
Crc32: crc32Value,
}
partsMutex.Unlock()
// Update progress
progress := 10.0 + 90.0*float64(threadG.Success()+1)/float64(totalParts)
up(math.Min(progress, 95.0))
return nil
},
After: func(err error) {
ss.RecycleSectionReader(reader)
},
})
}

@@ -680,42 +745,6 @@ func (d *Doubao) initMultipartUpload(config *UploadConfig, uploadUrl string, sto
return uploadResp.Data.UploadId, nil
}

// Part upload implementation
func (d *Doubao) uploadPart(config *UploadConfig, uploadUrl, uploadID string, partNumber int64, data []byte, crc32Value string) (resp UploadPart, err error) {
uploadResp := UploadResp{}
storeInfo := config.InnerUploadAddress.UploadNodes[0].StoreInfos[0]

_, err = d.uploadRequest(uploadUrl, http.MethodPost, storeInfo, func(req *resty.Request) {
req.SetHeaders(map[string]string{
"Content-Type": "application/octet-stream",
"Content-Crc32": crc32Value,
"Content-Length": fmt.Sprintf("%d", len(data)),
"Content-Disposition": fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI)),
})

req.SetQueryParams(map[string]string{
"uploadid": uploadID,
"part_number": strconv.FormatInt(partNumber, 10),
"phase": "transfer",
})

req.SetBody(data)
req.SetContentLength(true)
}, &uploadResp)

if err != nil {
return resp, err
}

if uploadResp.Code != 2000 {
return resp, fmt.Errorf("upload part failed: %s", uploadResp.Message)
} else if uploadResp.Data.Crc32 != crc32Value {
return resp, fmt.Errorf("upload part failed: crc32 mismatch, expected %s, got %s", crc32Value, uploadResp.Data.Crc32)
}

return uploadResp.Data, nil
}

// Complete the multipart upload
func (d *Doubao) completeMultipartUpload(config *UploadConfig, uploadUrl, uploadID string, parts []UploadPart) error {
uploadResp := UploadResp{}
@@ -784,13 +813,6 @@ func (d *Doubao) commitMultipartUpload(uploadConfig *UploadConfig) error {
return nil
}

// Calculate CRC32
func calculateCRC32(data []byte) string {
hash := crc32.NewIEEE()
hash.Write(data)
return hex.EncodeToString(hash.Sum(nil))
}

// _retryOperation retries an operation
func (d *Doubao) _retryOperation(operation string, fn func() error) error {
return retry.Do(
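The reworked Doubao upload hashes each section once in the Before hook, then rewinds the same reader for the actual request, so the Content-Crc32 header is always computed from exactly the bytes that get sent, without holding a second copy of the part in memory. The hash-then-rewind step in miniature (crc32OfSection is a hypothetical helper):

package example

import (
    "encoding/hex"
    "fmt"
    "hash/crc32"
    "io"
)

// crc32OfSection computes the IEEE CRC32 of everything in rs, then rewinds
// rs so the caller can stream the identical bytes as a request body.
func crc32OfSection(rs io.ReadSeeker, size int64) (string, error) {
    h := crc32.NewIEEE()
    n, err := io.Copy(h, rs)
    if n != size {
        return "", fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", size, n, err)
    }
    if _, err := rs.Seek(0, io.SeekStart); err != nil {
        return "", err
    }
    return hex.EncodeToString(h.Sum(nil)), nil
}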
@@ -192,12 +192,11 @@ func (d *Dropbox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt

url := d.contentBase + "/2/files/upload_session/append_v2"
reader := driver.NewLimitedUploadStream(ctx, io.LimitReader(stream, PartSize))
req, err := http.NewRequest(http.MethodPost, url, reader)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, reader)
if err != nil {
log.Errorf("failed to update file when append to upload session, err: %+v", err)
return err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Authorization", "Bearer "+d.AccessToken)
@@ -169,11 +169,10 @@ func (d *Dropbox) getFiles(ctx context.Context, path string) ([]File, error) {

func (d *Dropbox) finishUploadSession(ctx context.Context, toPath string, offset int64, sessionId string) error {
url := d.contentBase + "/2/files/upload_session/finish"
req, err := http.NewRequest(http.MethodPost, url, nil)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil)
if err != nil {
return err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Authorization", "Bearer "+d.AccessToken)

@@ -214,11 +213,10 @@ func (d *Dropbox) finishUploadSession(ctx context.Context, toPath string, offset

func (d *Dropbox) startUploadSession(ctx context.Context) (string, error) {
url := d.contentBase + "/2/files/upload_session/start"
req, err := http.NewRequest(http.MethodPost, url, nil)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil)
if err != nil {
return "", err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Authorization", "Bearer "+d.AccessToken)
req.Header.Set("Dropbox-API-Arg", "{\"close\":false}")
@@ -31,13 +31,13 @@ func (c *customTokenSource) Token() (*oauth2.Token, error) {
v.Set("client_id", c.config.ClientID)
v.Set("client_secret", c.config.ClientSecret)

req, err := http.NewRequest("POST", c.config.TokenURL, strings.NewReader(v.Encode()))
req, err := http.NewRequestWithContext(c.ctx, http.MethodPost, c.config.TokenURL, strings.NewReader(v.Encode()))
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

resp, err := http.DefaultClient.Do(req.WithContext(c.ctx))
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}
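Several of these diffs converge on the same idiom: http.NewRequestWithContext binds the context when the request is built, instead of http.NewRequest followed by req.WithContext, which shallow-copies the request just to attach the context. Sketched with placeholder values:

package example

import (
    "context"
    "net/http"
    "strings"
)

// newTokenRequest builds a context-bound POST in one call; cancellation and
// deadlines apply from the start, and no extra request copy is allocated.
func newTokenRequest(ctx context.Context, tokenURL, form string) (*http.Request, error) {
    req, err := http.NewRequestWithContext(ctx, http.MethodPost, tokenURL, strings.NewReader(form))
    if err != nil {
        return nil, err
    }
    req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
    return req, nil
}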
@@ -2,12 +2,15 @@ package ftp

import (
    "context"
    "errors"
    "io"
    stdpath "path"

    "github.com/OpenListTeam/OpenList/v4/internal/driver"
    "github.com/OpenListTeam/OpenList/v4/internal/errs"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/internal/stream"
    "github.com/OpenListTeam/OpenList/v4/pkg/http_range"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
    "github.com/jlaffaye/ftp"
)

@@ -16,6 +19,9 @@ type FTP struct {
    model.Storage
    Addition
    conn *ftp.ServerConn

    ctx    context.Context
    cancel context.CancelFunc
}

func (d *FTP) Config() driver.Config {

@@ -27,12 +33,16 @@ func (d *FTP) GetAddition() driver.Additional {
}

func (d *FTP) Init(ctx context.Context) error {
    return d._login()
    d.ctx, d.cancel = context.WithCancel(context.Background())
    var err error
    d.conn, err = d._login(ctx)
    return err
}

func (d *FTP) Drop(ctx context.Context) error {
    if d.conn != nil {
        _ = d.conn.Logout()
        _ = d.conn.Quit()
        d.cancel()
    }
    return nil
}

@@ -62,25 +72,51 @@ func (d *FTP) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]m
}

func (d *FTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
    if err := d.login(); err != nil {
    conn, err := d._login(ctx)
    if err != nil {
        return nil, err
    }

    remoteFile := NewFileReader(d.conn, encode(file.GetPath(), d.Encoding), file.GetSize())
    if remoteFile != nil && !d.Config().OnlyLinkMFile {
        return &model.Link{
            RangeReader: &model.FileRangeReader{
                RangeReaderIF: stream.RateLimitRangeReaderFunc(stream.GetRangeReaderFromMFile(file.GetSize(), remoteFile)),
            },
            SyncClosers: utils.NewSyncClosers(remoteFile),
    path := encode(file.GetPath(), d.Encoding)
    size := file.GetSize()
    resultRangeReader := func(context context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
        length := httpRange.Length
        if length < 0 || httpRange.Start+length > size {
            length = size - httpRange.Start
        }
        var c *ftp.ServerConn
        if ctx == context {
            c = conn
        } else {
            var err error
            c, err = d._login(context)
            if err != nil {
                return nil, err
            }
        }
        resp, err := c.RetrFrom(path, uint64(httpRange.Start))
        if err != nil {
            return nil, err
        }
        var close utils.CloseFunc
        if context == ctx {
            close = resp.Close
        } else {
            close = func() error {
                return errors.Join(resp.Close(), c.Quit())
            }
        }
        return utils.ReadCloser{
            Reader: io.LimitReader(resp, length),
            Closer: close,
        }, nil
    }

    return &model.Link{
        MFile: &stream.RateLimitFile{
            File:    remoteFile,
            Limiter: stream.ServerDownloadLimit,
            Ctx:     ctx,
        RangeReader: &model.FileRangeReader{
            RangeReaderIF: stream.RateLimitRangeReaderFunc(resultRangeReader),
        },
        SyncClosers: utils.NewSyncClosers(utils.CloseFunc(conn.Quit)),
    }, nil
}

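Note on the Link rewrite above: range requests arriving on the same context that produced the link reuse the connection opened in Link (closed later by SyncClosers), while requests on any other context dial a dedicated FTP session whose Quit is joined into the response's Close. A minimal sketch of that reuse-or-dial decision in isolation, assuming only the jlaffaye/ftp API and the _login(ctx) shape from this diff (pickConn itself is hypothetical):

package ftpsketch

import (
    "context"

    "github.com/jlaffaye/ftp"
)

// pickConn mirrors the decision inside resultRangeReader: reuse the cached
// connection for the originating request, dial a fresh one for anyone else.
// The returned cleanup is what the caller must run alongside resp.Close.
func pickConn(linkCtx, reqCtx context.Context, cached *ftp.ServerConn,
    dial func(context.Context) (*ftp.ServerConn, error)) (*ftp.ServerConn, func() error, error) {
    if reqCtx == linkCtx {
        // Same request: reuse; the link's SyncClosers will Quit it later.
        return cached, func() error { return nil }, nil
    }
    // Parallel or later range request: give it its own session.
    c, err := dial(reqCtx)
    if err != nil {
        return nil, nil, err
    }
    return c, c.Quit, nil
}
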
@@ -33,7 +33,7 @@ type Addition struct {
var config = driver.Config{
    Name:          "FTP",
    LocalSort:     true,
    OnlyLinkMFile: true,
    OnlyLinkMFile: false,
    DefaultRoot:   "/",
    NoLinkURL:     true,
}

@@ -1,11 +1,8 @@
package ftp

import (
    "context"
    "fmt"
    "io"
    "os"
    "sync"
    "sync/atomic"
    "time"

    "github.com/OpenListTeam/OpenList/v4/pkg/singleflight"

@@ -15,112 +12,32 @@ import (
// do others that are not defined in the Driver interface

func (d *FTP) login() error {
    err, _, _ := singleflight.ErrorGroup.Do(fmt.Sprintf("FTP.login:%p", d), func() (error, error) {
        return d._login(), nil
    _, err, _ := singleflight.AnyGroup.Do(fmt.Sprintf("FTP.login:%p", d), func() (any, error) {
        var err error
        if d.conn != nil {
            err = d.conn.NoOp()
            if err != nil {
                d.conn.Quit()
                d.conn = nil
            }
        }
        if d.conn == nil {
            d.conn, err = d._login(d.ctx)
        }
        return nil, err
    })
    return err
}

func (d *FTP) _login() error {
    if d.conn != nil {
        _, err := d.conn.CurrentDir()
        if err == nil {
            return nil
        }
    }
    conn, err := ftp.Dial(d.Address, ftp.DialWithShutTimeout(10*time.Second))
func (d *FTP) _login(ctx context.Context) (*ftp.ServerConn, error) {
    conn, err := ftp.Dial(d.Address, ftp.DialWithShutTimeout(10*time.Second), ftp.DialWithContext(ctx))
    if err != nil {
        return err
        return nil, err
    }
    err = conn.Login(d.Username, d.Password)
    if err != nil {
        return err
        conn.Quit()
        return nil, err
    }
    d.conn = conn
    return nil
}

// FileReader An FTP file reader that implements io.MFile for seeking.
type FileReader struct {
    conn         *ftp.ServerConn
    resp         *ftp.Response
    offset       atomic.Int64
    readAtOffset int64
    mu           sync.Mutex
    path         string
    size         int64
}

func NewFileReader(conn *ftp.ServerConn, path string, size int64) *FileReader {
    return &FileReader{
        conn: conn,
        path: path,
        size: size,
    }
}

func (r *FileReader) Read(buf []byte) (n int, err error) {
    n, err = r.ReadAt(buf, r.offset.Load())
    r.offset.Add(int64(n))
    return
}

func (r *FileReader) ReadAt(buf []byte, off int64) (n int, err error) {
    if off < 0 {
        return -1, os.ErrInvalid
    }
    r.mu.Lock()
    defer r.mu.Unlock()

    if off != r.readAtOffset {
        // have to restart the connection to correct the offset
        _ = r.resp.Close()
        r.resp = nil
    }

    if r.resp == nil {
        r.resp, err = r.conn.RetrFrom(r.path, uint64(off))
        r.readAtOffset = off
        if err != nil {
            return 0, err
        }
    }

    n, err = r.resp.Read(buf)
    r.readAtOffset += int64(n)
    return
}

func (r *FileReader) Seek(offset int64, whence int) (int64, error) {
    oldOffset := r.offset.Load()
    var newOffset int64
    switch whence {
    case io.SeekStart:
        newOffset = offset
    case io.SeekCurrent:
        newOffset = oldOffset + offset
    case io.SeekEnd:
        return r.size, nil
    default:
        return -1, os.ErrInvalid
    }

    if newOffset < 0 {
        // offset out of range
        return oldOffset, os.ErrInvalid
    }
    if newOffset == oldOffset {
        // offset not changed, so return directly
        return oldOffset, nil
    }
    r.offset.Store(newOffset)
    return newOffset, nil
}

func (r *FileReader) Close() error {
    if r.resp != nil {
        return r.resp.Close()
    }
    return nil
    return conn, nil
}

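The login path above now funnels every concurrent caller through a single in-flight attempt keyed by the driver pointer, probing the existing connection with NoOp before re-dialing. The same dedup behaviour, reduced to the stock golang.org/x/sync/singleflight package (used here purely for illustration; OpenList ships its own wrapper):

package main

import (
    "fmt"
    "sync"

    "golang.org/x/sync/singleflight"
)

var g singleflight.Group

// ensureLogin deduplicates concurrent (re)login attempts: when ten goroutines
// hit an expired session at once, only one dial callback actually runs and
// the other callers share its result.
func ensureLogin(key string, login func() error) error {
    _, err, _ := g.Do(key, func() (interface{}, error) {
        return nil, login()
    })
    return err
}

func main() {
    var wg sync.WaitGroup
    calls := 0
    for i := 0; i < 10; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            _ = ensureLogin("ftp-login", func() error {
                calls++ // safe enough here: Do never runs two callbacks for one key at once
                return nil
            })
        }()
    }
    wg.Wait()
    fmt.Println("login callbacks executed:", calls) // typically 1, never 10
}
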
@@ -5,17 +5,20 @@ import (
    "crypto/x509"
    "encoding/pem"
    "fmt"
    "github.com/OpenListTeam/OpenList/v4/internal/op"
    "io"
    "net/http"
    "os"
    "regexp"
    "strconv"
    "time"

    "github.com/OpenListTeam/OpenList/v4/internal/op"
    "github.com/OpenListTeam/OpenList/v4/internal/stream"
    "github.com/avast/retry-go"

    "github.com/OpenListTeam/OpenList/v4/drivers/base"
    "github.com/OpenListTeam/OpenList/v4/internal/driver"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/pkg/http_range"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
    "github.com/go-resty/resty/v2"
    "github.com/golang-jwt/jwt/v4"

@@ -251,28 +254,58 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {
    return res, nil
}

func (d *GoogleDrive) chunkUpload(ctx context.Context, stream model.FileStreamer, url string) error {
func (d *GoogleDrive) chunkUpload(ctx context.Context, file model.FileStreamer, url string) error {
    var defaultChunkSize = d.ChunkSize * 1024 * 1024
    var offset int64 = 0
    for offset < stream.GetSize() {
    ss, err := stream.NewStreamSectionReader(file, int(defaultChunkSize))
    if err != nil {
        return err
    }
    url += "?includeItemsFromAllDrives=true&supportsAllDrives=true"
    for offset < file.GetSize() {
        if utils.IsCanceled(ctx) {
            return ctx.Err()
        }
        chunkSize := stream.GetSize() - offset
        if chunkSize > defaultChunkSize {
            chunkSize = defaultChunkSize
        }
        reader, err := stream.RangeRead(http_range.Range{Start: offset, Length: chunkSize})
        chunkSize := min(file.GetSize()-offset, defaultChunkSize)
        reader, err := ss.GetSectionReader(offset, chunkSize)
        if err != nil {
            return err
        }
        reader = driver.NewLimitedUploadStream(ctx, reader)
        _, err = d.request(url, http.MethodPut, func(req *resty.Request) {
            req.SetHeaders(map[string]string{
                "Content-Length": strconv.FormatInt(chunkSize, 10),
                "Content-Range":  fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, stream.GetSize()),
            }).SetBody(reader).SetContext(ctx)
        }, nil)
        limitedReader := driver.NewLimitedUploadStream(ctx, reader)
        err = retry.Do(func() error {
            reader.Seek(0, io.SeekStart)
            req, err := http.NewRequestWithContext(ctx, http.MethodPut, url, limitedReader)
            if err != nil {
                return err
            }
            req.Header = map[string][]string{
                "Authorization":  {"Bearer " + d.AccessToken},
                "Content-Length": {strconv.FormatInt(chunkSize, 10)},
                "Content-Range":  {fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, file.GetSize())},
            }
            res, err := base.HttpClient.Do(req)
            if err != nil {
                return err
            }
            defer res.Body.Close()
            bytes, _ := io.ReadAll(res.Body)
            var e Error
            utils.Json.Unmarshal(bytes, &e)
            if e.Error.Code != 0 {
                if e.Error.Code == 401 {
                    err = d.refreshToken()
                    if err != nil {
                        return err
                    }
                }
                return fmt.Errorf("%s: %v", e.Error.Message, e.Error.Errors)
            }
            return nil
        },
            retry.Attempts(3),
            retry.DelayType(retry.BackOffDelay),
            retry.Delay(time.Second))
        ss.RecycleSectionReader(reader)
        if err != nil {
            return err
        }

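chunkUpload now pulls each chunk through a StreamSectionReader and wraps the PUT in retry.Do, seeking back to the start of the section before every attempt; that rewind is what makes retries safe on a non-seekable source stream. The shape of the rewind-and-retry loop, reduced to the standard library (uploadChunk is a hypothetical stand-in for the HTTP PUT):

package main

import (
    "bytes"
    "errors"
    "fmt"
    "io"
)

// retryChunk retries an upload of one buffered chunk, rewinding the reader
// before each attempt so every retry sends the same bytes from offset 0.
func retryChunk(rd *bytes.Reader, attempts int, uploadChunk func(io.Reader) error) error {
    var err error
    for i := 0; i < attempts; i++ {
        if _, err = rd.Seek(0, io.SeekStart); err != nil {
            return err
        }
        if err = uploadChunk(rd); err == nil {
            return nil
        }
    }
    return fmt.Errorf("chunk failed after %d attempts: %w", attempts, err)
}

func main() {
    chunk := bytes.NewReader([]byte("chunk payload"))
    tries := 0
    err := retryChunk(chunk, 3, func(r io.Reader) error {
        tries++
        if tries < 2 {
            return errors.New("transient 5xx") // first attempt fails
        }
        b, _ := io.ReadAll(r)
        fmt.Printf("uploaded %d bytes on attempt %d\n", len(b), tries)
        return nil
    })
    if err != nil {
        fmt.Println(err)
    }
}
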
@@ -3,6 +3,7 @@ package LenovoNasShare

import (
    "context"
    "net/http"
    "net/url"
    "strings"
    "time"

@@ -71,7 +72,23 @@ func (d *LenovoNasShare) List(ctx context.Context, dir model.Obj, args model.Lis
    files = append(files, resp.Data.List...)

    return utils.SliceConvert(files, func(src File) (model.Obj, error) {
        return src, nil
        if src.IsDir() {
            return src, nil
        }
        return &model.ObjThumb{
            Object: model.Object{
                Name:     src.GetName(),
                Size:     src.GetSize(),
                Modified: src.ModTime(),
                IsFolder: src.IsDir(),
            },
            Thumbnail: model.Thumbnail{
                Thumbnail: func() string {
                    thumbUrl := d.Host + "/oneproxy/api/share/v1/file/thumb?code=" + d.ShareId + "&stoken=" + d.stoken + "&path=" + url.QueryEscape(src.GetPath())
                    return thumbUrl
                }(),
            },
        }, nil
    })
}

92
drivers/local/benchmark_calculatedirsize_test.go
Normal file
@@ -0,0 +1,92 @@
package local

// TestDirCalculateSize tests the directory size calculation
// It should be run with the local driver enabled and directory size calculation set to true
import (
    "os"
    "path/filepath"
    "strconv"
    "testing"

    "github.com/OpenListTeam/OpenList/v4/internal/driver"
)

func generatedTestDir(dir string, dep, filecount int) {
    if dep == 0 {
        return
    }
    for i := 0; i < dep; i++ {
        subDir := dir + "/dir" + strconv.Itoa(i)
        os.Mkdir(subDir, 0755)
        generatedTestDir(subDir, dep-1, filecount)
        generatedFiles(subDir, filecount)
    }
}

func generatedFiles(path string, count int) error {
    for i := 0; i < count; i++ {
        filePath := filepath.Join(path, "file"+strconv.Itoa(i)+".txt")
        file, err := os.Create(filePath)
        if err != nil {
            return err
        }
        // Fill the file with repeating ASCII characters
        content := make([]byte, 1024) // 1KB file
        for j := range content {
            content[j] = byte('a' + j%26) // Fill with 'a' to 'z'
        }
        _, err = file.Write(content)
        if err != nil {
            return err
        }
        file.Close()
    }
    return nil
}

// performance tests for directory size calculation
func BenchmarkCalculateDirSize(t *testing.B) {
    // Log the start of the benchmark
    t.Logf("Starting performance test for directory size calculation")
    // Make sure the test is allowed to run
    if testing.Short() {
        t.Skip("Skipping performance test in short mode")
    }
    // Create a tmp directory for testing
    testTempDir := t.TempDir()
    err := os.MkdirAll(testTempDir, 0755)
    if err != nil {
        t.Fatalf("Failed to create test directory: %v", err)
    }
    defer os.RemoveAll(testTempDir) // Clean up after test
    // Build a tree five levels deep with 10 files and 10 directories per level
    generatedTestDir(testTempDir, 5, 10)
    // Initialize the local driver with directory size calculation enabled
    d := &Local{
        directoryMap: DirectoryMap{
            root: testTempDir,
        },
        Addition: Addition{
            DirectorySize: true,
            RootPath: driver.RootPath{
                RootFolderPath: testTempDir,
            },
        },
    }
    // record the start time
    t.StartTimer()
    // Calculate the directory size
    err = d.directoryMap.RecalculateDirSize()
    if err != nil {
        t.Fatalf("Failed to calculate directory size: %v", err)
    }
    // record the end time
    t.StopTimer()
    // Print the size and duration
    node, ok := d.directoryMap.Get(d.directoryMap.root)
    if !ok {
        t.Fatalf("Failed to get root node from directory map")
    }
    t.Logf("Directory size: %d bytes", node.fileSum+node.directorySum)
    t.Logf("Performance test completed successfully")
}

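A benchmark like the new file above is typically invoked with the -bench flag, e.g.:

go test -run '^$' -bench BenchmarkCalculateDirSize ./drivers/local/

Worth noting: the function takes a *testing.B (named t here) but never loops over its N field, so the reported time reflects a single RecalculateDirSize pass over the generated tree rather than a per-iteration Go-benchmark average.
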
@@ -33,6 +33,9 @@ type Local struct {
    Addition
    mkdirPerm int32

    // directory size data
    directoryMap DirectoryMap

    // zero means no limit
    thumbConcurrency int
    thumbTokenBucket TokenBucket

@@ -66,6 +69,15 @@ func (d *Local) Init(ctx context.Context) error {
        }
        d.Addition.RootFolderPath = abs
    }
    if d.DirectorySize {
        d.directoryMap.root = d.GetRootPath()
        _, err := d.directoryMap.CalculateDirSize(d.GetRootPath())
        if err != nil {
            return err
        }
    } else {
        d.directoryMap.Clear()
    }
    if d.ThumbCacheFolder != "" && !utils.Exists(d.ThumbCacheFolder) {
        err := os.MkdirAll(d.ThumbCacheFolder, os.FileMode(d.mkdirPerm))
        if err != nil {

@@ -124,6 +136,9 @@ func (d *Local) GetAddition() driver.Additional {
func (d *Local) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
    fullPath := dir.GetPath()
    rawFiles, err := readDir(fullPath)
    if d.DirectorySize && args.Refresh {
        d.directoryMap.RecalculateDirSize()
    }
    if err != nil {
        return nil, err
    }

@@ -147,7 +162,12 @@ func (d *Local) FileInfoToObj(ctx context.Context, f fs.FileInfo, reqPath string
    }
    isFolder := f.IsDir() || isSymlinkDir(f, fullPath)
    var size int64
    if !isFolder {
    if isFolder {
        node, ok := d.directoryMap.Get(filepath.Join(fullPath, f.Name()))
        if ok {
            size = node.fileSum + node.directorySum
        }
    } else {
        size = f.Size()
    }
    var ctime time.Time

@@ -186,7 +206,12 @@ func (d *Local) Get(ctx context.Context, path string) (model.Obj, error) {
    isFolder := f.IsDir() || isSymlinkDir(f, path)
    size := f.Size()
    if isFolder {
        size = 0
        node, ok := d.directoryMap.Get(path)
        if ok {
            size = node.fileSum + node.directorySum
        }
    } else {
        size = f.Size()
    }
    var ctime time.Time
    t, err := times.Stat(path)

@@ -245,13 +270,12 @@ func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
        if err != nil {
            return nil, err
        }
        link.ContentLength = file.GetSize()
        link.MFile = open
    }
    if link.MFile != nil && !d.Config().OnlyLinkMFile {
        link.AddIfCloser(link.MFile)
        link.RangeReader = &model.FileRangeReader{
            RangeReaderIF: stream.GetRangeReaderFromMFile(file.GetSize(), link.MFile),
        }
    link.AddIfCloser(link.MFile)
    if !d.Config().OnlyLinkMFile {
        link.RangeReader = stream.GetRangeReaderFromMFile(link.ContentLength, link.MFile)
        link.MFile = nil
    }
    return link, nil

@@ -272,22 +296,31 @@ func (d *Local) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
    if utils.IsSubPath(srcPath, dstPath) {
        return fmt.Errorf("the destination folder is a subfolder of the source folder")
    }
    if err := os.Rename(srcPath, dstPath); err != nil && strings.Contains(err.Error(), "invalid cross-device link") {
        // Handle cross-device file move in local driver
        if err = d.Copy(ctx, srcObj, dstDir); err != nil {
            return err
        } else {
            // Directly remove file without check recycle bin if successfully copied
            if srcObj.IsDir() {
                err = os.RemoveAll(srcObj.GetPath())
            } else {
                err = os.Remove(srcObj.GetPath())
            }
    err := os.Rename(srcPath, dstPath)
    if err != nil && strings.Contains(err.Error(), "invalid cross-device link") {
        // Cross-device move: copy first, then delete
        if err := d.Copy(ctx, srcObj, dstDir); err != nil {
            return err
        }
    } else {
        return err
        // Remove the source file/folder directly after a successful copy
        if srcObj.IsDir() {
            return os.RemoveAll(srcObj.GetPath())
        }
        return os.Remove(srcObj.GetPath())
    }
    if err == nil {
        srcParent := filepath.Dir(srcPath)
        dstParent := filepath.Dir(dstPath)
        if d.directoryMap.Has(srcParent) {
            d.directoryMap.UpdateDirSize(srcParent)
            d.directoryMap.UpdateDirParents(srcParent)
        }
        if d.directoryMap.Has(dstParent) {
            d.directoryMap.UpdateDirSize(dstParent)
            d.directoryMap.UpdateDirParents(dstParent)
        }
    }
    return err
}

func (d *Local) Rename(ctx context.Context, srcObj model.Obj, newName string) error {

@@ -297,6 +330,14 @@ func (d *Local) Rename(ctx context.Context, srcObj model.Obj, newName string) er
    if err != nil {
        return err
    }

    if srcObj.IsDir() {
        if d.directoryMap.Has(srcPath) {
            d.directoryMap.DeleteDirNode(srcPath)
            d.directoryMap.CalculateDirSize(dstPath)
        }
    }

    return nil
}

@@ -307,11 +348,21 @@ func (d *Local) Copy(_ context.Context, srcObj, dstDir model.Obj) error {
        return fmt.Errorf("the destination folder is a subfolder of the source folder")
    }
    // Copy using otiai10/copy to perform more secure & efficient copy
    return cp.Copy(srcPath, dstPath, cp.Options{
    err := cp.Copy(srcPath, dstPath, cp.Options{
        Sync:          true, // Sync file to disk after copy, may have performance penalty in filesystem such as ZFS
        PreserveTimes: true,
        PreserveOwner: true,
    })
    if err != nil {
        return err
    }

    if d.directoryMap.Has(filepath.Dir(dstPath)) {
        d.directoryMap.UpdateDirSize(filepath.Dir(dstPath))
        d.directoryMap.UpdateDirParents(filepath.Dir(dstPath))
    }

    return nil
}

func (d *Local) Remove(ctx context.Context, obj model.Obj) error {

@@ -332,6 +383,19 @@ func (d *Local) Remove(ctx context.Context, obj model.Obj) error {
    if err != nil {
        return err
    }
    if obj.IsDir() {
        if d.directoryMap.Has(obj.GetPath()) {
            d.directoryMap.DeleteDirNode(obj.GetPath())
            d.directoryMap.UpdateDirSize(filepath.Dir(obj.GetPath()))
            d.directoryMap.UpdateDirParents(filepath.Dir(obj.GetPath()))
        }
    } else {
        if d.directoryMap.Has(filepath.Dir(obj.GetPath())) {
            d.directoryMap.UpdateDirSize(filepath.Dir(obj.GetPath()))
            d.directoryMap.UpdateDirParents(filepath.Dir(obj.GetPath()))
        }
    }

    return nil
}

@@ -355,6 +419,11 @@ func (d *Local) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
    if err != nil {
        log.Errorf("[local] failed to change time of %s: %s", fullPath, err)
    }
    if d.directoryMap.Has(dstDir.GetPath()) {
        d.directoryMap.UpdateDirSize(dstDir.GetPath())
        d.directoryMap.UpdateDirParents(dstDir.GetPath())
    }

    return nil
}

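The cross-device branch in Move matches on the "invalid cross-device link" message. On POSIX-style systems the same condition can also be detected structurally via the EXDEV errno, which errors.Is finds through the *os.LinkError that os.Rename returns; a sketch of that alternative check (not what this diff does, paths hypothetical):

package main

import (
    "errors"
    "fmt"
    "os"
    "syscall"
)

// isCrossDevice reports whether err is the EXDEV failure that os.Rename
// returns when source and destination live on different filesystems,
// i.e. the case that forces the copy-then-delete fallback above.
func isCrossDevice(err error) bool {
    return errors.Is(err, syscall.EXDEV)
}

func main() {
    err := os.Rename("/tmp/example-src", "/mnt/other/example-dst")
    fmt.Println("cross-device:", isCrossDevice(err), "err:", err)
}
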
@@ -7,6 +7,7 @@ import (

type Addition struct {
    driver.RootPath
    DirectorySize    bool   `json:"directory_size" default:"false" help:"This might impact host performance"`
    Thumbnail        bool   `json:"thumbnail" required:"true" help:"enable thumbnail"`
    ThumbCacheFolder string `json:"thumb_cache_folder"`
    ThumbConcurrency string `json:"thumb_concurrency" default:"16" required:"false" help:"Number of concurrent thumbnail generation goroutines. This controls how many thumbnails can be generated in parallel."`

@@ -27,6 +28,8 @@ var config = driver.Config{

func init() {
    op.RegisterDriver(func() driver.Driver {
        return &Local{}
        return &Local{
            directoryMap: DirectoryMap{},
        }
    })
}

@@ -8,9 +8,11 @@ import (
    "os"
    "path/filepath"
    "runtime"
    "slices"
    "sort"
    "strconv"
    "strings"
    "sync"

    "github.com/OpenListTeam/OpenList/v4/internal/conf"
    "github.com/OpenListTeam/OpenList/v4/internal/model"

@@ -153,3 +155,253 @@ func (d *Local) getThumb(file model.Obj) (*bytes.Buffer, *string, error) {
    }
    return &buf, nil, nil
}

type DirectoryMap struct {
    root string
    data sync.Map
}

type DirectoryNode struct {
    fileSum      int64
    directorySum int64
    children     []string
}

type DirectoryTask struct {
    path  string
    cache *DirectoryTaskCache
}

type DirectoryTaskCache struct {
    fileSum  int64
    children []string
}

func (m *DirectoryMap) Has(path string) bool {
    _, ok := m.data.Load(path)

    return ok
}

func (m *DirectoryMap) Get(path string) (*DirectoryNode, bool) {
    value, ok := m.data.Load(path)
    if !ok {
        return &DirectoryNode{}, false
    }

    node, ok := value.(*DirectoryNode)
    if !ok {
        return &DirectoryNode{}, false
    }

    return node, true
}

func (m *DirectoryMap) Set(path string, node *DirectoryNode) {
    m.data.Store(path, node)
}

func (m *DirectoryMap) Delete(path string) {
    m.data.Delete(path)
}

func (m *DirectoryMap) Clear() {
    m.data.Clear()
}

func (m *DirectoryMap) RecalculateDirSize() error {
    m.Clear()
    if m.root == "" {
        return fmt.Errorf("root path is not set")
    }

    size, err := m.CalculateDirSize(m.root)
    if err != nil {
        return err
    }

    if node, ok := m.Get(m.root); ok {
        node.fileSum = size
        node.directorySum = size
    }

    return nil
}

func (m *DirectoryMap) CalculateDirSize(dirname string) (int64, error) {
    stack := []DirectoryTask{
        {path: dirname},
    }

    for len(stack) > 0 {
        task := stack[len(stack)-1]
        stack = stack[:len(stack)-1]

        if task.cache != nil {
            directorySum := int64(0)

            for _, filename := range task.cache.children {
                child, ok := m.Get(filepath.Join(task.path, filename))
                if !ok {
                    return 0, fmt.Errorf("child node not found")
                }
                directorySum += child.fileSum + child.directorySum
            }

            m.Set(task.path, &DirectoryNode{
                fileSum:      task.cache.fileSum,
                directorySum: directorySum,
                children:     task.cache.children,
            })

            continue
        }

        files, err := readDir(task.path)
        if err != nil {
            return 0, err
        }

        fileSum := int64(0)
        directorySum := int64(0)

        children := []string{}
        queue := []DirectoryTask{}

        for _, f := range files {
            fullpath := filepath.Join(task.path, f.Name())
            isFolder := f.IsDir() || isSymlinkDir(f, fullpath)

            if isFolder {
                if node, ok := m.Get(fullpath); ok {
                    directorySum += node.fileSum + node.directorySum
                } else {
                    queue = append(queue, DirectoryTask{
                        path: fullpath,
                    })
                }

                children = append(children, f.Name())
            } else {
                fileSum += f.Size()
            }
        }

        if len(queue) > 0 {
            stack = append(stack, DirectoryTask{
                path: task.path,
                cache: &DirectoryTaskCache{
                    fileSum:  fileSum,
                    children: children,
                },
            })

            stack = append(stack, queue...)

            continue
        }

        m.Set(task.path, &DirectoryNode{
            fileSum:      fileSum,
            directorySum: directorySum,
            children:     children,
        })
    }

    if node, ok := m.Get(dirname); ok {
        return node.fileSum + node.directorySum, nil
    }

    return 0, nil
}

func (m *DirectoryMap) UpdateDirSize(dirname string) (int64, error) {
    node, ok := m.Get(dirname)
    if !ok {
        return 0, fmt.Errorf("directory node not found")
    }

    files, err := readDir(dirname)
    if err != nil {
        return 0, err
    }
    fileSum := int64(0)
    directorySum := int64(0)

    children := []string{}

    for _, f := range files {
        fullpath := filepath.Join(dirname, f.Name())
        isFolder := f.IsDir() || isSymlinkDir(f, fullpath)

        if isFolder {
            if node, ok := m.Get(fullpath); ok {
                directorySum += node.fileSum + node.directorySum
            } else {
                value, err := m.CalculateDirSize(fullpath)
                if err != nil {
                    return 0, err
                }
                directorySum += value
            }

            children = append(children, f.Name())
        } else {
            fileSum += f.Size()
        }
    }

    for _, c := range node.children {
        if !slices.Contains(children, c) {
            m.DeleteDirNode(filepath.Join(dirname, c))
        }
    }

    node.fileSum = fileSum
    node.directorySum = directorySum
    node.children = children

    return fileSum + directorySum, nil
}

func (m *DirectoryMap) UpdateDirParents(dirname string) error {
    parentPath := filepath.Dir(dirname)
    for parentPath != m.root && !strings.HasPrefix(m.root, parentPath) {
        if node, ok := m.Get(parentPath); ok {
            directorySum := int64(0)

            for _, c := range node.children {
                child, ok := m.Get(filepath.Join(parentPath, c))
                if !ok {
                    return fmt.Errorf("child node not found")
                }
                directorySum += child.fileSum + child.directorySum
            }

            node.directorySum = directorySum
        }

        parentPath = filepath.Dir(parentPath)
    }

    return nil
}

func (m *DirectoryMap) DeleteDirNode(dirname string) error {
    stack := []string{dirname}

    for len(stack) > 0 {
        current := stack[len(stack)-1]
        stack = stack[:len(stack)-1]

        if node, ok := m.Get(current); ok {
            for _, filename := range node.children {
                stack = append(stack, filepath.Join(current, filename))
            }

            m.Delete(current)
        }
    }

    return nil
}

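CalculateDirSize deliberately avoids recursion: a directory is first popped with no cache attached, which sums its plain files and pushes any unvisited subdirectories; it is then re-pushed carrying a DirectoryTaskCache so that, once those children exist in the map, their totals are folded into the parent. A sketch of the intended call sequence from inside package local (the root path is hypothetical):

package local

import (
    "fmt"
    "log"
)

// ExampleDirectoryMap: build the whole tree once, read the cached total,
// then refresh a single branch incrementally after a write.
func ExampleDirectoryMap() {
    m := DirectoryMap{root: "/srv/data"}
    if _, err := m.CalculateDirSize(m.root); err != nil {
        log.Fatal(err)
    }
    if node, ok := m.Get(m.root); ok {
        fmt.Printf("total: %d bytes\n", node.fileSum+node.directorySum)
    }
    // After writing into /srv/data/movies, refresh just that branch
    // and propagate the new sums up toward the root:
    m.UpdateDirSize("/srv/data/movies")
    m.UpdateDirParents("/srv/data/movies")
}
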
@@ -4,6 +4,7 @@ import (
    "context"
    "errors"
    "io"
    "net/http"
    "time"

    "github.com/go-resty/resty/v2"

@@ -72,7 +73,7 @@ func (d *Misskey) getFiles(dir model.Obj) ([]model.Obj, error) {
    } else {
        body = map[string]string{}
    }
    err := d.request("/files", "POST", setBody(body), &files)
    err := d.request("/files", http.MethodPost, setBody(body), &files)
    if err != nil {
        return []model.Obj{}, err
    }

@@ -89,7 +90,7 @@ func (d *Misskey) getFolders(dir model.Obj) ([]model.Obj, error) {
    } else {
        body = map[string]string{}
    }
    err := d.request("/folders", "POST", setBody(body), &folders)
    err := d.request("/folders", http.MethodPost, setBody(body), &folders)
    if err != nil {
        return []model.Obj{}, err
    }

@@ -106,7 +107,7 @@ func (d *Misskey) list(dir model.Obj) ([]model.Obj, error) {

func (d *Misskey) link(file model.Obj) (*model.Link, error) {
    var mFile MFile
    err := d.request("/files/show", "POST", setBody(map[string]string{"fileId": file.GetID()}), &mFile)
    err := d.request("/files/show", http.MethodPost, setBody(map[string]string{"fileId": file.GetID()}), &mFile)
    if err != nil {
        return nil, err
    }

@@ -117,7 +118,7 @@ func (d *Misskey) link(file model.Obj) (*model.Link, error) {

func (d *Misskey) makeDir(parentDir model.Obj, dirName string) (model.Obj, error) {
    var folder MFolder
    err := d.request("/folders/create", "POST", setBody(map[string]interface{}{"parentId": handleFolderId(parentDir), "name": dirName}), &folder)
    err := d.request("/folders/create", http.MethodPost, setBody(map[string]interface{}{"parentId": handleFolderId(parentDir), "name": dirName}), &folder)
    if err != nil {
        return nil, err
    }

@@ -127,11 +128,11 @@ func (d *Misskey) makeDir(parentDir model.Obj, dirName string) (model.Obj, error
func (d *Misskey) move(srcObj, dstDir model.Obj) (model.Obj, error) {
    if srcObj.IsDir() {
        var folder MFolder
        err := d.request("/folders/update", "POST", setBody(map[string]interface{}{"folderId": srcObj.GetID(), "parentId": handleFolderId(dstDir)}), &folder)
        err := d.request("/folders/update", http.MethodPost, setBody(map[string]interface{}{"folderId": srcObj.GetID(), "parentId": handleFolderId(dstDir)}), &folder)
        return mFolder2Object(folder), err
    } else {
        var file MFile
        err := d.request("/files/update", "POST", setBody(map[string]interface{}{"fileId": srcObj.GetID(), "folderId": handleFolderId(dstDir)}), &file)
        err := d.request("/files/update", http.MethodPost, setBody(map[string]interface{}{"fileId": srcObj.GetID(), "folderId": handleFolderId(dstDir)}), &file)
        return mFile2Object(file), err
    }
}

@@ -139,11 +140,11 @@ func (d *Misskey) move(srcObj, dstDir model.Obj) (model.Obj, error) {
func (d *Misskey) rename(srcObj model.Obj, newName string) (model.Obj, error) {
    if srcObj.IsDir() {
        var folder MFolder
        err := d.request("/folders/update", "POST", setBody(map[string]string{"folderId": srcObj.GetID(), "name": newName}), &folder)
        err := d.request("/folders/update", http.MethodPost, setBody(map[string]string{"folderId": srcObj.GetID(), "name": newName}), &folder)
        return mFolder2Object(folder), err
    } else {
        var file MFile
        err := d.request("/files/update", "POST", setBody(map[string]string{"fileId": srcObj.GetID(), "name": newName}), &file)
        err := d.request("/files/update", http.MethodPost, setBody(map[string]string{"fileId": srcObj.GetID(), "name": newName}), &file)
        return mFile2Object(file), err
    }
}

@@ -171,7 +172,7 @@ func (d *Misskey) copy(srcObj, dstDir model.Obj) (model.Obj, error) {
    if err != nil {
        return nil, err
    }
    err = d.request("/files/upload-from-url", "POST", setBody(map[string]interface{}{"url": url.URL, "folderId": handleFolderId(dstDir)}), &file)
    err = d.request("/files/upload-from-url", http.MethodPost, setBody(map[string]interface{}{"url": url.URL, "folderId": handleFolderId(dstDir)}), &file)
    if err != nil {
        return nil, err
    }

@@ -181,10 +182,10 @@ func (d *Misskey) copy(srcObj, dstDir model.Obj) (model.Obj, error) {

func (d *Misskey) remove(obj model.Obj) error {
    if obj.IsDir() {
        err := d.request("/folders/delete", "POST", setBody(map[string]string{"folderId": obj.GetID()}), nil)
        err := d.request("/folders/delete", http.MethodPost, setBody(map[string]string{"folderId": obj.GetID()}), nil)
        return err
    } else {
        err := d.request("/files/delete", "POST", setBody(map[string]string{"fileId": obj.GetID()}), nil)
        err := d.request("/files/delete", http.MethodPost, setBody(map[string]string{"fileId": obj.GetID()}), nil)
        return err
    }
}

@@ -55,9 +55,7 @@ func (lrc *LyricObj) getProxyLink(ctx context.Context) *model.Link {

func (lrc *LyricObj) getLyricLink() *model.Link {
    return &model.Link{
        RangeReader: &model.FileRangeReader{
            RangeReaderIF: stream.GetRangeReaderFromMFile(int64(len(lrc.lyric)), strings.NewReader(lrc.lyric)),
        },
        RangeReader: stream.GetRangeReaderFromMFile(int64(len(lrc.lyric)), strings.NewReader(lrc.lyric)),
    }
}

@@ -1,7 +1,6 @@
package onedrive

import (
    "bytes"
    "context"
    "errors"
    "fmt"

@@ -15,6 +14,7 @@ import (
    "github.com/OpenListTeam/OpenList/v4/internal/errs"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/internal/op"
    streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
    "github.com/avast/retry-go"
    "github.com/go-resty/resty/v2"

@@ -241,23 +241,25 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil
    uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
    var finish int64 = 0
    DEFAULT := d.ChunkSize * 1024 * 1024
    ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT))
    if err != nil {
        return err
    }
    for finish < stream.GetSize() {
        if utils.IsCanceled(ctx) {
            return ctx.Err()
        }
        left := stream.GetSize() - finish
        byteSize := min(left, DEFAULT)
        utils.Log.Debugf("[Onedrive] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
        rd, err := ss.GetSectionReader(finish, byteSize)
        if err != nil {
            return err
        }
        err = retry.Do(
            func() error {
                utils.Log.Debugf("[Onedrive] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
                byteData := make([]byte, byteSize)
                n, err := io.ReadFull(stream, byteData)
                utils.Log.Debug(err, n)
                if err != nil {
                    return err
                }
                req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl,
                    driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
                rd.Seek(0, io.SeekStart)
                req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, rd))
                if err != nil {
                    return err
                }

@@ -283,6 +285,7 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil
            retry.DelayType(retry.BackOffDelay),
            retry.Delay(time.Second),
        )
        ss.RecycleSectionReader(rd)
        if err != nil {
            return err
        }

@@ -1,7 +1,6 @@
package onedrive_app

import (
    "bytes"
    "context"
    "errors"
    "fmt"

@@ -15,6 +14,7 @@ import (
    "github.com/OpenListTeam/OpenList/v4/internal/errs"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/internal/op"
    streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
    "github.com/avast/retry-go"
    "github.com/go-resty/resty/v2"

@@ -155,23 +155,25 @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model.
    uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
    var finish int64 = 0
    DEFAULT := d.ChunkSize * 1024 * 1024
    ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT))
    if err != nil {
        return err
    }
    for finish < stream.GetSize() {
        if utils.IsCanceled(ctx) {
            return ctx.Err()
        }
        left := stream.GetSize() - finish
        byteSize := min(left, DEFAULT)
        utils.Log.Debugf("[OnedriveAPP] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
        rd, err := ss.GetSectionReader(finish, byteSize)
        if err != nil {
            return err
        }
        err = retry.Do(
            func() error {
                utils.Log.Debugf("[OnedriveAPP] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
                byteData := make([]byte, byteSize)
                n, err := io.ReadFull(stream, byteData)
                utils.Log.Debug(err, n)
                if err != nil {
                    return err
                }
                req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl,
                    driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
                rd.Seek(0, io.SeekStart)
                req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, rd))
                if err != nil {
                    return err
                }

@@ -197,6 +199,7 @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model.
            retry.DelayType(retry.BackOffDelay),
            retry.Delay(time.Second),
        )
        ss.RecycleSectionReader(rd)
        if err != nil {
            return err
        }

@@ -38,14 +38,14 @@ func (d *OnedriveSharelink) Init(ctx context.Context) error {
    d.cron = cron.NewCron(time.Hour * 1)
    d.cron.Do(func() {
        var err error
        d.Headers, err = d.getHeaders()
        d.Headers, err = d.getHeaders(ctx)
        if err != nil {
            log.Errorf("%+v", err)
        }
    })

    // Get initial headers
    d.Headers, err = d.getHeaders()
    d.Headers, err = d.getHeaders(ctx)
    if err != nil {
        return err
    }

@@ -59,7 +59,7 @@ func (d *OnedriveSharelink) Drop(ctx context.Context) error {

func (d *OnedriveSharelink) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
    path := dir.GetPath()
    files, err := d.getFiles(path)
    files, err := d.getFiles(ctx, path)
    if err != nil {
        return nil, err
    }

@@ -82,7 +82,7 @@ func (d *OnedriveSharelink) Link(ctx context.Context, file model.Obj, args model
    if d.HeaderTime < time.Now().Unix()-1800 {
        var err error
        log.Debug("headers are older than 30 minutes, get new headers")
        header, err = d.getHeaders()
        header, err = d.getHeaders(ctx)
        if err != nil {
            return nil, err
        }

@@ -1,6 +1,7 @@
package onedrive_sharelink

import (
    "context"
    "crypto/tls"
    "encoding/json"
    "fmt"

@@ -131,7 +132,7 @@ func getAttrValue(n *html.Node, key string) string {
}

// getHeaders constructs and returns the necessary HTTP headers for accessing the OneDrive share link
func (d *OnedriveSharelink) getHeaders() (http.Header, error) {
func (d *OnedriveSharelink) getHeaders(ctx context.Context) (http.Header, error) {
    header := http.Header{}
    header.Set("User-Agent", base.UserAgent)
    header.Set("accept-language", "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6")

@@ -142,7 +143,7 @@ func (d *OnedriveSharelink) getHeaders() (http.Header, error) {
    if d.ShareLinkPassword == "" {
        // Create a no-redirect client
        clientNoDirect := NewNoRedirectCLient()
        req, err := http.NewRequest("GET", d.ShareLinkURL, nil)
        req, err := http.NewRequestWithContext(ctx, http.MethodGet, d.ShareLinkURL, nil)
        if err != nil {
            return nil, err
        }

@@ -180,9 +181,9 @@ func (d *OnedriveSharelink) getHeaders() (http.Header, error) {
}

// getFiles retrieves the files from the OneDrive share link at the specified path
func (d *OnedriveSharelink) getFiles(path string) ([]Item, error) {
func (d *OnedriveSharelink) getFiles(ctx context.Context, path string) ([]Item, error) {
    clientNoDirect := NewNoRedirectCLient()
    req, err := http.NewRequest("GET", d.ShareLinkURL, nil)
    req, err := http.NewRequestWithContext(ctx, http.MethodGet, d.ShareLinkURL, nil)
    if err != nil {
        return nil, err
    }

@@ -221,11 +222,11 @@ func (d *OnedriveSharelink) getFiles(path string) ([]Item, error) {
    // Get redirectUrl
    answer, err := clientNoDirect.Do(req)
    if err != nil {
        d.Headers, err = d.getHeaders()
        d.Headers, err = d.getHeaders(ctx)
        if err != nil {
            return nil, err
        }
        return d.getFiles(path)
        return d.getFiles(ctx, path)
    }
    defer answer.Body.Close()
    re := regexp.MustCompile(`templateUrl":"(.*?)"`)

@@ -290,7 +291,7 @@ func (d *OnedriveSharelink) getFiles(path string) ([]Item, error) {

    client := &http.Client{}
    postUrl := strings.Join(redirectSplitURL[:len(redirectSplitURL)-3], "/") + "/_api/v2.1/graphql"
    req, err = http.NewRequest("POST", postUrl, strings.NewReader(graphqlVar))
    req, err = http.NewRequest(http.MethodPost, postUrl, strings.NewReader(graphqlVar))
    if err != nil {
        return nil, err
    }

@@ -298,11 +299,11 @@ func (d *OnedriveSharelink) getFiles(path string) ([]Item, error) {

    resp, err := client.Do(req)
    if err != nil {
        d.Headers, err = d.getHeaders()
        d.Headers, err = d.getHeaders(ctx)
        if err != nil {
            return nil, err
        }
        return d.getFiles(path)
        return d.getFiles(ctx, path)
    }
    defer resp.Body.Close()
    var graphqlReq GraphQLRequest

@@ -323,31 +324,31 @@ func (d *OnedriveSharelink) getFiles(path string) ([]Item, error) {

    graphqlReqNEW := GraphQLNEWRequest{}
    postUrl = strings.Join(redirectSplitURL[:len(redirectSplitURL)-3], "/") + "/_api/web/GetListUsingPath(DecodedUrl=@a1)/RenderListDataAsStream" + nextHref
    req, _ = http.NewRequest("POST", postUrl, strings.NewReader(renderListDataAsStreamVar))
    req, _ = http.NewRequest(http.MethodPost, postUrl, strings.NewReader(renderListDataAsStreamVar))
    req.Header = tempHeader

    resp, err := client.Do(req)
    if err != nil {
        d.Headers, err = d.getHeaders()
        d.Headers, err = d.getHeaders(ctx)
        if err != nil {
            return nil, err
        }
        return d.getFiles(path)
        return d.getFiles(ctx, path)
    }
    defer resp.Body.Close()
    json.NewDecoder(resp.Body).Decode(&graphqlReqNEW)
    for graphqlReqNEW.ListData.NextHref != "" {
        graphqlReqNEW = GraphQLNEWRequest{}
        postUrl = strings.Join(redirectSplitURL[:len(redirectSplitURL)-3], "/") + "/_api/web/GetListUsingPath(DecodedUrl=@a1)/RenderListDataAsStream" + nextHref
        req, _ = http.NewRequest("POST", postUrl, strings.NewReader(renderListDataAsStreamVar))
        req, _ = http.NewRequest(http.MethodPost, postUrl, strings.NewReader(renderListDataAsStreamVar))
        req.Header = tempHeader
        resp, err := client.Do(req)
        if err != nil {
            d.Headers, err = d.getHeaders()
            d.Headers, err = d.getHeaders(ctx)
            if err != nil {
                return nil, err
            }
            return d.getFiles(path)
            return d.getFiles(ctx, path)
        }
        defer resp.Body.Close()
        json.NewDecoder(resp.Body).Decode(&graphqlReqNEW)

@@ -8,14 +8,15 @@ import (
    "encoding/hex"
    "errors"
    "fmt"
    "github.com/OpenListTeam/OpenList/v4/pkg/http_range"
    "github.com/google/uuid"
    "io"
    "net/http"
    "strconv"
    "strings"
    "time"

    "github.com/OpenListTeam/OpenList/v4/pkg/http_range"
    "github.com/google/uuid"

    "github.com/OpenListTeam/OpenList/v4/drivers/base"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/internal/op"

@@ -244,11 +245,8 @@ func (d *QuarkOpen) generateProofCode(file model.FileStreamer, proofSeed string,
    // Read the data
    buf := make([]byte, length)
    n, err := io.ReadFull(reader, buf)
    if errors.Is(err, io.ErrUnexpectedEOF) {
        return "", fmt.Errorf("can't read data, expected=%d, got=%d", length, n)
    }
    if err != nil {
        return "", fmt.Errorf("failed to read data: %w", err)
    if n != int(length) {
        return "", fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", length, n, err)
    }

    // Base64-encode the result
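The tightened check above gates on a single n != length comparison, reporting both counts while %w preserves whatever error io.ReadFull produced. The same read-exactly-then-encode step in isolation (readProof is a hypothetical stand-in for the proof-code body):

package main

import (
    "bytes"
    "encoding/base64"
    "fmt"
    "io"
)

// readProof reads exactly length bytes from r and returns them base64-encoded,
// mirroring the gate above: one n != length comparison instead of per-error cases.
func readProof(r io.Reader, length int64) (string, error) {
    buf := make([]byte, length)
    n, err := io.ReadFull(r, buf)
    if n != int(length) {
        return "", fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", length, n, err)
    }
    return base64.StdEncoding.EncodeToString(buf), nil
}

func main() {
    src := bytes.NewReader([]byte("0123456789"))
    proof, err := readProof(src, 8)
    if err != nil {
        panic(err)
    }
    fmt.Println(proof) // MDEyMzQ1Njc=
}
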
@@ -3,6 +3,7 @@ package quark_uc_tv
import (
    "context"
    "fmt"
    "net/http"
    "strconv"
    "time"

@@ -96,7 +97,7 @@ func (d *QuarkUCTV) List(ctx context.Context, dir model.Obj, args model.ListArgs
    pageSize := int64(100)
    for {
        var filesData FilesData
        _, err := d.request(ctx, "/file", "GET", func(req *resty.Request) {
        _, err := d.request(ctx, "/file", http.MethodGet, func(req *resty.Request) {
            req.SetQueryParams(map[string]string{
                "method":     "list",
                "parent_fid": dir.GetID(),

@@ -95,7 +95,7 @@ func (d *QuarkUCTV) getLoginCode(ctx context.Context) (string, error) {
        QrData     string `json:"qr_data"`
        QueryToken string `json:"query_token"`
    }
    _, err := d.request(ctx, pathname, "GET", func(req *resty.Request) {
    _, err := d.request(ctx, pathname, http.MethodGet, func(req *resty.Request) {
        req.SetQueryParams(map[string]string{
            "auth_type": "code",
            "client_id": d.conf.clientID,

@@ -123,7 +123,7 @@ func (d *QuarkUCTV) getCode(ctx context.Context) (string, error) {
        CommonRsp
        Code string `json:"code"`
    }
    _, err := d.request(ctx, pathname, "GET", func(req *resty.Request) {
    _, err := d.request(ctx, pathname, http.MethodGet, func(req *resty.Request) {
        req.SetQueryParams(map[string]string{
            "client_id": d.conf.clientID,
            "scope":     "netdisk",

@@ -138,7 +138,7 @@ func (d *QuarkUCTV) getCode(ctx context.Context) (string, error) {

func (d *QuarkUCTV) getRefreshTokenByTV(ctx context.Context, code string, isRefresh bool) error {
    pathname := "/token"
    _, _, reqID := d.generateReqSign("POST", pathname, d.conf.signKey)
    _, _, reqID := d.generateReqSign(http.MethodPost, pathname, d.conf.signKey)
    u := d.conf.codeApi + pathname
    var resp RefreshTokenAuthResp
    body := map[string]string{

@@ -38,7 +38,7 @@ func getCredentials(AccessKey, SecretKey string) (rst Credentials, err error) {
    sign := hex.EncodeToString(hmacObj.Sum(nil))
    Authorization := "TOKEN " + AccessKey + ":" + sign

    req, err := http.NewRequest("POST", "https://api.dogecloud.com"+apiPath, strings.NewReader(string(reqBody)))
    req, err := http.NewRequest(http.MethodPost, "https://api.dogecloud.com"+apiPath, strings.NewReader(string(reqBody)))
    if err != nil {
        return rst, err
    }

@@ -63,20 +63,20 @@ func (d *SFTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*
    if err != nil {
        return nil, err
    }
    if remoteFile != nil && !d.Config().OnlyLinkMFile {
    mFile := &stream.RateLimitFile{
        File:    remoteFile,
        Limiter: stream.ServerDownloadLimit,
        Ctx:     ctx,
    }
    if !d.Config().OnlyLinkMFile {
        return &model.Link{
            RangeReader: &model.FileRangeReader{
                RangeReaderIF: stream.RateLimitRangeReaderFunc(stream.GetRangeReaderFromMFile(file.GetSize(), remoteFile)),
            },
            RangeReader: stream.GetRangeReaderFromMFile(file.GetSize(), mFile),
            SyncClosers: utils.NewSyncClosers(remoteFile),
        }, nil
    }
    return &model.Link{
        MFile: &stream.RateLimitFile{
            File:    remoteFile,
            Limiter: stream.ServerDownloadLimit,
            Ctx:     ctx,
        },
        MFile:       mFile,
        SyncClosers: utils.NewSyncClosers(remoteFile),
    }, nil
}

@@ -13,8 +13,8 @@ import (
// do others that are not defined in the Driver interface

func (d *SFTP) initClient() error {
    err, _, _ := singleflight.ErrorGroup.Do(fmt.Sprintf("SFTP.initClient:%p", d), func() (error, error) {
        return d._initClient(), nil
    _, err, _ := singleflight.AnyGroup.Do(fmt.Sprintf("SFTP.initClient:%p", d), func() (any, error) {
        return nil, d._initClient()
    })
    return err
}

@@ -81,19 +81,20 @@ func (d *SMB) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*m
        return nil, err
    }
    d.updateLastConnTime()
    if remoteFile != nil && !d.Config().OnlyLinkMFile {
    mFile := &stream.RateLimitFile{
        File:    remoteFile,
        Limiter: stream.ServerDownloadLimit,
        Ctx:     ctx,
    }
    if !d.Config().OnlyLinkMFile {
        return &model.Link{
            RangeReader: &model.FileRangeReader{
                RangeReaderIF: stream.RateLimitRangeReaderFunc(stream.GetRangeReaderFromMFile(file.GetSize(), remoteFile)),
            },
            RangeReader: stream.GetRangeReaderFromMFile(file.GetSize(), mFile),
            SyncClosers: utils.NewSyncClosers(remoteFile),
        }, nil
    }
    return &model.Link{
        MFile: &stream.RateLimitFile{
            File:    remoteFile,
            Limiter: stream.ServerDownloadLimit,
            Ctx:     ctx,
        },
        MFile:       mFile,
        SyncClosers: utils.NewSyncClosers(remoteFile),
    }, nil
}

@@ -28,8 +28,8 @@ func (d *SMB) getLastConnTime() time.Time {
}

func (d *SMB) initFS() error {
    err, _, _ := singleflight.ErrorGroup.Do(fmt.Sprintf("SMB.initFS:%p", d), func() (error, error) {
        return d._initFS(), nil
    _, err, _ := singleflight.AnyGroup.Do(fmt.Sprintf("SMB.initFS:%p", d), func() (any, error) {
        return nil, d._initFS()
    })
    return err
}

@@ -3,13 +3,17 @@ package strm

import (
    "context"
    "errors"
    "fmt"
    stdpath "path"
    "strings"

    "github.com/OpenListTeam/OpenList/v4/internal/driver"
    "github.com/OpenListTeam/OpenList/v4/internal/errs"
    "github.com/OpenListTeam/OpenList/v4/internal/fs"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/internal/sign"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
    "github.com/OpenListTeam/OpenList/v4/server/common"
)

type Strm struct {

@@ -18,6 +22,9 @@ type Strm struct {
    pathMap     map[string][]string
    autoFlatten bool
    oneKey      string

    supportSuffix  map[string]struct{}
    downloadSuffix map[string]struct{}
}

func (d *Strm) Config() driver.Config {

@@ -51,12 +58,24 @@ func (d *Strm) Init(ctx context.Context) error {
        d.autoFlatten = false
    }

    d.supportSuffix = supportSuffix()
    if d.FilterFileTypes != "" {
        types := strings.Split(d.FilterFileTypes, ",")
        for _, ext := range types {
            ext = strings.ToLower(strings.TrimSpace(ext))
            if ext != "" {
                supportSuffix[ext] = struct{}{}
                d.supportSuffix[ext] = struct{}{}
            }
        }
    }

    d.downloadSuffix = downloadSuffix()
    if d.DownloadFileTypes != "" {
        downloadTypes := strings.Split(d.DownloadFileTypes, ",")
        for _, ext := range downloadTypes {
            ext = strings.ToLower(strings.TrimSpace(ext))
            if ext != "" {
                d.downloadSuffix[ext] = struct{}{}
            }
        }
    }

@@ -65,6 +84,8 @@ func (d *Strm) Init(ctx context.Context) error {

func (d *Strm) Drop(ctx context.Context) error {
    d.pathMap = nil
    d.downloadSuffix = nil
    d.supportSuffix = nil
    return nil
}

@@ -82,10 +103,25 @@ func (d *Strm) Get(ctx context.Context, path string) (model.Obj, error) {
        return nil, errs.ObjectNotFound
    }
    for _, dst := range dsts {
        obj, err := d.get(ctx, path, dst, sub)
        if err == nil {
            return obj, nil
        reqPath := stdpath.Join(dst, sub)
        obj, err := fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
        if err != nil {
            continue
        }
        // fs.Get succeeded, so this is not a strm-generated path and must be returned directly
        size := int64(0)
        if !obj.IsDir() {
            size = obj.GetSize()
            path = reqPath // switch to the real path so Link can read it directly
        }
        return &model.Object{
            Path:     path,
            Name:     obj.GetName(),
            Size:     size,
            Modified: obj.ModTime(),
            IsFolder: obj.IsDir(),
            HashInfo: obj.GetHash(),
        }, nil
    }
    return nil, errs.ObjectNotFound
}

@@ -112,34 +148,33 @@ func (d *Strm) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]
}

func (d *Strm) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
    link := d.getLink(ctx, file.GetPath())
    return &model.Link{
        MFile: strings.NewReader(link),
    }, nil
}
    if file.GetID() == "strm" {
        link := d.getLink(ctx, file.GetPath())
        return &model.Link{
            MFile: strings.NewReader(link),
        }, nil
    }
    // ftp, s3
    if common.GetApiUrl(ctx) == "" {
        args.Redirect = false
    }
    reqPath := file.GetPath()
    link, _, err := d.link(ctx, reqPath, args)
    if err != nil {
        return nil, err
    }

func (d *Strm) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
    return errors.New("strm Driver cannot make dir")
}
    if link == nil {
        return &model.Link{
            URL: fmt.Sprintf("%s/p%s?sign=%s",
                common.GetApiUrl(ctx),
                utils.EncodePath(reqPath, true),
                sign.Sign(reqPath)),
        }, nil
    }

func (d *Strm) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
    return errors.New("strm Driver cannot move file")
}

func (d *Strm) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
    return errors.New("strm Driver cannot rename file")
}

func (d *Strm) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
    return errors.New("strm Driver cannot copy file")
}

func (d *Strm) Remove(ctx context.Context, obj model.Obj) error {
    return errors.New("strm Driver cannot remove file")
}

func (d *Strm) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
    return errors.New("strm Driver cannot put file")
    // link fields were not modified, so it can be returned as-is
    return link, nil
}

var _ driver.Driver = (*Strm)(nil)

@@ -6,11 +6,12 @@ import (
)

type Addition struct {
    Paths           string `json:"paths" required:"true" type:"text"`
    SiteUrl         string `json:"siteUrl" type:"text" required:"false" help:"The prefix URL of the strm file"`
    FilterFileTypes string `json:"filterFileTypes" type:"text" default:"strm" required:"false" help:"Supports suffix name of strm file"`
    EncodePath      bool   `json:"encodePath" default:"true" required:"true" help:"encode the path in the strm file"`
    LocalModel      bool   `json:"localModel" default:"false" help:"enable local mode"`
    Paths             string `json:"paths" required:"true" type:"text"`
    SiteUrl           string `json:"siteUrl" type:"text" required:"false" help:"The prefix URL of the strm file"`
    FilterFileTypes   string `json:"filterFileTypes" type:"text" default:"strm" required:"false" help:"Supports suffix name of strm file"`
    DownloadFileTypes string `json:"downloadFileTypes" type:"text" default:"ass" required:"false" help:"Files need to download with strm (usually subtitles)"`
    EncodePath        bool   `json:"encodePath" default:"true" required:"true" help:"encode the path in the strm file"`
    LocalModel        bool   `json:"localModel" default:"false" help:"enable local mode"`
}

var config = driver.Config{

@@ -1,22 +1,36 @@
package strm

var supportSuffix = map[string]struct{}{
    // video
    "mp4":  {},
    "mkv":  {},
    "flv":  {},
    "avi":  {},
    "wmv":  {},
    "ts":   {},
    "rmvb": {},
    "webm": {},
    // audio
    "mp3":  {},
    "flac": {},
    "aac":  {},
    "wav":  {},
    "ogg":  {},
    "m4a":  {},
    "wma":  {},
    "alac": {},
func supportSuffix() map[string]struct{} {
    return map[string]struct{}{
        // video
        "mp4":  {},
        "mkv":  {},
        "flv":  {},
        "avi":  {},
        "wmv":  {},
        "ts":   {},
        "rmvb": {},
        "webm": {},
        // audio
        "mp3":  {},
        "flac": {},
        "aac":  {},
        "wav":  {},
        "ogg":  {},
        "m4a":  {},
        "wma":  {},
        "alac": {},
    }
}

func downloadSuffix() map[string]struct{} {
    return map[string]struct{}{
        // strm
        "strm": {},
        // subtitles
        "ass": {},
        "srt": {},
        "vtt": {},
        "sub": {},
    }
}

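Turning these package-level maps into constructors matters because Init mutates the returned map when FilterFileTypes or DownloadFileTypes is set; with a single shared package variable, suffixes added by one strm storage would leak into every other instance. The hazard in miniature (all names hypothetical):

package main

import "fmt"

var shared = map[string]struct{}{"mp4": {}} // old style: one map for all instances

func fresh() map[string]struct{} { // new style: each caller owns its map
    return map[string]struct{}{"mp4": {}}
}

func main() {
    a, b := shared, shared
    a["mkv"] = struct{}{} // instance A customizes...
    _, leaked := b["mkv"] // ...and instance B sees it
    fmt.Println("shared map leaks:", leaked) // true

    c, d := fresh(), fresh()
    c["mkv"] = struct{}{}
    _, isolated := d["mkv"]
    fmt.Println("fresh map leaks:", isolated) // false
}
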
@ -9,6 +9,7 @@ import (

    "github.com/OpenListTeam/OpenList/v4/internal/fs"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/internal/op"
    "github.com/OpenListTeam/OpenList/v4/internal/sign"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
    "github.com/OpenListTeam/OpenList/v4/server/common"

@ -51,31 +52,6 @@ func (d *Strm) getRootAndPath(path string) (string, string) {
    return parts[0], parts[1]
}

func (d *Strm) get(ctx context.Context, path string, dst, sub string) (model.Obj, error) {
    reqPath := stdpath.Join(dst, sub)
    obj, err := fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
    if err != nil {
        return nil, err
    }
    size := int64(0)
    if !obj.IsDir() {
        if utils.Ext(obj.GetName()) == "strm" {
            size = obj.GetSize()
        } else {
            file := stdpath.Join(reqPath, obj.GetName())
            size = int64(len(d.getLink(ctx, file)))
        }
    }
    return &model.Object{
        Path:     path,
        Name:     obj.GetName(),
        Size:     size,
        Modified: obj.ModTime(),
        IsFolder: obj.IsDir(),
        HashInfo: obj.GetHash(),
    }, nil
}

func (d *Strm) list(ctx context.Context, dst, sub string, args *fs.ListArgs) ([]model.Obj, error) {
    reqPath := stdpath.Join(dst, sub)
    objs, err := fs.List(ctx, reqPath, args)

@ -85,62 +61,57 @@ func (d *Strm) list(ctx context.Context, dst, sub string, args *fs.ListArgs) ([]

    var validObjs []model.Obj
    for _, obj := range objs {
        id, name, path := "", obj.GetName(), ""
        size := int64(0)
        if !obj.IsDir() {
            ext := strings.ToLower(utils.Ext(obj.GetName()))
            if _, ok := supportSuffix[ext]; !ok {
            path = stdpath.Join(reqPath, obj.GetName())
            ext := strings.ToLower(utils.Ext(name))
            if _, ok := d.supportSuffix[ext]; ok {
                id = "strm"
                name = strings.TrimSuffix(name, ext) + "strm"
                size = int64(len(d.getLink(ctx, path)))
            } else if _, ok := d.downloadSuffix[ext]; ok {
                size = obj.GetSize()
            } else {
                continue
            }
        }
        validObjs = append(validObjs, obj)
    }
    return utils.SliceConvert(validObjs, func(obj model.Obj) (model.Obj, error) {
        name := obj.GetName()
        size := int64(0)
        if !obj.IsDir() {
            ext := utils.Ext(name)
            name = strings.TrimSuffix(name, ext) + "strm"
            if ext == "strm" {
                size = obj.GetSize()
            } else {
                file := stdpath.Join(reqPath, obj.GetName())
                size = int64(len(d.getLink(ctx, file)))
            }
        }
        objRes := model.Object{
            ID:       id,
            Path:     path,
            Name:     name,
            Size:     size,
            Modified: obj.ModTime(),
            IsFolder: obj.IsDir(),
            Path:     stdpath.Join(reqPath, obj.GetName()),
        }

        thumb, ok := model.GetThumb(obj)
        if !ok {
            return &objRes, nil
            validObjs = append(validObjs, &objRes)
            continue
        }
        return &model.ObjThumb{

        validObjs = append(validObjs, &model.ObjThumb{
            Object: objRes,
            Thumbnail: model.Thumbnail{
                Thumbnail: thumb,
            },
        }, nil
        })
    })
}
    return validObjs, nil
}

func (d *Strm) getLink(ctx context.Context, path string) string {
    var encodePath string
    finalPath := path
    if d.EncodePath {
        encodePath = utils.EncodePath(path, true)
        finalPath = utils.EncodePath(path, true)
    }
    if d.EnableSign {
        signPath := sign.Sign(path)
        if len(encodePath) > 0 {
            path = fmt.Sprintf("%s?sign=%s", encodePath, signPath)
        } else {
            path = fmt.Sprintf("%s?sign=%s", path, signPath)
        }
        finalPath = fmt.Sprintf("%s?sign=%s", finalPath, signPath)
    }
    if d.LocalModel {
        return path
        return finalPath
    }
    apiUrl := d.SiteUrl
    if len(apiUrl) > 0 {

@ -151,5 +122,23 @@ func (d *Strm) getLink(ctx context.Context, path string) string {

    return fmt.Sprintf("%s/d%s",
        apiUrl,
        path)
        finalPath)
}

func (d *Strm) link(ctx context.Context, reqPath string, args model.LinkArgs) (*model.Link, model.Obj, error) {
    storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
    if err != nil {
        return nil, nil, err
    }
    if !args.Redirect {
        return op.Link(ctx, storage, reqActualPath, args)
    }
    obj, err := fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
    if err != nil {
        return nil, nil, err
    }
    if common.ShouldProxy(storage, stdpath.Base(reqPath)) {
        return nil, obj, nil
    }
    return op.Link(ctx, storage, reqActualPath, args)
}
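To keep the single-variable finalPath flow above easy to follow, here is a hedged, standalone sketch of the same ordering (encode, then sign, then prefix); encodePath and sign below are simplified stand-ins for utils.EncodePath and sign.Sign:

package main

import (
    "fmt"
    "net/url"
    "strings"
)

// encodePath escapes each segment but keeps the "/" separators,
// a simplified stand-in for utils.EncodePath(path, true).
func encodePath(p string) string {
    parts := strings.Split(p, "/")
    for i, s := range parts {
        parts[i] = url.PathEscape(s)
    }
    return strings.Join(parts, "/")
}

// sign is a placeholder for sign.Sign; the signature covers the raw path.
func sign(path string) string { return "demo-signature" }

func buildLink(path, siteUrl string, encode, enableSign, local bool) string {
    finalPath := path
    if encode {
        finalPath = encodePath(path)
    }
    if enableSign {
        finalPath = fmt.Sprintf("%s?sign=%s", finalPath, sign(path))
    }
    if local {
        return finalPath
    }
    return fmt.Sprintf("%s/d%s", siteUrl, finalPath)
}

func main() {
    fmt.Println(buildLink("/movies/a b.mkv", "https://example.com", true, true, false))
    // https://example.com/d/movies/a%20b.mkv?sign=demo-signature
}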
@ -54,10 +54,6 @@ func (f DummyMFile) ReadAt(p []byte, off int64) (n int, err error) {
    return f.Reader.Read(p)
}

func (f DummyMFile) Close() error {
    return nil
}

func (DummyMFile) Seek(offset int64, whence int) (int64, error) {
    return offset, nil
}
@ -181,7 +181,7 @@ func (ca *CookieAuth) getSPToken() (*SuccessResponse, error) {

    // Execute the first request which gives us an auth token for the sharepoint service
    // With this token we can authenticate on the login page and save the returned cookies
    req, err := http.NewRequest("POST", loginUrl, buf)
    req, err := http.NewRequest(http.MethodPost, loginUrl, buf)
    if err != nil {
        return nil, err
    }
5
go.mod

@ -6,7 +6,6 @@ require (
    github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1
    github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2
    github.com/OpenListTeam/go-cache v0.1.0
    github.com/OpenListTeam/rateg v0.1.0
    github.com/OpenListTeam/sftpd-openlist v1.0.1
    github.com/OpenListTeam/tache v0.2.0
    github.com/OpenListTeam/times v0.1.0

@ -22,7 +21,7 @@ require (
    github.com/charmbracelet/bubbletea v1.3.6
    github.com/charmbracelet/lipgloss v1.1.0
    github.com/city404/v6-public-rpc-proto/go v0.0.0-20240817070657-90f8e24b653e
    github.com/coreos/go-oidc v2.3.0+incompatible
    github.com/coreos/go-oidc v2.4.0+incompatible
    github.com/deckarep/golang-set/v2 v2.8.0
    github.com/dhowden/tag v0.0.0-20240417053706-3d75831295e8
    github.com/disintegration/imaging v1.6.2

@ -55,6 +54,7 @@ require (
    github.com/pquerna/otp v1.5.0
    github.com/rclone/rclone v1.70.3
    github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d
    github.com/shirou/gopsutil/v4 v4.25.5
    github.com/sirupsen/logrus v1.9.3
    github.com/spf13/afero v1.14.0
    github.com/spf13/cobra v1.9.1

@ -92,7 +92,6 @@ require (
    github.com/minio/xxml v0.0.3 // indirect
    github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
    github.com/otiai10/mint v1.6.3 // indirect
    github.com/shirou/gopsutil/v4 v4.25.5 // indirect
    github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
    gopkg.in/go-jose/go-jose.v2 v2.6.3 // indirect
)
6
go.sum

@ -39,16 +39,12 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9IlZinHa+HVffy+NmVRoKr+wHN8fpLE=
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd/go.mod h1:C8yoIfvESpM3GD07OCHU7fqI7lhwyZ2Td1rbNbTAhnc=
github.com/OpenListTeam/115-sdk-go v0.2.1 h1:tzRUqdktS3h4o69+CXRDVwL0jYN7ccuX8TZWmLxkBGo=
github.com/OpenListTeam/115-sdk-go v0.2.1/go.mod h1:cfvitk2lwe6036iNi2h+iNxwxWDifKZsSvNtrur5BqU=
github.com/OpenListTeam/115-sdk-go v0.2.2 h1:JCrGHqQjBX3laOA6Hw4CuBovSg7g+FC5s0LEAYsRciU=
github.com/OpenListTeam/115-sdk-go v0.2.2/go.mod h1:cfvitk2lwe6036iNi2h+iNxwxWDifKZsSvNtrur5BqU=
github.com/OpenListTeam/go-cache v0.1.0 h1:eV2+FCP+rt+E4OCJqLUW7wGccWZNJMV0NNkh+uChbAI=
github.com/OpenListTeam/go-cache v0.1.0/go.mod h1:AHWjKhNK3LE4rorVdKyEALDHoeMnP8SjiNyfVlB+Pz4=
github.com/OpenListTeam/gsync v0.1.0 h1:ywzGybOvA3lW8K1BUjKZ2IUlT2FSlzPO4DOazfYXjcs=
github.com/OpenListTeam/gsync v0.1.0/go.mod h1:h/Rvv9aX/6CdW/7B8di3xK3xNV8dUg45Fehrd/ksZ9s=
github.com/OpenListTeam/rateg v0.1.0 h1:AvYuUjwfBcE+aMSPyJ2MduNIKSoX6f/IVFBwRolMnzE=
github.com/OpenListTeam/rateg v0.1.0/go.mod h1:JbrgJJlwEjVMV/0qHK0QUhdFNQMz4TvJf8aZNT43bkY=
github.com/OpenListTeam/sftpd-openlist v1.0.1 h1:j4S3iPFOpnXCUKRPS7uCT4mF2VCl34GyqvH6lqwnkUU=
github.com/OpenListTeam/sftpd-openlist v1.0.1/go.mod h1:uO/wKnbvbdq3rBLmClMTZXuCnw7XW4wlAq4dZe91a40=
github.com/OpenListTeam/tache v0.2.0 h1:Q4MjuyECn0CZCf1ZF91JaVaZTaps1mOTAm8bFj8sr9Q=

@ -209,6 +205,8 @@ github.com/cloudwego/base64x v0.1.5/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJ
github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
github.com/coreos/go-oidc v2.3.0+incompatible h1:+5vEsrgprdLjjQ9FzIKAzQz1wwPD+83hQRfUIPh7rO0=
github.com/coreos/go-oidc v2.3.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-oidc v2.4.0+incompatible h1:xjdlhLWXcINyUJgLQ9I76g7osgC2goiL6JDXS6Fegjk=
github.com/coreos/go-oidc v2.4.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
@ -12,6 +12,7 @@ import (
    "github.com/OpenListTeam/OpenList/v4/internal/net"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
    "github.com/caarlos0/env/v9"
    "github.com/shirou/gopsutil/v4/mem"
    log "github.com/sirupsen/logrus"
)

@ -79,6 +80,18 @@ func InitConfig() {
    if conf.Conf.MaxConcurrency > 0 {
        net.DefaultConcurrencyLimit = &net.ConcurrencyLimit{Limit: conf.Conf.MaxConcurrency}
    }
    if conf.Conf.MaxBufferLimit < 0 {
        m, _ := mem.VirtualMemory()
        if m != nil {
            conf.MaxBufferLimit = max(int(float64(m.Total)*0.05), 4*utils.MB)
            conf.MaxBufferLimit -= conf.MaxBufferLimit % utils.MB
        } else {
            conf.MaxBufferLimit = 16 * utils.MB
        }
    } else {
        conf.MaxBufferLimit = conf.Conf.MaxBufferLimit * utils.MB
    }
    log.Infof("max buffer limit: %d", conf.MaxBufferLimit)
    if !conf.Conf.Force {
        confFromEnv()
    }
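For reference, the arithmetic above picks roughly 5% of total memory, with a 4 MB floor and truncation to whole megabytes, and falls back to 16 MB when memory probing fails. A self-contained sketch with the gopsutil call replaced by a plain parameter:

package main

import "fmt"

const MB = 1024 * 1024

// bufferLimit mirrors the InitConfig logic; totalMem == 0 stands in for
// a failed mem.VirtualMemory() probe.
func bufferLimit(totalMem uint64) int {
    if totalMem == 0 {
        return 16 * MB
    }
    limit := max(int(float64(totalMem)*0.05), 4*MB)
    return limit - limit%MB // truncate to a whole number of megabytes
}

func main() {
    fmt.Println(bufferLimit(8<<30)/MB, "MB") // 8 GiB of RAM -> 409 MB
    fmt.Println(bufferLimit(0)/MB, "MB")     // probe failed -> 16 MB
}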
@ -41,7 +41,7 @@ func initUser() {
    } else {
        // DO NOT output the password to log file. Only output to console.
        // utils.Log.Infof("Successfully created the admin user and the initial password is: %s", adminPassword)
        fmt.Printf("Successfully created the admin user and the initial password is: %s", adminPassword)
        fmt.Printf("Successfully created the admin user and the initial password is: %s\n", adminPassword)
    }
} else {
    utils.Log.Fatalf("[init user] Failed to get admin user: %v", err)
@ -119,6 +119,7 @@ type Config struct {
    DistDir               string    `json:"dist_dir"`
    Log                   LogConfig `json:"log" envPrefix:"LOG_"`
    DelayedStart          int       `json:"delayed_start" env:"DELAYED_START"`
    MaxBufferLimit        int       `json:"max_buffer_limitMB" env:"MAX_BUFFER_LIMIT_MB"`
    MaxConnections        int       `json:"max_connections" env:"MAX_CONNECTIONS"`
    MaxConcurrency        int       `json:"max_concurrency" env:"MAX_CONCURRENCY"`
    TlsInsecureSkipVerify bool      `json:"tls_insecure_skip_verify" env:"TLS_INSECURE_SKIP_VERIFY"`

@ -174,6 +175,7 @@ func DefaultConfig(dataDir string) *Config {
            },
        },
    },
    MaxBufferLimit:        -1,
    MaxConnections:        0,
    MaxConcurrency:        64,
    TlsInsecureSkipVerify: true,

@ -25,6 +25,7 @@ var PrivacyReg []*regexp.Regexp
var (
    // StoragesLoaded loaded success if empty
    StoragesLoaded = false
    MaxBufferLimit int
)
var (
    RawIndexHtml string
@ -2,7 +2,6 @@ package model

import (
    "context"
    "errors"
    "io"
    "net/http"
    "time"

@ -40,13 +39,6 @@ type Link struct {
    utils.SyncClosers `json:"-"`
}

func (l *Link) Close() error {
    if clr, ok := l.MFile.(io.Closer); ok {
        return errors.Join(clr.Close(), l.SyncClosers.Close())
    }
    return l.SyncClosers.Close()
}

type OtherArgs struct {
    Obj    Obj
    Method string
@ -12,6 +12,7 @@ import (
    "sync"
    "time"

    "github.com/OpenListTeam/OpenList/v4/internal/conf"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"

@ -22,7 +23,7 @@ import (

// DefaultDownloadPartSize is the default range of bytes to get at a time when
// using Download().
const DefaultDownloadPartSize = utils.MB * 10
const DefaultDownloadPartSize = utils.MB * 8

// DefaultDownloadConcurrency is the default number of goroutines to spin up
// when using Download().

@ -84,6 +85,9 @@ func (d Downloader) Download(ctx context.Context, p *HttpRequestParams) (readClo
    if impl.cfg.PartSize == 0 {
        impl.cfg.PartSize = DefaultDownloadPartSize
    }
    if conf.MaxBufferLimit > 0 && impl.cfg.PartSize > conf.MaxBufferLimit {
        impl.cfg.PartSize = conf.MaxBufferLimit
    }
    if impl.cfg.HttpClient == nil {
        impl.cfg.HttpClient = DefaultHttpRequestFunc
    }

@ -159,17 +163,13 @@ func (d *downloader) download() (io.ReadCloser, error) {
        return nil, err
    }

    maxPart := int(d.params.Range.Length / int64(d.cfg.PartSize))
    if d.params.Range.Length%int64(d.cfg.PartSize) > 0 {
        maxPart++
    maxPart := 1
    if d.params.Range.Length > int64(d.cfg.PartSize) {
        maxPart = int((d.params.Range.Length + int64(d.cfg.PartSize) - 1) / int64(d.cfg.PartSize))
    }
    if maxPart < d.cfg.Concurrency {
        d.cfg.Concurrency = maxPart
    }
    if d.params.Range.Length == 0 {
        d.cfg.Concurrency = 1
    }

    log.Debugf("cfgConcurrency:%d", d.cfg.Concurrency)

    if maxPart == 1 {
@ -371,16 +371,21 @@ func DriverExtract(ctx context.Context, storage driver.Driver, path string, args
    return link.Link, link.Obj, nil
}

var forget utils.CloseFunc
var forget any
var linkM *extractLink
fn := func() (*extractLink, error) {
    link, err := driverExtract(ctx, storage, path, args)
    if err != nil {
        return nil, errors.Wrapf(err, "failed extract archive")
    }
    if link.MFile != nil && forget != nil {
        linkM = link
        return nil, errLinkMFileCache
    }
    if link.Link.Expiration != nil {
        extractCache.Set(key, link, cache.WithEx[*extractLink](*link.Link.Expiration))
    }
    link.Add(forget)
    link.AddIfCloser(forget)
    return link, nil
}

@ -392,13 +397,13 @@ func DriverExtract(ctx context.Context, storage driver.Driver, path string, args
    return link.Link, link.Obj, nil
}

forget = func() error {
forget = utils.CloseFunc(func() error {
    if forget != nil {
        forget = nil
        linkG.Forget(key)
    }
    return nil
}
})
link, err, _ := extractG.Do(key, fn)
if err == nil && !link.AcquireReference() {
    link, err, _ = extractG.Do(key, fn)

@ -406,11 +411,18 @@ func DriverExtract(ctx context.Context, storage driver.Driver, path string, args
        link.AcquireReference()
    }
}
if err == errLinkMFileCache {
    if linkM != nil {
        return linkM.Link, linkM.Obj, nil
    }
    forget = nil
    link, err = fn()
}

if err != nil {
    return nil, nil, err
}
return link.Link, link.Obj, err
return link.Link, link.Obj, nil
}

func driverExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (*extractLink, error) {
@ -2,6 +2,7 @@ package op

import (
    "context"
    stderrors "errors"
    stdpath "path"
    "slices"
    "strings"

@ -250,6 +251,7 @@ func GetUnwrap(ctx context.Context, storage driver.Driver, path string) (model.O

var linkCache = cache.NewMemCache(cache.WithShards[*model.Link](16))
var linkG = singleflight.Group[*model.Link]{Remember: true}
var errLinkMFileCache = stderrors.New("ErrLinkMFileCache")

// Link get link, if is an url. should have an expiry time
func Link(ctx context.Context, storage driver.Driver, path string, args model.LinkArgs) (*model.Link, model.Obj, error) {

@ -291,16 +293,21 @@ func Link(ctx context.Context, storage driver.Driver, path string, args model.Li
    return link, file, nil
}

var forget utils.CloseFunc
var forget any
var linkM *model.Link
fn := func() (*model.Link, error) {
    link, err := storage.Link(ctx, file, args)
    if err != nil {
        return nil, errors.Wrapf(err, "failed get link")
    }
    if link.MFile != nil && forget != nil {
        linkM = link
        return nil, errLinkMFileCache
    }
    if link.Expiration != nil {
        linkCache.Set(key, link, cache.WithEx[*model.Link](*link.Expiration))
    }
    link.Add(forget)
    link.AddIfCloser(forget)
    return link, nil
}

@ -312,13 +319,13 @@ func Link(ctx context.Context, storage driver.Driver, path string, args model.Li
    return link, file, err
}

forget = func() error {
forget = utils.CloseFunc(func() error {
    if forget != nil {
        forget = nil
        linkG.Forget(key)
    }
    return nil
}
})
link, err, _ := linkG.Do(key, fn)
if err == nil && !link.AcquireReference() {
    link, err, _ = linkG.Do(key, fn)

@ -326,11 +333,19 @@ func Link(ctx context.Context, storage driver.Driver, path string, args model.Li
        link.AcquireReference()
    }
}

if err == errLinkMFileCache {
    if linkM != nil {
        return linkM, file, nil
    }
    forget = nil
    link, err = fn()
}

if err != nil {
    return nil, nil, err
}

return link, file, err
return link, file, nil
}

// Other api
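As a reading aid (my gloss, not text from the patch): links backed by an in-memory MFile must not be shared across callers, so the singleflight closure reports them through a dedicated sentinel error and the caller re-runs the function privately. A self-contained analogue of that control flow:

package main

import (
    "errors"
    "fmt"
)

var errDoNotShare = errors.New("result must not be shared")

// fetch stands in for the storage.Link call made inside the linkG.Do closure.
func fetch(shareable bool) (string, error) {
    if !shareable {
        return "", errDoNotShare
    }
    return "shared-link", nil
}

func main() {
    link, err := fetch(false) // deduplicated attempt
    if errors.Is(err, errDoNotShare) {
        // fall back to a private, per-caller invocation
        link, err = "private-link", nil
    }
    fmt.Println(link, err) // private-link <nil>
}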
@ -8,12 +8,13 @@ import (
    "io"
    "math"
    "os"
    "sync"

    "github.com/OpenListTeam/OpenList/v4/internal/conf"
    "github.com/OpenListTeam/OpenList/v4/internal/errs"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/pkg/http_range"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
    "github.com/sirupsen/logrus"
    "go4.org/readerutil"
)

@ -104,11 +105,8 @@ func (f *FileStream) GetFile() model.File {
    return nil
}

const InMemoryBufMaxSize = 10 // Megabytes
const InMemoryBufMaxSizeBytes = InMemoryBufMaxSize * 1024 * 1024

// RangeRead have to cache all data first since only Reader is provided.
// also support a peeking RangeRead at very start, but won't buffer more than 10MB data in memory
// also support a peeking RangeRead at very start, but won't buffer more than conf.MaxBufferLimit data in memory
func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
    if httpRange.Length < 0 || httpRange.Start+httpRange.Length > f.GetSize() {
        httpRange.Length = f.GetSize() - httpRange.Start

@ -122,17 +120,14 @@ func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
    if f.peekBuff != nil && size <= int64(f.peekBuff.Len()) {
        return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil
    }
    if size <= InMemoryBufMaxSizeBytes {
    if size <= int64(conf.MaxBufferLimit) {
        bufSize := min(size, f.GetSize())
        // Using bytes.Buffer as the write target of io.CopyBuffer makes CopyBuffer call Buffer.ReadFrom,
        // which grows the buffer even when the amount of data written equals Buffer.Cap
        buf := make([]byte, bufSize)
        n, err := io.ReadFull(f.Reader, buf)
        if err != nil {
            return nil, err
        }
        if n != int(bufSize) {
            return nil, fmt.Errorf("stream RangeRead did not get all data in peek, expect =%d ,actual =%d", bufSize, n)
            return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", bufSize, n, err)
        }
        f.peekBuff = bytes.NewReader(buf)
        f.Reader = io.MultiReader(f.peekBuff, f.Reader)

@ -236,7 +231,7 @@ func (ss *SeekableStream) Read(p []byte) (n int, err error) {
    }
    rc, err := ss.rangeReadCloser.RangeRead(ss.Ctx, http_range.Range{Length: -1})
    if err != nil {
        return 0, nil
        return 0, err
    }
    ss.Reader = rc
}
@ -301,70 +296,48 @@ func (r *ReaderUpdatingProgress) Close() error {
    return r.Reader.Close()
}

type readerCur struct {
    reader io.Reader
    cur    int64
}

type RangeReadReadAtSeeker struct {
    ss        *SeekableStream
    masterOff int64
    readers   []*readerCur
    readerMap sync.Map
    headCache *headCache
}

type headCache struct {
    *readerCur
    bufs [][]byte
    reader io.Reader
    bufs   [][]byte
}

func (c *headCache) read(p []byte) (n int, err error) {
    pL := len(p)
    logrus.Debugf("headCache read_%d", pL)
    if c.cur < int64(pL) {
        bufL := int64(pL) - c.cur
        buf := make([]byte, bufL)
        lr := io.LimitReader(c.reader, bufL)
        off := 0
        for c.cur < int64(pL) {
            n, err = lr.Read(buf[off:])
            off += n
            c.cur += int64(n)
            if err == io.EOF && off == int(bufL) {
                err = nil
            }
            if err != nil {
                break
            }
func (c *headCache) head(p []byte) (int, error) {
    n := 0
    for _, buf := range c.bufs {
        if len(buf)+n >= len(p) {
            n += copy(p[n:], buf[:len(p)-n])
            return n, nil
        } else {
            n += copy(p[n:], buf)
        }
    }
    w, err := io.ReadAtLeast(c.reader, p[n:], 1)
    if w > 0 {
        buf := make([]byte, w)
        copy(buf, p[n:n+w])
        c.bufs = append(c.bufs, buf)
        n += w
    }
    n = 0
    if c.cur >= int64(pL) {
        for i := 0; n < pL; i++ {
            buf := c.bufs[i]
            r := len(buf)
            if n+r > pL {
                r = pL - n
            }
            n += copy(p[n:], buf[:r])
        }
    }
    return
    return n, err
}

func (r *headCache) Close() error {
    for i := range r.bufs {
        r.bufs[i] = nil
    }
    clear(r.bufs)
    r.bufs = nil
    return nil
}

func (r *RangeReadReadAtSeeker) InitHeadCache() {
    if r.ss.GetFile() == nil && r.masterOff == 0 {
        reader := r.readers[0]
        r.readers = r.readers[1:]
        r.headCache = &headCache{readerCur: reader}
        value, _ := r.readerMap.LoadAndDelete(int64(0))
        r.headCache = &headCache{reader: value.(io.Reader)}
        r.ss.Closers.Add(r.headCache)
    }
}

@ -390,8 +363,7 @@ func NewReadAtSeeker(ss *SeekableStream, offset int64, forceRange ...bool) (mode
        return nil, err
    }
} else {
    rc := &readerCur{reader: ss, cur: offset}
    r.readers = append(r.readers, rc)
    r.readerMap.Store(int64(offset), ss)
}
return r, nil
}
@ -408,72 +380,64 @@ func NewMultiReaderAt(ss []*SeekableStream) (readerutil.SizeReaderAt, error) {
    return readerutil.NewMultiReaderAt(readers...), nil
}

func (r *RangeReadReadAtSeeker) getReaderAtOffset(off int64) (*readerCur, error) {
    var rc *readerCur
    for _, reader := range r.readers {
        if reader.cur == -1 {
            continue
func (r *RangeReadReadAtSeeker) getReaderAtOffset(off int64) (io.Reader, error) {
    var rr io.Reader
    var cur int64 = -1
    r.readerMap.Range(func(key, value any) bool {
        k := key.(int64)
        if off == k {
            cur = k
            rr = value.(io.Reader)
            return false
        }
        if reader.cur == off {
            // logrus.Debugf("getReaderAtOffset match_%d", off)
            return reader, nil
        }
        if reader.cur > 0 && off >= reader.cur && (rc == nil || reader.cur < rc.cur) {
            rc = reader
        if off > k && off-k <= 4*utils.MB && (rr == nil || k < cur) {
            rr = value.(io.Reader)
            cur = k
        }
        return true
    })
    if cur >= 0 {
        r.readerMap.Delete(int64(cur))
    }
    if rc != nil && off-rc.cur <= utils.MB {
        n, err := utils.CopyWithBufferN(io.Discard, rc.reader, off-rc.cur)
        rc.cur += n
        if err == io.EOF && rc.cur == off {
            err = nil
        }
        if err == nil {
            logrus.Debugf("getReaderAtOffset old_%d", off)
            return rc, nil
        }
        rc.cur = -1
    if off == int64(cur) {
        // logrus.Debugf("getReaderAtOffset match_%d", off)
        return rr, nil
    }
    logrus.Debugf("getReaderAtOffset new_%d", off)

    // A Range request must not exceed the file size; some cloud drives cannot handle that and return the whole file
    reader, err := r.ss.RangeRead(http_range.Range{Start: off, Length: r.ss.GetSize() - off})
    if rr != nil {
        n, _ := utils.CopyWithBufferN(io.Discard, rr, off-cur)
        cur += n
        if cur == off {
            // logrus.Debugf("getReaderAtOffset old_%d", off)
            return rr, nil
        }
    }
    // logrus.Debugf("getReaderAtOffset new_%d", off)

    reader, err := r.ss.RangeRead(http_range.Range{Start: off, Length: -1})
    if err != nil {
        return nil, err
    }
    rc = &readerCur{reader: reader, cur: off}
    r.readers = append(r.readers, rc)
    return rc, nil
    return reader, nil
}

func (r *RangeReadReadAtSeeker) ReadAt(p []byte, off int64) (int, error) {
func (r *RangeReadReadAtSeeker) ReadAt(p []byte, off int64) (n int, err error) {
    if off == 0 && r.headCache != nil {
        return r.headCache.read(p)
        return r.headCache.head(p)
    }
    rc, err := r.getReaderAtOffset(off)
    var rr io.Reader
    rr, err = r.getReaderAtOffset(off)
    if err != nil {
        return 0, err
    }
    n, num := 0, 0
    for num < len(p) {
        n, err = rc.reader.Read(p[num:])
        rc.cur += int64(n)
        num += n
        if err == nil {
            continue
        }
        if err == io.EOF {
            // io.EOF means the reader has been fully consumed
            rc.cur = -1
            // the yeka/zip package does not handle EOF, so we stay compatible with it
            // https://github.com/yeka/zip/blob/03d6312748a9d6e0bc0c9a7275385c09f06d9c14/reader.go#L433
            if num == len(p) {
                err = nil
            }
        }
        break
    n, err = io.ReadAtLeast(rr, p, 1)
    off += int64(n)
    if err == nil {
        r.readerMap.Store(int64(off), rr)
    } else {
        rr = nil
    }
    return num, err
    return n, err
}

func (r *RangeReadReadAtSeeker) Seek(offset int64, whence int) (int64, error) {

@ -500,15 +464,7 @@ func (r *RangeReadReadAtSeeker) Seek(offset int64, whence int) (int64, error) {
}

func (r *RangeReadReadAtSeeker) Read(p []byte) (n int, err error) {
    if r.masterOff == 0 && r.headCache != nil {
        return r.headCache.read(p)
    }
    rc, err := r.getReaderAtOffset(r.masterOff)
    if err != nil {
        return 0, err
    }
    n, err = rc.reader.Read(p)
    rc.cur += int64(n)
    n, err = r.ReadAt(p, r.masterOff)
    r.masterOff += int64(n)
    return n, err
}
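A hedged miniature of the reader-reuse strategy above (not the project's actual types): sequential readers are parked in a sync.Map keyed by the offset they will read next, retrieved on an exact hit, and re-parked at the advanced offset:

package main

import (
    "fmt"
    "io"
    "strings"
    "sync"
)

func main() {
    var readers sync.Map // next offset -> io.Reader
    readers.Store(int64(0), strings.NewReader("hello world"))

    readAt := func(p []byte, off int64) int {
        v, ok := readers.LoadAndDelete(off)
        if !ok {
            return 0 // a real implementation would open a new ranged reader here
        }
        r := v.(io.Reader)
        n, _ := io.ReadAtLeast(r, p, 1)
        readers.Store(off+int64(n), r) // park for the next sequential read
        return n
    }

    buf := make([]byte, 5)
    fmt.Println(readAt(buf, 0), string(buf)) // 5 hello
    fmt.Println(readAt(buf, 5), string(buf)) // 5 " worl"
}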
@ -1,12 +1,14 @@
package stream

import (
    "bytes"
    "context"
    "encoding/hex"
    "errors"
    "fmt"
    "io"
    "net/http"
    "sync"

    "github.com/OpenListTeam/OpenList/v4/internal/conf"
    "github.com/OpenListTeam/OpenList/v4/internal/model"

@ -24,7 +26,7 @@ func (f RangeReaderFunc) RangeRead(ctx context.Context, httpRange http_range.Ran

func GetRangeReaderFromLink(size int64, link *model.Link) (model.RangeReaderIF, error) {
    if link.MFile != nil {
        return &model.FileRangeReader{RangeReaderIF: GetRangeReaderFromMFile(size, link.MFile)}, nil
        return GetRangeReaderFromMFile(size, link.MFile), nil
    }
    if link.Concurrency > 0 || link.PartSize > 0 {
        down := net.NewDownloader(func(d *net.Downloader) {

@ -95,13 +97,16 @@ func GetRangeReaderFromLink(size int64, link *model.Link) (model.RangeReaderIF,
    return RateLimitRangeReaderFunc(rangeReader), nil
}

func GetRangeReaderFromMFile(size int64, file model.File) RangeReaderFunc {
    return func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
        length := httpRange.Length
        if length < 0 || httpRange.Start+length > size {
            length = size - httpRange.Start
        }
        return &model.FileCloser{File: io.NewSectionReader(file, httpRange.Start, length)}, nil
// The io.ReadCloser returned by RangeReaderIF.RangeRead preserves file's signature.
func GetRangeReaderFromMFile(size int64, file model.File) model.RangeReaderIF {
    return &model.FileRangeReader{
        RangeReaderIF: RangeReaderFunc(func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
            length := httpRange.Length
            if length < 0 || httpRange.Start+length > size {
                length = size - httpRange.Start
            }
            return &model.FileCloser{File: io.NewSectionReader(file, httpRange.Start, length)}, nil
        }),
    }
}
@ -187,3 +192,65 @@ func CacheFullInTempFileAndHash(stream model.FileStreamer, up model.UpdateProgre
    }
    return tmpF, hex.EncodeToString(h.Sum(nil)), err
}

type StreamSectionReader struct {
    file    model.FileStreamer
    off     int64
    bufPool *sync.Pool
}

func NewStreamSectionReader(file model.FileStreamer, maxBufferSize int) (*StreamSectionReader, error) {
    ss := &StreamSectionReader{file: file}
    if file.GetFile() == nil {
        maxBufferSize = min(maxBufferSize, int(file.GetSize()))
        if maxBufferSize > conf.MaxBufferLimit {
            _, err := file.CacheFullInTempFile()
            if err != nil {
                return nil, err
            }
        } else {
            ss.bufPool = &sync.Pool{
                New: func() any {
                    return make([]byte, maxBufferSize)
                },
            }
        }
    }
    return ss, nil
}

// Not thread-safe
func (ss *StreamSectionReader) GetSectionReader(off, length int64) (*SectionReader, error) {
    var cache io.ReaderAt = ss.file.GetFile()
    var buf []byte
    if cache == nil {
        if off != ss.off {
            return nil, fmt.Errorf("stream not cached: request offset %d != current offset %d", off, ss.off)
        }
        tempBuf := ss.bufPool.Get().([]byte)
        buf = tempBuf[:length]
        n, err := io.ReadFull(ss.file, buf)
        if int64(n) != length {
            return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", length, n, err)
        }
        ss.off += int64(n)
        off = 0
        cache = bytes.NewReader(buf)
    }
    return &SectionReader{io.NewSectionReader(cache, off, length), buf}, nil
}

func (ss *StreamSectionReader) RecycleSectionReader(sr *SectionReader) {
    if sr != nil {
        if sr.buf != nil {
            ss.bufPool.Put(sr.buf[0:cap(sr.buf)])
            sr.buf = nil
        }
        sr.ReadSeeker = nil
    }
}

type SectionReader struct {
    io.ReadSeeker
    buf []byte
}
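An aside on the bufPool pattern above: sync.Pool hands out fixed-capacity scratch buffers that are resliced to the needed length when borrowed and returned at full capacity, which is what RecycleSectionReader's buf[0:cap(buf)] does. A minimal sketch:

package main

import (
    "fmt"
    "sync"
)

func main() {
    const maxBuf = 8
    pool := sync.Pool{New: func() any { return make([]byte, maxBuf) }}

    buf := pool.Get().([]byte)[:5] // borrow, then shrink to the needed length
    copy(buf, "hello")
    fmt.Println(string(buf))
    pool.Put(buf[:cap(buf)]) // return the full-capacity slice for reuse
}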
@ -19,6 +19,8 @@ type Group struct {

    wg  sync.WaitGroup
    sem chan token

    startChan chan token
}

func NewGroupWithContext(ctx context.Context, limit int, retryOpts ...retry.Option) (*Group, context.Context) {

@ -26,6 +28,13 @@ func NewGroupWithContext(ctx context.Context, limit int, retryOpts ...retry.Opti
    return (&Group{cancel: cancel, ctx: ctx, opts: append(retryOpts, retry.Context(ctx))}).SetLimit(limit), ctx
}

// OrderedGroup
func NewOrderedGroupWithContext(ctx context.Context, limit int, retryOpts ...retry.Option) (*Group, context.Context) {
    group, ctx := NewGroupWithContext(ctx, limit, retryOpts...)
    group.startChan = make(chan token, 1)
    return group, ctx
}

func (g *Group) done() {
    if g.sem != nil {
        <-g.sem

@ -39,18 +48,62 @@ func (g *Group) Wait() error {
    return context.Cause(g.ctx)
}

func (g *Group) Go(f func(ctx context.Context) error) {
func (g *Group) Go(do func(ctx context.Context) error) {
    g.GoWithLifecycle(Lifecycle{Do: do})
}

type Lifecycle struct {
    // In an OrderedGroup, Before is invoked serially, so it is thread-safe
    Before func(ctx context.Context) error
    // Do is not called if Before returns an error
    Do func(ctx context.Context) error
    // After is called last
    After func(err error)
}

func (g *Group) GoWithLifecycle(lifecycle Lifecycle) {
    if g.startChan != nil {
        select {
        case <-g.ctx.Done():
            return
        case g.startChan <- token{}:
        }
    }

    if g.sem != nil {
        g.sem <- token{}
        select {
        case <-g.ctx.Done():
            return
        case g.sem <- token{}:
        }
    }

    g.wg.Add(1)
    go func() {
        defer g.done()
        if err := retry.Do(func() error { return f(g.ctx) }, g.opts...); err != nil {
            g.cancel(err)
        var err error
        if lifecycle.Before != nil {
            err = lifecycle.Before(g.ctx)
        }
        if err == nil {
            if g.startChan != nil {
                <-g.startChan
            }
            err = retry.Do(func() error { return lifecycle.Do(g.ctx) }, g.opts...)
        }
        if lifecycle.After != nil {
            lifecycle.After(err)
        }
        if err != nil {
            select {
            case <-g.ctx.Done():
                return
            default:
                g.cancel(err)
            }
        }
    }()

}

func (g *Group) TryGo(f func(ctx context.Context) error) bool {
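Sketching the intended call pattern (my reading of the diff, with simplified plumbing): an ordered group serializes the Before hooks in submission order while the Do bodies may overlap, and After always observes the final error. A self-contained analogue:

package main

import (
    "context"
    "fmt"
    "sync"
)

type lifecycle struct {
    before func(context.Context) error
    do     func(context.Context) error
    after  func(error)
}

// run admits one Before at a time (preserving submission order) while the
// do bodies are free to overlap, loosely mirroring GoWithLifecycle above.
func run(ctx context.Context, wg *sync.WaitGroup, start chan struct{}, lc lifecycle) {
    start <- struct{}{} // blocks until the previous task finished its Before
    wg.Add(1)
    go func() {
        defer wg.Done()
        err := lc.before(ctx)
        <-start // release the slot so the next Before may run
        if err == nil {
            err = lc.do(ctx)
        }
        lc.after(err)
    }()
}

func main() {
    var wg sync.WaitGroup
    start := make(chan struct{}, 1)
    for i := 0; i < 3; i++ {
        i := i
        run(context.Background(), &wg, start, lifecycle{
            before: func(context.Context) error { fmt.Println("before", i); return nil },
            do:     func(context.Context) error { return nil },
            after:  func(err error) { fmt.Println("after", i, err) },
        })
    }
    wg.Wait()
}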
@ -383,7 +383,7 @@ func (c *Client) Link(path string) (string, http.Header, error) {

// ReadStream reads the stream for a given path
func (c *Client) ReadStream(path string, callback func(rq *http.Request)) (io.ReadCloser, http.Header, error) {
    rs, err := c.req("GET", path, nil, callback)
    rs, err := c.req(http.MethodGet, path, nil, callback)
    if err != nil {
        return nil, nil, newPathErrorErr("ReadStream", path, err)
    }

@ -405,7 +405,7 @@ func (c *Client) ReadStream(path string, callback func(rq *http.Request)) (io.Re
// this function will emulate the behavior by skipping `offset` bytes and limiting the result
// to `length`.
func (c *Client) ReadStreamRange(path string, offset, length int64) (io.ReadCloser, error) {
    rs, err := c.req("GET", path, nil, func(r *http.Request) {
    rs, err := c.req(http.MethodGet, path, nil, func(r *http.Request) {
        r.Header.Add("Range", fmt.Sprintf("bytes=%v-%v", offset, offset+length-1))
    })
    if err != nil {
@ -114,7 +114,7 @@ func (c *client) post(path string, data url.Values) (*http.Response, error) {
    u := c.url.JoinPath(path)
    u.User = nil // remove userinfo for requests

    req, err := http.NewRequest("POST", u.String(), bytes.NewReader([]byte(data.Encode())))
    req, err := http.NewRequest(http.MethodPost, u.String(), bytes.NewReader([]byte(data.Encode())))
    if err != nil {
        return nil, err
    }

@ -162,7 +162,7 @@ func (c *client) AddFromLink(link string, savePath string, id string) error {

    u := c.url.JoinPath("/api/v2/torrents/add")
    u.User = nil // remove userinfo for requests
    req, err := http.NewRequest("POST", u.String(), buf)
    req, err := http.NewRequest(http.MethodPost, u.String(), buf)
    if err != nil {
        return err
    }
@ -74,6 +74,7 @@ type Group[T any] struct {
    mu sync.Mutex          // protects m
    m  map[string]*call[T] // lazily initialized

    // Won't remember error
    Remember bool
}

@ -158,7 +159,7 @@ func (g *Group[T]) doCall(c *call[T], key string, fn func() (T, error)) {
    g.mu.Lock()
    defer g.mu.Unlock()
    c.wg.Done()
    if !g.Remember && g.m[key] == c {
    if (!g.Remember || c.err != nil) && g.m[key] == c {
        delete(g.m, key)
    }

@ -1,3 +1,3 @@
package singleflight

var ErrorGroup Group[error]
var AnyGroup Group[any]
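For context (my gloss on the one-line doCall change): with Remember set, a Group keeps successful results for later Do calls, but a call that ended in error is now evicted so the next caller retries instead of receiving a cached failure. A tiny self-contained analogue:

package main

import (
    "errors"
    "fmt"
)

// memo remembers only successful results, mirroring the doCall change.
type memo struct {
    vals map[string]string
}

func (m *memo) do(key string, fn func() (string, error)) (string, error) {
    if v, ok := m.vals[key]; ok {
        return v, nil
    }
    v, err := fn()
    if err != nil {
        return "", err // not stored: the next call runs fn again
    }
    m.vals[key] = v
    return v, nil
}

func main() {
    m := &memo{vals: map[string]string{}}
    _, err := m.do("k", func() (string, error) { return "", errors.New("boom") })
    fmt.Println(err)
    v, _ := m.do("k", func() (string, error) { return "ok", nil }) // retried
    fmt.Println(v)
}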
@ -6,7 +6,9 @@ import (
    "errors"
    "fmt"
    "io"
    "math"
    "sync"
    "sync/atomic"
    "time"

    log "github.com/sirupsen/logrus"

@ -164,6 +166,7 @@ func (c *Closers) Close() error {
            errs = append(errs, closer.Close())
        }
    }
    clear(*c)
    *c = (*c)[:0]
    return errors.Join(errs...)
}

@ -191,32 +194,32 @@ type SyncClosersIF interface {

type SyncClosers struct {
    closers []io.Closer
    mu      sync.Mutex
    ref     int
    ref     atomic.Int32
}

var _ SyncClosersIF = (*SyncClosers)(nil)

func (c *SyncClosers) AcquireReference() bool {
    c.mu.Lock()
    defer c.mu.Unlock()
    if len(c.closers) == 0 {
        return false
    ref := c.ref.Add(1)
    if ref > 0 {
        // log.Debugf("SyncClosers.AcquireReference %p,ref=%d\n", c, ref)
        return true
    }
    c.ref++
    log.Debugf("SyncClosers.AcquireReference %p,ref=%d\n", c, c.ref)
    return true
    c.ref.Store(math.MinInt16)
    return false
}

func (c *SyncClosers) Close() error {
    c.mu.Lock()
    defer c.mu.Unlock()
    defer log.Debugf("SyncClosers.Close %p,ref=%d\n", c, c.ref)
    if c.ref > 1 {
        c.ref--
    ref := c.ref.Add(-1)
    if ref < -1 {
        c.ref.Store(math.MinInt16)
        return nil
    }
    c.ref = 0
    // log.Debugf("SyncClosers.Close %p,ref=%d\n", c, ref+1)
    if ref > 0 {
        return nil
    }
    c.ref.Store(math.MinInt16)

    var errs []error
    for _, closer := range c.closers {

@ -224,23 +227,26 @@ func (c *SyncClosers) Close() error {
        errs = append(errs, closer.Close())
    }
}
c.closers = c.closers[:0]
clear(c.closers)
c.closers = nil
return errors.Join(errs...)
}

func (c *SyncClosers) Add(closer io.Closer) {
    if closer != nil {
        c.mu.Lock()
        if c.ref.Load() < 0 {
            panic("Not reusable")
        }
        c.closers = append(c.closers, closer)
        c.mu.Unlock()
    }
}

func (c *SyncClosers) AddIfCloser(a any) {
    if closer, ok := a.(io.Closer); ok {
        c.mu.Lock()
        if c.ref.Load() < 0 {
            panic("Not reusable")
        }
        c.closers = append(c.closers, closer)
        c.mu.Unlock()
    }
}

@ -278,11 +284,7 @@ var IoBuffPool = &sync.Pool{
func CopyWithBuffer(dst io.Writer, src io.Reader) (written int64, err error) {
    buff := IoBuffPool.Get().([]byte)
    defer IoBuffPool.Put(buff)
    written, err = io.CopyBuffer(dst, src, buff)
    if err != nil {
        return
    }
    return written, nil
    return io.CopyBuffer(dst, src, buff)
}

func CopyWithBufferN(dst io.Writer, src io.Reader, n int64) (written int64, err error) {
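A hedged sketch of the lock-free reference scheme above: Add(1) acquires, Add(-1) releases, and storing a large negative sentinel (math.MinInt16 in the diff) marks the object as closed so late acquirers see a non-positive count and fail. Simplified, with the edge handling of the real code omitted:

package main

import (
    "fmt"
    "math"
    "sync/atomic"
)

type refCloser struct{ ref atomic.Int32 }

func (c *refCloser) acquire() bool {
    if c.ref.Add(1) > 0 {
        return true
    }
    c.ref.Store(math.MinInt16) // already closed: keep it poisoned
    return false
}

func (c *refCloser) close() bool {
    if c.ref.Add(-1) > 0 {
        return false // other holders remain
    }
    c.ref.Store(math.MinInt16) // last holder: really close
    return true
}

func main() {
    var c refCloser
    fmt.Println(c.acquire()) // true
    fmt.Println(c.close())   // true (last reference released)
    fmt.Println(c.acquire()) // false: poisoned after close
}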
@ -1,6 +1,8 @@
package handles

import (
    "strings"

    _115 "github.com/OpenListTeam/OpenList/v4/drivers/115"
    _115_open "github.com/OpenListTeam/OpenList/v4/drivers/115_open"
    "github.com/OpenListTeam/OpenList/v4/drivers/pikpak"

@ -406,8 +408,14 @@ func AddOfflineDownload(c *gin.Context) {
    }
    var tasks []task.TaskExtensionInfo
    for _, url := range req.Urls {
        // Filter out empty lines and whitespace-only strings
        trimmedUrl := strings.TrimSpace(url)
        if trimmedUrl == "" {
            continue
        }

        t, err := tool.AddURL(c, &tool.AddURLArgs{
            URL: url,
            URL: trimmedUrl,
            DstDirPath: reqPath,
            Tool: req.Tool,
            DeletePolicy: tool.DeletePolicy(req.DeletePolicy),
@ -32,6 +32,7 @@ func Init(e *gin.Engine) {
    })
    g.GET("/favicon.ico", handles.Favicon)
    g.GET("/robots.txt", handles.Robots)
    g.GET("/manifest.json", static.ManifestJSON)
    g.GET("/i/:link_name", handles.Plist)
    common.SecretKey = []byte(conf.Conf.JwtSecret)
    g.Use(middlewares.StoragesLoaded)
@ -19,6 +19,13 @@ func getSiteConfig() SiteConfig {
    }
    if siteConfig.BasePath != "" {
        siteConfig.BasePath = utils.FixAndCleanPath(siteConfig.BasePath)
        // Keep consistent with frontend: trim trailing slash unless it's root
        if siteConfig.BasePath != "/" && strings.HasSuffix(siteConfig.BasePath, "/") {
            siteConfig.BasePath = strings.TrimSuffix(siteConfig.BasePath, "/")
        }
    }
    if siteConfig.BasePath == "" {
        siteConfig.BasePath = "/"
    }
    if siteConfig.Cdn == "" {
        siteConfig.Cdn = strings.TrimSuffix(siteConfig.BasePath, "/")
@ -1,6 +1,7 @@
package static

import (
    "encoding/json"
    "errors"
    "fmt"
    "io"

@ -17,6 +18,20 @@ import (
    "github.com/gin-gonic/gin"
)

type ManifestIcon struct {
    Src   string `json:"src"`
    Sizes string `json:"sizes"`
    Type  string `json:"type"`
}

type Manifest struct {
    Display  string         `json:"display"`
    Scope    string         `json:"scope"`
    StartURL string         `json:"start_url"`
    Name     string         `json:"name"`
    Icons    []ManifestIcon `json:"icons"`
}

var static fs.FS

func initStatic() {

@ -77,9 +92,15 @@ func initIndex(siteConfig SiteConfig) {
    utils.Log.Debug("Successfully read index.html from static files system")
}
utils.Log.Debug("Replacing placeholders in index.html...")
// Construct the correct manifest path based on basePath
manifestPath := "/manifest.json"
if siteConfig.BasePath != "/" {
    manifestPath = siteConfig.BasePath + "/manifest.json"
}
replaceMap := map[string]string{
    "cdn: undefined":       fmt.Sprintf("cdn: '%s'", siteConfig.Cdn),
    "base_path: undefined": fmt.Sprintf("base_path: '%s'", siteConfig.BasePath),
    "cdn: undefined":        fmt.Sprintf("cdn: '%s'", siteConfig.Cdn),
    "base_path: undefined":  fmt.Sprintf("base_path: '%s'", siteConfig.BasePath),
    `href="/manifest.json"`: fmt.Sprintf(`href="%s"`, manifestPath),
}
conf.RawIndexHtml = replaceStrings(conf.RawIndexHtml, replaceMap)
UpdateIndex()

@ -110,12 +131,57 @@ func UpdateIndex() {
    utils.Log.Debug("Index.html update completed")
}

func ManifestJSON(c *gin.Context) {
    // Get site configuration to ensure consistent base path handling
    siteConfig := getSiteConfig()

    // Get site title from settings
    siteTitle := setting.GetStr(conf.SiteTitle)

    // Get logo from settings, use the first line (light theme logo)
    logoSetting := setting.GetStr(conf.Logo)
    logoUrl := strings.Split(logoSetting, "\n")[0]

    // Use base path from site config for consistency
    basePath := siteConfig.BasePath

    // Determine scope and start_url
    // PWA scope and start_url should always point to our application's base path
    // regardless of whether static resources come from CDN or local server
    scope := basePath
    startURL := basePath

    manifest := Manifest{
        Display:  "standalone",
        Scope:    scope,
        StartURL: startURL,
        Name:     siteTitle,
        Icons: []ManifestIcon{
            {
                Src:   logoUrl,
                Sizes: "512x512",
                Type:  "image/png",
            },
        },
    }

    c.Header("Content-Type", "application/json")
    c.Header("Cache-Control", "public, max-age=3600") // cache for 1 hour

    if err := json.NewEncoder(c.Writer).Encode(manifest); err != nil {
        utils.Log.Errorf("Failed to encode manifest.json: %v", err)
        c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to generate manifest"})
        return
    }
}

func Static(r *gin.RouterGroup, noRoute func(handlers ...gin.HandlerFunc)) {
    utils.Log.Debug("Setting up static routes...")
    siteConfig := getSiteConfig()
    initStatic()
    initIndex(siteConfig)
    folders := []string{"assets", "images", "streamer", "static"}

    if conf.Conf.Cdn == "" {
        utils.Log.Debug("Setting up static file serving...")
        r.Use(func(c *gin.Context) {
@ -233,6 +233,11 @@ func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (sta
    return http.StatusNotFound, err
}
if fi.IsDir() {
    if r.Method == http.MethodHead {
        w.Header().Set("Content-Type", "httpd/unix-directory")
        w.Header().Set("Content-Length", "0")
        return http.StatusOK, nil
    }
    return http.StatusMethodNotAllowed, nil
}
// Let ServeContent determine the Content-Type header.