Mirror of https://github.com/OpenListTeam/OpenList.git (synced 2025-09-20 12:46:17 +08:00)

Compare commits (13 commits)
Commits:
3936e736e6, 68433d4f5b, cc16cb35bf, d3bc6321f4, cbbb5ad231, c1d03c5bcc, 61a8ed515f, bbb7c06504, 8bbdb272d4, c15ae94307, f1a5048558, 1fe26bff9a, 433dcd156b
.github/workflows/beta_release.yml (vendored) — 2 changed lines

@@ -87,7 +87,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: "1.24.5"
+          go-version: "1.25.0"

       - name: Setup web
         run: bash build.sh dev web
.github/workflows/build.yml (vendored) — 2 changed lines

@@ -33,7 +33,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: "1.24.5"
+          go-version: "1.25.0"

       - name: Setup web
         run: bash build.sh dev web
.github/workflows/release.yml (vendored) — 3 changed lines

@@ -46,7 +46,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: '1.24'
+          go-version: '1.25.0'

       - name: Checkout
         uses: actions/checkout@v4
@@ -73,4 +73,5 @@ jobs:
         with:
           files: build/compress/*
           prerelease: false
+          tag_name: ${{ github.event.release.tag_name }}
.github/workflows/release_docker.yml (vendored) — 4 changed lines

@@ -47,7 +47,7 @@ jobs:

       - uses: actions/setup-go@v5
         with:
-          go-version: 'stable'
+          go-version: '1.25.0'

       - name: Cache Musl
         id: cache-musl
@@ -87,7 +87,7 @@ jobs:

       - uses: actions/setup-go@v5
         with:
-          go-version: 'stable'
+          go-version: '1.25.0'

       - name: Cache Musl
         id: cache-musl
.github/workflows/test_docker.yml (vendored) — 2 changed lines

@@ -36,7 +36,7 @@ jobs:

       - uses: actions/setup-go@v5
         with:
-          go-version: 'stable'
+          go-version: '1.25.0'

       - name: Cache Musl
         id: cache-musl
README.md — 11 changed lines

@@ -74,7 +74,6 @@ Thank you for your support and understanding of the OpenList project.
 - [x] [Thunder](https://pan.xunlei.com)
 - [x] [Lanzou](https://www.lanzou.com)
 - [x] [ILanzou](https://www.ilanzou.com)
-- [x] [Aliyundrive share](https://www.alipan.com)
 - [x] [Google photo](https://photos.google.com)
 - [x] [Mega.nz](https://mega.nz)
 - [x] [Baidu photo](https://photo.baidu.com)
@@ -85,6 +84,16 @@ Thank you for your support and understanding of the OpenList project.
 - [x] [FeijiPan](https://www.feijipan.com)
 - [x] [dogecloud](https://www.dogecloud.com/product/oss)
 - [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
+- [x] [Chaoxing](https://www.chaoxing.com)
+- [x] [CNB](https://cnb.cool/)
+- [x] [Degoo](https://degoo.com)
+- [x] [Doubao](https://www.doubao.com)
+- [x] [Febbox](https://www.febbox.com)
+- [x] [GitHub](https://github.com)
+- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
+- [x] [Teldrive](https://github.com/tgdrive/teldrive)
+- [x] [Weiyun](https://www.weiyun.com)

 - [x] Easy to deploy and out-of-the-box
 - [x] File preview (PDF, markdown, code, plain text, ...)
 - [x] Image preview in gallery mode
README_cn.md — 10 changed lines

@@ -74,7 +74,6 @@ OpenList 是一个由 OpenList 团队独立维护的开源项目,遵循 AGPL-3
 - [x] [迅雷网盘](https://pan.xunlei.com)
 - [x] [蓝奏云](https://www.lanzou.com)
 - [x] [蓝奏云优享版](https://www.ilanzou.com)
-- [x] [阿里云盘分享](https://www.alipan.com)
 - [x] [Google 相册](https://photos.google.com)
 - [x] [Mega.nz](https://mega.nz)
 - [x] [百度相册](https://photo.baidu.com)
@@ -85,6 +84,15 @@ OpenList 是一个由 OpenList 团队独立维护的开源项目,遵循 AGPL-3
 - [x] [飞机盘](https://www.feijipan.com)
 - [x] [多吉云](https://www.dogecloud.com/product/oss)
 - [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
+- [x] [超星](https://www.chaoxing.com)
+- [x] [CNB](https://cnb.cool/)
+- [x] [Degoo](https://degoo.com)
+- [x] [豆包](https://www.doubao.com)
+- [x] [Febbox](https://www.febbox.com)
+- [x] [GitHub](https://github.com)
+- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
+- [x] [Teldrive](https://github.com/tgdrive/teldrive)
+- [x] [微云](https://www.weiyun.com)
 - [x] 部署方便,开箱即用
 - [x] 文件预览(PDF、markdown、代码、纯文本等)
 - [x] 画廊模式下的图片预览
README_ja.md — 10 changed lines

@@ -74,7 +74,6 @@ OpenListプロジェクトへのご支援とご理解をありがとうございます
 - [x] [Thunder](https://pan.xunlei.com)
 - [x] [Lanzou](https://www.lanzou.com)
 - [x] [ILanzou](https://www.ilanzou.com)
-- [x] [Aliyundrive share](https://www.alipan.com)
 - [x] [Google photo](https://photos.google.com)
 - [x] [Mega.nz](https://mega.nz)
 - [x] [Baidu photo](https://photo.baidu.com)
@@ -85,6 +84,15 @@ OpenListプロジェクトへのご支援とご理解をありがとうございます
 - [x] [FeijiPan](https://www.feijipan.com)
 - [x] [dogecloud](https://www.dogecloud.com/product/oss)
 - [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
+- [x] [Chaoxing](https://www.chaoxing.com)
+- [x] [CNB](https://cnb.cool/)
+- [x] [Degoo](https://degoo.com)
+- [x] [Doubao](https://www.doubao.com)
+- [x] [Febbox](https://www.febbox.com)
+- [x] [GitHub](https://github.com)
+- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
+- [x] [Teldrive](https://github.com/tgdrive/teldrive)
+- [x] [Weiyun](https://www.weiyun.com)
 - [x] 簡単にデプロイでき、すぐに使える
 - [x] ファイルプレビュー(PDF、markdown、コード、テキストなど)
 - [x] ギャラリーモードでの画像プレビュー
README_nl.md — 10 changed lines

@@ -74,7 +74,6 @@ Dank u voor uw ondersteuning en begrip
 - [x] [Thunder](https://pan.xunlei.com)
 - [x] [Lanzou](https://www.lanzou.com)
 - [x] [ILanzou](https://www.ilanzou.com)
-- [x] [Aliyundrive share](https://www.alipan.com)
 - [x] [Google photo](https://photos.google.com)
 - [x] [Mega.nz](https://mega.nz)
 - [x] [Baidu photo](https://photo.baidu.com)
@@ -85,6 +84,15 @@ Dank u voor uw ondersteuning en begrip
 - [x] [FeijiPan](https://www.feijipan.com)
 - [x] [dogecloud](https://www.dogecloud.com/product/oss)
 - [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
+- [x] [Chaoxing](https://www.chaoxing.com)
+- [x] [CNB](https://cnb.cool/)
+- [x] [Degoo](https://degoo.com)
+- [x] [Doubao](https://www.doubao.com)
+- [x] [Febbox](https://www.febbox.com)
+- [x] [GitHub](https://github.com)
+- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
+- [x] [Teldrive](https://github.com/tgdrive/teldrive)
+- [x] [Weiyun](https://www.weiyun.com)
 - [x] Eenvoudig te implementeren en direct te gebruiken
 - [x] Bestandsvoorbeeld (PDF, markdown, code, platte tekst, ...)
 - [x] Afbeeldingsvoorbeeld in galerijweergave
build.sh — 6 changed lines

@@ -236,7 +236,7 @@ BuildRelease() {
 BuildLoongGLIBC() {
     local target_abi="$2"
     local output_file="$1"
-    local oldWorldGoVersion="1.24.3"
+    local oldWorldGoVersion="1.25.0"

     if [ "$target_abi" = "abi1.0" ]; then
         echo building for linux-loong64-abi1.0
@@ -254,13 +254,13 @@ BuildLoongGLIBC() {

         # Download and setup patched Go compiler for old-world
         if ! curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
-            "https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/go${oldWorldGoVersion}.linux-amd64.tar.gz" \
+            "https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250821/go${oldWorldGoVersion}.linux-amd64.tar.gz" \
            -o go-loong64-abi1.0.tar.gz; then
            echo "Error: Failed to download patched Go compiler for old-world ABI1.0"
            if [ -n "$GITHUB_TOKEN" ]; then
                echo "Error output from curl:"
                curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
-                    "https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/go${oldWorldGoVersion}.linux-amd64.tar.gz" \
+                    "https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250821/go${oldWorldGoVersion}.linux-amd64.tar.gz" \
                    -o go-loong64-abi1.0.tar.gz || true
            fi
            return 1
@@ -337,6 +337,27 @@ func (d *Open115) OfflineList(ctx context.Context) (*sdk.OfflineTaskListResp, error) {
     return resp, nil
 }

+func (d *Open115) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
+    userInfo, err := d.client.UserInfo(ctx)
+    if err != nil {
+        return nil, err
+    }
+    total, err := userInfo.RtSpaceInfo.AllTotal.Size.Int64()
+    if err != nil {
+        return nil, err
+    }
+    free, err := userInfo.RtSpaceInfo.AllRemain.Size.Int64()
+    if err != nil {
+        return nil, err
+    }
+    return &model.StorageDetails{
+        DiskUsage: model.DiskUsage{
+            TotalSpace: uint64(total),
+            FreeSpace:  uint64(free),
+        },
+    }, nil
+}
+
 // func (d *Open115) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
 //     // TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
 //     return nil, errs.NotImplement
@@ -28,7 +28,7 @@ func (f File) CreateTime() time.Time {
 }

 func (f File) GetHash() utils.HashInfo {
-    return utils.HashInfo{}
+    return utils.NewHashInfo(utils.MD5, f.Etag)
 }

 func (f File) GetPath() string {
@@ -214,5 +214,20 @@ func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer
     return nil, fmt.Errorf("upload complete timeout")
 }

+func (d *Open123) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
+    userInfo, err := d.getUserInfo()
+    if err != nil {
+        return nil, err
+    }
+    total := userInfo.Data.SpacePermanent + userInfo.Data.SpaceTemp
+    free := total - userInfo.Data.SpaceUsed
+    return &model.StorageDetails{
+        DiskUsage: model.DiskUsage{
+            TotalSpace: total,
+            FreeSpace:  free,
+        },
+    }, nil
+}
+
 var _ driver.Driver = (*Open123)(nil)
 var _ driver.PutResult = (*Open123)(nil)
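The GetDetails additions for Open115 and Open123 above share one pattern: read the account's quota from the provider API, then report it as total and free bytes. Below is a minimal, self-contained sketch of that mapping; the quotaInfo type and the byte figures are invented for illustration, and only the total-minus-used derivation mirrors the diffs.

```go
package main

import "fmt"

// DiskUsage mirrors the shape the diffs populate: total and free space in bytes.
type DiskUsage struct {
	TotalSpace uint64
	FreeSpace  uint64
}

// quotaInfo is a hypothetical provider response; some APIs report "used",
// others report "remaining", so both derivations are shown.
type quotaInfo struct {
	Total     uint64
	Used      uint64
	Remaining uint64 // zero when the API only reports "used"
}

// toDiskUsage converts a quota response into DiskUsage, guarding against underflow.
func toDiskUsage(q quotaInfo) DiskUsage {
	free := q.Remaining
	if free == 0 && q.Total >= q.Used {
		free = q.Total - q.Used
	}
	return DiskUsage{TotalSpace: q.Total, FreeSpace: free}
}

func main() {
	q := quotaInfo{Total: 2 << 40, Used: 512 << 30} // 2 TiB total, 512 GiB used (example numbers)
	fmt.Printf("%+v\n", toDiskUsage(q))
}
```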
@@ -133,9 +133,9 @@ type UserInfoResp struct {
     // HeadImage      string `json:"headImage"`
     // Passport       string `json:"passport"`
     // Mail           string `json:"mail"`
-    // SpaceUsed      int64  `json:"spaceUsed"`
-    // SpacePermanent int64  `json:"spacePermanent"`
-    // SpaceTemp      int64  `json:"spaceTemp"`
+    SpaceUsed      uint64 `json:"spaceUsed"`
+    SpacePermanent uint64 `json:"spacePermanent"`
+    SpaceTemp      uint64 `json:"spaceTemp"`
     // SpaceTempExpr  int64  `json:"spaceTempExpr"`
     // Vip            bool   `json:"vip"`
     // DirectTraffic  int64  `json:"directTraffic"`
@@ -24,7 +24,7 @@ type File struct {
 }

 func (f File) GetHash() utils.HashInfo {
-    return utils.HashInfo{}
+    return utils.NewHashInfo(utils.MD5, f.Etag)
 }

 func (f File) GetPath() string {
@@ -1,7 +1,6 @@
 package _189_tv

 import (
-    "container/ring"
     "context"
     "net/http"
     "strconv"
@@ -12,6 +11,7 @@ import (
     "github.com/OpenListTeam/OpenList/v4/internal/driver"
     "github.com/OpenListTeam/OpenList/v4/internal/errs"
     "github.com/OpenListTeam/OpenList/v4/internal/model"
+    "github.com/OpenListTeam/OpenList/v4/pkg/cron"
     "github.com/go-resty/resty/v2"
 )

@@ -21,9 +21,10 @@ type Cloud189TV struct {
     client       *resty.Client
     tokenInfo    *AppSessionResp
     uploadThread int
-    familyTransferFolder    *ring.Ring
-    cleanFamilyTransferFile func()
     storageConfig driver.Config

+    TempUuid string
+    cron     *cron.Cron // 新增 cron 字段
 }

 func (y *Cloud189TV) Config() driver.Config {
@@ -79,10 +80,17 @@ func (y *Cloud189TV) Init(ctx context.Context) (err error) {
         }
     }

+    y.cron = cron.NewCron(time.Minute * 5)
+    y.cron.Do(y.keepAlive)
+
     return
 }

 func (y *Cloud189TV) Drop(ctx context.Context) error {
+    if y.cron != nil {
+        y.cron.Stop()
+        y.cron = nil
+    }
     return nil
 }

@@ -8,7 +8,6 @@ import (
 type Addition struct {
     driver.RootID
     AccessToken string `json:"access_token"`
-    TempUuid    string
     OrderBy        string `json:"order_by" type:"select" options:"filename,filesize,lastOpTime" default:"filename"`
     OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
     Type           string `json:"type" type:"select" options:"personal,family" default:"personal"`
@@ -66,6 +66,10 @@ func (y *Cloud189TV) AppKeySignatureHeader(url, method string) map[string]string
 }

 func (y *Cloud189TV) request(url, method string, callback base.ReqCallback, params map[string]string, resp interface{}, isFamily ...bool) ([]byte, error) {
+    return y.requestWithRetry(url, method, callback, params, resp, 0, isFamily...)
+}
+
+func (y *Cloud189TV) requestWithRetry(url, method string, callback base.ReqCallback, params map[string]string, resp interface{}, retryCount int, isFamily ...bool) ([]byte, error) {
     req := y.client.R().SetQueryParams(clientSuffix())

     if params != nil {
@@ -91,8 +95,23 @@ func (y *Cloud189TV) request(url, method string, callback base.ReqCallback, params

     if strings.Contains(res.String(), "userSessionBO is null") ||
         strings.Contains(res.String(), "InvalidSessionKey") {
+        // 限制重试次数,避免无限递归
+        if retryCount >= 3 {
+            y.Addition.AccessToken = ""
+            op.MustSaveDriverStorage(y)
+            return nil, errors.New("session expired after retry")
+        }
+
+        // 尝试刷新会话
+        if err := y.refreshSession(); err != nil {
+            // 如果刷新失败,说明AccessToken也已过期,需要重新登录
+            y.Addition.AccessToken = ""
+            op.MustSaveDriverStorage(y)
             return nil, errors.New("session expired")
         }
+        // 如果刷新成功,则重试原始请求(增加重试计数)
+        return y.requestWithRetry(url, method, callback, params, resp, retryCount+1, isFamily...)
+    }

     // 处理错误
     if erron.HasError() {
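The requestWithRetry change above caps session-refresh retries at three to avoid unbounded recursion. A stripped-down sketch of that control flow follows, with fake doRequest/refreshSession functions standing in for the real resty calls (all names here are illustrative, not the driver's API).

```go
package main

import (
	"errors"
	"fmt"
)

var calls int

// doRequest simulates a call that reports an expired session the first two times.
func doRequest() error {
	calls++
	if calls <= 2 {
		return errors.New("InvalidSessionKey")
	}
	return nil
}

// refreshSession simulates renewing the session; assume it succeeds here.
func refreshSession() error { return nil }

// requestWithRetry mirrors the diff's pattern: on a session error, refresh and
// retry with an incremented counter, giving up after three attempts.
func requestWithRetry(retryCount int) error {
	if err := doRequest(); err != nil {
		if retryCount >= 3 {
			return errors.New("session expired after retry")
		}
		if rerr := refreshSession(); rerr != nil {
			return errors.New("session expired")
		}
		return requestWithRetry(retryCount + 1)
	}
	return nil
}

func main() {
	fmt.Println(requestWithRetry(0), "after", calls, "attempts")
}
```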
@@ -211,7 +230,7 @@ func (y *Cloud189TV) login() (err error) {
     var erron RespErr
     var tokenInfo AppSessionResp
     if y.Addition.AccessToken == "" {
-        if y.Addition.TempUuid == "" {
+        if y.TempUuid == "" {
             // 获取登录参数
             var uuidInfo UuidInfoResp
             req.SetResult(&uuidInfo).SetError(&erron)
@@ -230,7 +249,7 @@ func (y *Cloud189TV) login() (err error) {
             if uuidInfo.Uuid == "" {
                 return errors.New("uuidInfo is empty")
             }
-            y.Addition.TempUuid = uuidInfo.Uuid
+            y.TempUuid = uuidInfo.Uuid
             op.MustSaveDriverStorage(y)

             // 展示二维码
@@ -258,7 +277,7 @@ func (y *Cloud189TV) login() (err error) {
             // Signature
             req.SetHeaders(y.AppKeySignatureHeader(ApiUrl+"/family/manage/qrcodeLoginResult.action",
                 http.MethodGet))
-            req.SetQueryParam("uuid", y.Addition.TempUuid)
+            req.SetQueryParam("uuid", y.TempUuid)
             _, err = req.Execute(http.MethodGet, ApiUrl+"/family/manage/qrcodeLoginResult.action")
             if err != nil {
                 return
@@ -270,7 +289,6 @@ func (y *Cloud189TV) login() (err error) {
                 return errors.New("E189AccessToken is empty")
             }
             y.Addition.AccessToken = accessTokenResp.E189AccessToken
-            y.Addition.TempUuid = ""
         }
     }
     // 获取SessionKey 和 SessionSecret
@@ -294,6 +312,44 @@ func (y *Cloud189TV) login() (err error) {
     return
 }

+// refreshSession 尝试使用现有的 AccessToken 刷新会话
+func (y *Cloud189TV) refreshSession() (err error) {
+    var erron RespErr
+    var tokenInfo AppSessionResp
+    reqb := y.client.R().SetQueryParams(clientSuffix())
+    reqb.SetResult(&tokenInfo).SetError(&erron)
+    // Signature
+    reqb.SetHeaders(y.AppKeySignatureHeader(ApiUrl+"/family/manage/loginFamilyMerge.action",
+        http.MethodGet))
+    reqb.SetQueryParam("e189AccessToken", y.Addition.AccessToken)
+    _, err = reqb.Execute(http.MethodGet, ApiUrl+"/family/manage/loginFamilyMerge.action")
+    if err != nil {
+        return
+    }
+
+    if erron.HasError() {
+        return &erron
+    }
+
+    y.tokenInfo = &tokenInfo
+    return nil
+}
+
+func (y *Cloud189TV) keepAlive() {
+    _, err := y.get(ApiUrl+"/keepUserSession.action", func(r *resty.Request) {
+        r.SetQueryParams(clientSuffix())
+    }, nil)
+    if err != nil {
+        utils.Log.Warnf("189tv: Failed to keep user session alive: %v", err)
+        // 如果keepAlive失败,尝试刷新session
+        if refreshErr := y.refreshSession(); refreshErr != nil {
+            utils.Log.Errorf("189tv: Failed to refresh session after keepAlive error: %v", refreshErr)
+        }
+    } else {
+        utils.Log.Debugf("189tv: User session kept alive successfully.")
+    }
+}
+
 func (y *Cloud189TV) RapidUpload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, isFamily bool, overwrite bool) (model.Obj, error) {
     fileMd5 := stream.GetHash().GetHash(utils.MD5)
     if len(fileMd5) < utils.MD5.Width {
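Both the 189 TV and 189 PC drivers register keepAlive with a five-minute cron in Init and stop it in Drop. The pkg/cron internals are not part of this diff, so the sketch below reproduces the same lifecycle (start on init, stop on drop, refresh on failure) with a plain time.Ticker; it is an assumption-level illustration, not the project's cron package.

```go
package main

import (
	"fmt"
	"time"
)

// keeper runs a periodic keep-alive until Stop is called, falling back to a
// refresh when the keep-alive ping fails (mirroring the keepAlive added above).
type keeper struct {
	ticker *time.Ticker
	done   chan struct{}
}

func startKeeper(interval time.Duration, ping, refresh func() error) *keeper {
	k := &keeper{ticker: time.NewTicker(interval), done: make(chan struct{})}
	go func() {
		for {
			select {
			case <-k.done:
				return
			case <-k.ticker.C:
				if err := ping(); err != nil {
					fmt.Println("keep-alive failed, refreshing session:", err)
					if rerr := refresh(); rerr != nil {
						fmt.Println("refresh failed:", rerr)
					}
				}
			}
		}
	}()
	return k
}

// Stop halts the ticker and ends the goroutine, as Drop does in the diff.
func (k *keeper) Stop() {
	k.ticker.Stop()
	close(k.done)
}

func main() {
	k := startKeeper(50*time.Millisecond,
		func() error { return nil }, // ping
		func() error { return nil }, // refresh
	)
	time.Sleep(200 * time.Millisecond)
	k.Stop()
}
```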
@@ -12,6 +12,7 @@ import (
     "github.com/OpenListTeam/OpenList/v4/internal/driver"
     "github.com/OpenListTeam/OpenList/v4/internal/errs"
     "github.com/OpenListTeam/OpenList/v4/internal/model"
+    "github.com/OpenListTeam/OpenList/v4/pkg/cron"
     "github.com/OpenListTeam/OpenList/v4/pkg/utils"
     "github.com/go-resty/resty/v2"
     "github.com/google/uuid"
@@ -21,11 +22,11 @@ type Cloud189PC struct {
     model.Storage
     Addition

-    identity string
-
     client *resty.Client

     loginParam *LoginParam
+    qrcodeParam *QRLoginParam
+
     tokenInfo *AppSessionResp

     uploadThread int
@@ -35,6 +36,7 @@ type Cloud189PC struct {

     storageConfig driver.Config
     ref           *Cloud189PC
+    cron          *cron.Cron
 }

 func (y *Cloud189PC) Config() driver.Config {
@@ -84,14 +86,22 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) {
         })
     }

-    // 避免重复登陆
-    identity := utils.GetMD5EncodeStr(y.Username + y.Password)
-    if !y.isLogin() || y.identity != identity {
-        y.identity = identity
+    // 先尝试用Token刷新,之后尝试登陆
+    if y.Addition.RefreshToken != "" {
+        y.tokenInfo = &AppSessionResp{RefreshToken: y.Addition.RefreshToken}
+        if err = y.refreshToken(); err != nil {
+            return
+        }
+    } else {
         if err = y.login(); err != nil {
             return
         }
     }

+    // 初始化并启动 cron 任务
+    y.cron = cron.NewCron(time.Duration(time.Minute * 5))
+    // 每5分钟执行一次 keepAlive
+    y.cron.Do(y.keepAlive)
+
     // 处理家庭云ID
@@ -128,6 +138,10 @@ func (d *Cloud189PC) InitReference(storage driver.Driver) error {

 func (y *Cloud189PC) Drop(ctx context.Context) error {
     y.ref = nil
+    if y.cron != nil {
+        y.cron.Stop()
+        y.cron = nil
+    }
     return nil
 }

@@ -80,6 +80,20 @@ func timestamp() int64 {
     return time.Now().UTC().UnixNano() / 1e6
 }

+// formatDate formats a time.Time object into the "YYYY-MM-DDHH:mm:ssSSS" format.
+func formatDate(t time.Time) string {
+    // The layout string "2006-01-0215:04:05.000" corresponds to:
+    // 2006 -> Year (YYYY)
+    // 01   -> Month (MM)
+    // 02   -> Day (DD)
+    // 15   -> Hour (HH)
+    // 04   -> Minute (mm)
+    // 05   -> Second (ss)
+    // 000  -> Millisecond (SSS) with leading zeros
+    // Note the lack of a separator between the date and hour, matching the desired output.
+    return t.Format("2006-01-0215:04:05.000")
+}
+
 func MustParseTime(str string) *time.Time {
     lastOpTime, _ := time.ParseInLocation("2006-01-02 15:04:05 -07", str+" +08", time.Local)
     return &lastOpTime
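Go date layouts are written against the fixed reference time Mon Jan 2 15:04:05 MST 2006, so the layout in formatDate above literally encodes "date, then time with no separator". A quick, runnable check of what that layout produces:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2025, 8, 21, 9, 5, 7, 42_000_000, time.UTC)
	// Same layout as formatDate in the diff: date, then time with no separator.
	fmt.Println(t.Format("2006-01-0215:04:05.000")) // prints 2025-08-2109:05:07.042
}
```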
@@ -6,9 +6,11 @@ import (
 )

 type Addition struct {
+    LoginType      string `json:"login_type" type:"select" options:"password,qrcode" default:"password" required:"true"`
     Username       string `json:"username" required:"true"`
     Password       string `json:"password" required:"true"`
     VCode          string `json:"validate_code"`
+    RefreshToken   string `json:"refresh_token" help:"To switch accounts, please clear this field"`
     driver.RootID
     OrderBy        string `json:"order_by" type:"select" options:"filename,filesize,lastOpTime" default:"filename"`
     OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
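The new LoginType and RefreshToken fields above carry json/type/options/default/help struct tags, which presumably drive OpenList's storage-settings form; that mapping is not shown in this diff. Reading such tags is plain reflection, as this generic, self-contained sketch shows (the Addition type here is a trimmed copy for illustration only).

```go
package main

import (
	"fmt"
	"reflect"
)

type Addition struct {
	LoginType string `json:"login_type" type:"select" options:"password,qrcode" default:"password" required:"true"`
	Username  string `json:"username" required:"true"`
}

func main() {
	t := reflect.TypeOf(Addition{})
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		// Each tag key is read independently; missing keys come back as "".
		fmt.Printf("%s: json=%q type=%q options=%q default=%q required=%q\n",
			f.Name,
			f.Tag.Get("json"), f.Tag.Get("type"),
			f.Tag.Get("options"), f.Tag.Get("default"), f.Tag.Get("required"))
	}
}
```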
@@ -68,15 +68,7 @@ func (e *RespErr) Error() string {
     return ""
 }

-// 登陆需要的参数
-type LoginParam struct {
-    // 加密后的用户名和密码
-    RsaUsername string
-    RsaPassword string
-
-    // rsa密钥
-    jRsaKey string
-
+type BaseLoginParam struct {
     // 请求头参数
     Lt    string
     ReqId string
@@ -88,6 +80,27 @@ type LoginParam struct {
     CaptchaToken string
 }

+// QRLoginParam 用于暂存二维码登录过程中的参数
+type QRLoginParam struct {
+    BaseLoginParam
+
+    UUID       string `json:"uuid"`
+    EncodeUUID string `json:"encodeuuid"`
+    EncryUUID  string `json:"encryuuid"`
+}
+
+// 登陆需要的参数
+type LoginParam struct {
+    // 加密后的用户名和密码
+    RsaUsername string
+    RsaPassword string
+
+    // rsa密钥
+    jRsaKey string
+
+    BaseLoginParam
+}
+
 // 登陆加密相关
 type EncryptConfResp struct {
     Result int `json:"result"`
@@ -29,6 +29,7 @@ import (
     "github.com/OpenListTeam/OpenList/v4/internal/stream"
     "github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
     "github.com/OpenListTeam/OpenList/v4/pkg/utils"
+    "github.com/skip2/go-qrcode"

     "github.com/avast/retry-go"
     "github.com/go-resty/resty/v2"
@@ -54,6 +55,9 @@ const (
     MAC        = "TELEMAC"

     CHANNEL_ID = "web_cloud.189.cn"
+
+    // Error codes
+    UserInvalidOpenTokenError = "UserInvalidOpenToken"
 )

 func (y *Cloud189PC) SignatureHeader(url, method, params string, isFamily bool) map[string]string {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (y *Cloud189PC) login() (err error) {
|
func (y *Cloud189PC) login() error {
|
||||||
|
if y.LoginType == "qrcode" {
|
||||||
|
return y.loginByQRCode()
|
||||||
|
}
|
||||||
|
return y.loginByPassword()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (y *Cloud189PC) loginByPassword() (err error) {
|
||||||
// 初始化登陆所需参数
|
// 初始化登陆所需参数
|
||||||
if y.loginParam == nil {
|
if y.loginParam == nil {
|
||||||
if err = y.initLoginParam(); err != nil {
|
if err = y.initLoginParam(); err != nil {
|
||||||
@ -278,11 +289,16 @@ func (y *Cloud189PC) login() (err error) {
|
|||||||
// 销毁登陆参数
|
// 销毁登陆参数
|
||||||
y.loginParam = nil
|
y.loginParam = nil
|
||||||
// 遇到错误,重新加载登陆参数(刷新验证码)
|
// 遇到错误,重新加载登陆参数(刷新验证码)
|
||||||
if err != nil && y.NoUseOcr {
|
if err != nil {
|
||||||
|
if y.NoUseOcr {
|
||||||
if err1 := y.initLoginParam(); err1 != nil {
|
if err1 := y.initLoginParam(); err1 != nil {
|
||||||
err = fmt.Errorf("err1: %s \nerr2: %s", err, err1)
|
err = fmt.Errorf("err1: %s \nerr2: %s", err, err1)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
y.Status = err.Error()
|
||||||
|
op.MustSaveDriverStorage(y)
|
||||||
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
param := y.loginParam
|
param := y.loginParam
|
||||||
@ -336,14 +352,105 @@ func (y *Cloud189PC) login() (err error) {
|
|||||||
err = fmt.Errorf(tokenInfo.ResMessage)
|
err = fmt.Errorf(tokenInfo.ResMessage)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
y.Addition.RefreshToken = tokenInfo.RefreshToken
|
||||||
y.tokenInfo = &tokenInfo
|
y.tokenInfo = &tokenInfo
|
||||||
|
op.MustSaveDriverStorage(y)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
/* 初始化登陆需要的参数
|
func (y *Cloud189PC) loginByQRCode() error {
|
||||||
* 如果遇到验证码返回错误
|
if y.qrcodeParam == nil {
|
||||||
*/
|
if err := y.initQRCodeParam(); err != nil {
|
||||||
func (y *Cloud189PC) initLoginParam() error {
|
// 二维码也通过错误返回
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var state struct {
|
||||||
|
Status int `json:"status"`
|
||||||
|
RedirectUrl string `json:"redirectUrl"`
|
||||||
|
Msg string `json:"msg"`
|
||||||
|
}
|
||||||
|
|
||||||
|
now := time.Now()
|
||||||
|
_, err := y.client.R().
|
||||||
|
SetHeaders(map[string]string{
|
||||||
|
"Referer": AUTH_URL,
|
||||||
|
"Reqid": y.qrcodeParam.ReqId,
|
||||||
|
"lt": y.qrcodeParam.Lt,
|
||||||
|
}).
|
||||||
|
SetFormData(map[string]string{
|
||||||
|
"appId": APP_ID,
|
||||||
|
"clientType": CLIENT_TYPE,
|
||||||
|
"returnUrl": RETURN_URL,
|
||||||
|
"paramId": y.qrcodeParam.ParamId,
|
||||||
|
"uuid": y.qrcodeParam.UUID,
|
||||||
|
"encryuuid": y.qrcodeParam.EncryUUID,
|
||||||
|
"date": formatDate(now),
|
||||||
|
"timeStamp": fmt.Sprint(now.UTC().UnixNano() / 1e6),
|
||||||
|
}).
|
||||||
|
ForceContentType("application/json;charset=UTF-8").
|
||||||
|
SetResult(&state).
|
||||||
|
Post(AUTH_URL + "/api/logbox/oauth2/qrcodeLoginState.do")
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to check QR code state: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch state.Status {
|
||||||
|
case 0: // 登录成功
|
||||||
|
var tokenInfo AppSessionResp
|
||||||
|
_, err = y.client.R().
|
||||||
|
SetResult(&tokenInfo).
|
||||||
|
SetQueryParams(clientSuffix()).
|
||||||
|
SetQueryParam("redirectURL", state.RedirectUrl).
|
||||||
|
Post(API_URL + "/getSessionForPC.action")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if tokenInfo.ResCode != 0 {
|
||||||
|
return fmt.Errorf(tokenInfo.ResMessage)
|
||||||
|
}
|
||||||
|
y.Addition.RefreshToken = tokenInfo.RefreshToken
|
||||||
|
y.tokenInfo = &tokenInfo
|
||||||
|
op.MustSaveDriverStorage(y)
|
||||||
|
return nil
|
||||||
|
case -11001: // 二维码过期
|
||||||
|
y.qrcodeParam = nil
|
||||||
|
return errors.New("QR code expired, please try again")
|
||||||
|
case -106: // 等待扫描
|
||||||
|
return y.genQRCode("QR code has not been scanned yet, please scan and save again")
|
||||||
|
case -11002: // 等待确认
|
||||||
|
return y.genQRCode("QR code has been scanned, please confirm the login on your phone and save again")
|
||||||
|
default: // 其他错误
|
||||||
|
y.qrcodeParam = nil
|
||||||
|
return fmt.Errorf("QR code login failed with status %d: %s", state.Status, state.Msg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (y *Cloud189PC) genQRCode(text string) error {
|
||||||
|
// 展示二维码
|
||||||
|
qrTemplate := `<body>
|
||||||
|
state: %s
|
||||||
|
<br><img src="data:image/jpeg;base64,%s"/>
|
||||||
|
<br>Or Click here: <a href="%s">Login</a>
|
||||||
|
</body>`
|
||||||
|
|
||||||
|
// Generate QR code
|
||||||
|
qrCode, err := qrcode.Encode(y.qrcodeParam.UUID, qrcode.Medium, 256)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to generate QR code: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Encode QR code to base64
|
||||||
|
qrCodeBase64 := base64.StdEncoding.EncodeToString(qrCode)
|
||||||
|
|
||||||
|
// Create the HTML page
|
||||||
|
qrPage := fmt.Sprintf(qrTemplate, text, qrCodeBase64, y.qrcodeParam.UUID)
|
||||||
|
return fmt.Errorf("need verify: \n%s", qrPage)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (y *Cloud189PC) initBaseParams() (*BaseLoginParam, error) {
|
||||||
// 清除cookie
|
// 清除cookie
|
||||||
jar, _ := cookiejar.New(nil)
|
jar, _ := cookiejar.New(nil)
|
||||||
y.client.SetCookieJar(jar)
|
y.client.SetCookieJar(jar)
|
||||||
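genQRCode above embeds the image produced by github.com/skip2/go-qrcode as a base64 data URI inside an error message so the UI can render it for the user to scan. A self-contained sketch of just that encode-then-embed step (the login UUID here is a placeholder, not a real value from the API):

```go
package main

import (
	"encoding/base64"
	"fmt"

	qrcode "github.com/skip2/go-qrcode"
)

func main() {
	uuid := "example-login-uuid" // placeholder; the driver uses the UUID returned by getUUID.do
	png, err := qrcode.Encode(uuid, qrcode.Medium, 256)
	if err != nil {
		panic(err)
	}
	b64 := base64.StdEncoding.EncodeToString(png)
	// The diff wraps this in a small HTML page and returns it as a "need verify" error.
	fmt.Printf("<img src=\"data:image/png;base64,%s...\"/>\n", b64[:32])
}
```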
@@ -357,17 +464,30 @@ func (y *Cloud189PC) initLoginParam() error {
         }).
         Get(WEB_URL + "/api/portal/unifyLoginForPC.action")
     if err != nil {
-        return err
+        return nil, err
     }

-    param := LoginParam{
+    return &BaseLoginParam{
         CaptchaToken: regexp.MustCompile(`'captchaToken' value='(.+?)'`).FindStringSubmatch(res.String())[1],
         Lt:           regexp.MustCompile(`lt = "(.+?)"`).FindStringSubmatch(res.String())[1],
         ParamId:      regexp.MustCompile(`paramId = "(.+?)"`).FindStringSubmatch(res.String())[1],
         ReqId:        regexp.MustCompile(`reqId = "(.+?)"`).FindStringSubmatch(res.String())[1],
-        // jRsaKey: regexp.MustCompile(`"j_rsaKey" value="(.+?)"`).FindStringSubmatch(res.String())[1],
+    }, nil
+}
+
+/* 初始化登陆需要的参数
+ * 如果遇到验证码返回错误
+ */
+func (y *Cloud189PC) initLoginParam() error {
+    y.loginParam = nil
+
+    baseParam, err := y.initBaseParams()
+    if err != nil {
+        return err
     }

+    y.loginParam = &LoginParam{BaseLoginParam: *baseParam}
+
     // 获取rsa公钥
     var encryptConf EncryptConfResp
     _, err = y.client.R().
@@ -378,18 +498,17 @@ func (y *Cloud189PC) initLoginParam() error {
         return err
     }

-    param.jRsaKey = fmt.Sprintf("-----BEGIN PUBLIC KEY-----\n%s\n-----END PUBLIC KEY-----", encryptConf.Data.PubKey)
-    param.RsaUsername = encryptConf.Data.Pre + RsaEncrypt(param.jRsaKey, y.Username)
-    param.RsaPassword = encryptConf.Data.Pre + RsaEncrypt(param.jRsaKey, y.Password)
-    y.loginParam = &param
+    y.loginParam.jRsaKey = fmt.Sprintf("-----BEGIN PUBLIC KEY-----\n%s\n-----END PUBLIC KEY-----", encryptConf.Data.PubKey)
+    y.loginParam.RsaUsername = encryptConf.Data.Pre + RsaEncrypt(y.loginParam.jRsaKey, y.Username)
+    y.loginParam.RsaPassword = encryptConf.Data.Pre + RsaEncrypt(y.loginParam.jRsaKey, y.Password)

     // 判断是否需要验证码
     resp, err := y.client.R().
-        SetHeader("REQID", param.ReqId).
+        SetHeader("REQID", y.loginParam.ReqId).
         SetFormData(map[string]string{
             "appKey":      APP_ID,
             "accountType": ACCOUNT_TYPE,
-            "userName":    param.RsaUsername,
+            "userName":    y.loginParam.RsaUsername,
         }).Post(AUTH_URL + "/api/logbox/oauth2/needcaptcha.do")
     if err != nil {
         return err
@@ -401,8 +520,8 @@ func (y *Cloud189PC) initLoginParam() error {
     // 拉取验证码
     imgRes, err := y.client.R().
         SetQueryParams(map[string]string{
-            "token": param.CaptchaToken,
-            "REQID": param.ReqId,
+            "token": y.loginParam.CaptchaToken,
+            "REQID": y.loginParam.ReqId,
             "rnd":   fmt.Sprint(timestamp()),
         }).
         Get(AUTH_URL + "/api/logbox/oauth2/picCaptcha.do")
@@ -429,10 +548,38 @@ func (y *Cloud189PC) initLoginParam() error {
     return nil
 }

+// getQRCode 获取并返回二维码
+func (y *Cloud189PC) initQRCodeParam() (err error) {
+    y.qrcodeParam = nil
+
+    baseParam, err := y.initBaseParams()
+    if err != nil {
+        return err
+    }
+
+    var qrcodeParam QRLoginParam
+    _, err = y.client.R().
+        SetFormData(map[string]string{"appId": APP_ID}).
+        ForceContentType("application/json;charset=UTF-8").
+        SetResult(&qrcodeParam).
+        Post(AUTH_URL + "/api/logbox/oauth2/getUUID.do")
+    if err != nil {
+        return err
+    }
+    qrcodeParam.BaseLoginParam = *baseParam
+    y.qrcodeParam = &qrcodeParam
+
+    return y.genQRCode("please scan the QR code with the 189 Cloud app, then save the settings again.")
+}
+
 // 刷新会话
 func (y *Cloud189PC) refreshSession() (err error) {
+    return y.refreshSessionWithRetry(0)
+}
+
+func (y *Cloud189PC) refreshSessionWithRetry(retryCount int) (err error) {
     if y.ref != nil {
-        return y.ref.refreshSession()
+        return y.ref.refreshSessionWithRetry(retryCount)
     }
     var erron RespErr
     var userSessionResp UserSessionResp
@@ -449,24 +596,87 @@ func (y *Cloud189PC) refreshSession() (err error) {
         return err
     }

-    // 错误影响正常访问,下线该储存
-    defer func() {
-        if err != nil {
-            y.GetStorage().SetStatus(fmt.Sprintf("%+v", err.Error()))
-            op.MustSaveDriverStorage(y)
-        }
-    }()
+    // token生效刷新token

     if erron.HasError() {
-        if erron.ResCode == "UserInvalidOpenToken" {
-            if err = y.login(); err != nil {
-                return err
-            }
+        if erron.ResCode == UserInvalidOpenTokenError {
+            return y.refreshTokenWithRetry(retryCount)
         }
         return &erron
     }
     y.tokenInfo.UserSessionResp = userSessionResp
-    return
+    return nil
+}
+
+// refreshToken 刷新token,失败时返回错误,不再直接调用login
+func (y *Cloud189PC) refreshToken() (err error) {
+    return y.refreshTokenWithRetry(0)
+}
+
+func (y *Cloud189PC) refreshTokenWithRetry(retryCount int) (err error) {
+    if y.ref != nil {
+        return y.ref.refreshTokenWithRetry(retryCount)
+    }
+
+    // 限制重试次数,避免无限递归
+    if retryCount >= 3 {
+        if y.Addition.RefreshToken != "" {
+            y.Addition.RefreshToken = ""
+            op.MustSaveDriverStorage(y)
+        }
+        return errors.New("refresh token failed after maximum retries")
+    }
+
+    var erron RespErr
+    var tokenInfo AppSessionResp
+    _, err = y.client.R().
+        SetResult(&tokenInfo).
+        ForceContentType("application/json;charset=UTF-8").
+        SetError(&erron).
+        SetFormData(map[string]string{
+            "clientId":     APP_ID,
+            "refreshToken": y.tokenInfo.RefreshToken,
+            "grantType":    "refresh_token",
+            "format":       "json",
+        }).
+        Post(AUTH_URL + "/api/oauth2/refreshToken.do")
+    if err != nil {
+        return err
+    }
+
+    // 如果刷新失败,返回错误给上层处理
+    if erron.HasError() {
+        if y.Addition.RefreshToken != "" {
+            y.Addition.RefreshToken = ""
+            op.MustSaveDriverStorage(y)
+        }
+
+        // 根据登录类型决定下一步行为
+        if y.LoginType == "qrcode" {
+            return errors.New("QR code session has expired, please re-scan the code to log in")
+        }
+        // 密码登录模式下,尝试回退到完整登录
+        return y.login()
+    }
+
+    y.Addition.RefreshToken = tokenInfo.RefreshToken
+    y.tokenInfo = &tokenInfo
+    op.MustSaveDriverStorage(y)
+    return y.refreshSessionWithRetry(retryCount + 1)
+}
+
+func (y *Cloud189PC) keepAlive() {
+    _, err := y.get(API_URL+"/keepUserSession.action", func(r *resty.Request) {
+        r.SetQueryParams(clientSuffix())
+    }, nil)
+    if err != nil {
+        utils.Log.Warnf("189pc: Failed to keep user session alive: %v", err)
+        // 如果keepAlive失败,尝试刷新session
+        if refreshErr := y.refreshSession(); refreshErr != nil {
+            utils.Log.Errorf("189pc: Failed to refresh session after keepAlive error: %v", refreshErr)
+        }
+    } else {
+        utils.Log.Debugf("189pc: User session kept alive successfully.")
+    }
 }

 // 普通上传
|
|||||||
|
|
||||||
// step.4 上传切片
|
// step.4 上传切片
|
||||||
uploadUrl := uploadUrls[0]
|
uploadUrl := uploadUrls[0]
|
||||||
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false,
|
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, rateLimitedRd, isFamily)
|
||||||
driver.NewLimitedUploadStream(ctx, rateLimitedRd), isFamily)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -79,21 +79,45 @@ func (d *Alias) Get(ctx context.Context, path string) (model.Obj, error) {
|
|||||||
if !ok {
|
if !ok {
|
||||||
return nil, errs.ObjectNotFound
|
return nil, errs.ObjectNotFound
|
||||||
}
|
}
|
||||||
|
var ret *model.Object
|
||||||
|
provider := ""
|
||||||
for _, dst := range dsts {
|
for _, dst := range dsts {
|
||||||
obj, err := fs.Get(ctx, stdpath.Join(dst, sub), &fs.GetArgs{NoLog: true})
|
rawPath := stdpath.Join(dst, sub)
|
||||||
|
obj, err := fs.Get(ctx, rawPath, &fs.GetArgs{NoLog: true})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
return &model.Object{
|
storage, err := fs.GetStorage(rawPath, &fs.GetStoragesArgs{})
|
||||||
|
if ret == nil {
|
||||||
|
ret = &model.Object{
|
||||||
Path: path,
|
Path: path,
|
||||||
Name: obj.GetName(),
|
Name: obj.GetName(),
|
||||||
Size: obj.GetSize(),
|
Size: obj.GetSize(),
|
||||||
Modified: obj.ModTime(),
|
Modified: obj.ModTime(),
|
||||||
IsFolder: obj.IsDir(),
|
IsFolder: obj.IsDir(),
|
||||||
HashInfo: obj.GetHash(),
|
HashInfo: obj.GetHash(),
|
||||||
|
}
|
||||||
|
if !d.ProviderPassThrough || err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
provider = storage.Config().Name
|
||||||
|
} else if err != nil || provider != storage.GetStorage().Driver {
|
||||||
|
provider = ""
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ret == nil {
|
||||||
|
return nil, errs.ObjectNotFound
|
||||||
|
}
|
||||||
|
if provider != "" {
|
||||||
|
return &model.ObjectProvider{
|
||||||
|
Object: *ret,
|
||||||
|
Provider: model.Provider{
|
||||||
|
Provider: provider,
|
||||||
|
},
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
return nil, errs.ObjectNotFound
|
return ret, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Alias) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
func (d *Alias) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
||||||
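With ProviderPassThrough enabled, Get only reports a provider name when every destination that resolves the path uses the same driver; a mismatch or a storage-lookup error clears it. That consensus rule in isolation, using plain strings instead of storage objects:

```go
package main

import "fmt"

// consensusProvider returns the driver name only if every resolved destination
// agrees on it, mirroring the pass-through logic added to Alias.Get.
func consensusProvider(drivers []string) string {
	provider := ""
	for i, d := range drivers {
		if i == 0 {
			provider = d
			continue
		}
		if d != provider {
			return "" // disagreement: hide the provider
		}
	}
	return provider
}

func main() {
	fmt.Printf("%q\n", consensusProvider([]string{"115 Open", "115 Open"})) // "115 Open"
	fmt.Printf("%q\n", consensusProvider([]string{"115 Open", "Local"}))    // ""
}
```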
@@ -186,6 +210,35 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
     return nil, errs.ObjectNotFound
 }

+func (d *Alias) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
+    root, sub := d.getRootAndPath(args.Obj.GetPath())
+    dsts, ok := d.pathMap[root]
+    if !ok {
+        return nil, errs.ObjectNotFound
+    }
+    for _, dst := range dsts {
+        rawPath := stdpath.Join(dst, sub)
+        storage, actualPath, err := op.GetStorageAndActualPath(rawPath)
+        if err != nil {
+            continue
+        }
+        other, ok := storage.(driver.Other)
+        if !ok {
+            continue
+        }
+        obj, err := op.GetUnwrap(ctx, storage, actualPath)
+        if err != nil {
+            continue
+        }
+        return other.Other(ctx, model.OtherArgs{
+            Obj:    obj,
+            Method: args.Method,
+            Data:   args.Data,
+        })
+    }
+    return nil, errs.NotImplement
+}
+
 func (d *Alias) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
     if !d.Writable {
         return errs.PermissionDenied
@@ -15,6 +15,7 @@ type Addition struct {
     DownloadConcurrency int  `json:"download_concurrency" default:"0" required:"false" type:"number" help:"Need to enable proxy"`
     DownloadPartSize    int  `json:"download_part_size" default:"0" type:"number" required:"false" help:"Need to enable proxy. Unit: KB"`
     Writable            bool `json:"writable" type:"bool" default:"false"`
+    ProviderPassThrough bool `json:"provider_pass_through" type:"bool" default:"false"`
 }

 var config = driver.Config{
@@ -291,6 +291,21 @@ func (d *AliyundriveOpen) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
     return resp, nil
 }

+func (d *AliyundriveOpen) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
+    res, err := d.request(ctx, limiterOther, "/adrive/v1.0/user/getSpaceInfo", http.MethodPost, nil)
+    if err != nil {
+        return nil, err
+    }
+    total := utils.Json.Get(res, "personal_space_info", "total_size").ToUint64()
+    used := utils.Json.Get(res, "personal_space_info", "used_size").ToUint64()
+    return &model.StorageDetails{
+        DiskUsage: model.DiskUsage{
+            TotalSpace: total,
+            FreeSpace:  total - used,
+        },
+    }, nil
+}
+
 var _ driver.Driver = (*AliyundriveOpen)(nil)
 var _ driver.MkdirResult = (*AliyundriveOpen)(nil)
 var _ driver.MoveResult = (*AliyundriveOpen)(nil)
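GetDetails for AliyundriveOpen pulls personal_space_info.total_size and used_size out of the raw getSpaceInfo response with the project's JSON helper. The equivalent extraction with only the standard library, against a made-up payload (the numbers are examples, the field names come from the diff):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type spaceInfoResp struct {
	PersonalSpaceInfo struct {
		UsedSize  uint64 `json:"used_size"`
		TotalSize uint64 `json:"total_size"`
	} `json:"personal_space_info"`
}

func main() {
	raw := []byte(`{"personal_space_info":{"used_size":107374182400,"total_size":2199023255552}}`)
	var r spaceInfoResp
	if err := json.Unmarshal(raw, &r); err != nil {
		panic(err)
	}
	total, used := r.PersonalSpaceInfo.TotalSize, r.PersonalSpaceInfo.UsedSize
	fmt.Println("total:", total, "free:", total-used)
}
```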
@@ -20,8 +20,10 @@ import (
     _ "github.com/OpenListTeam/OpenList/v4/drivers/baidu_netdisk"
     _ "github.com/OpenListTeam/OpenList/v4/drivers/baidu_photo"
     _ "github.com/OpenListTeam/OpenList/v4/drivers/chaoxing"
+    _ "github.com/OpenListTeam/OpenList/v4/drivers/chunk"
     _ "github.com/OpenListTeam/OpenList/v4/drivers/cloudreve"
     _ "github.com/OpenListTeam/OpenList/v4/drivers/cloudreve_v4"
+    _ "github.com/OpenListTeam/OpenList/v4/drivers/cnb_releases"
     _ "github.com/OpenListTeam/OpenList/v4/drivers/crypt"
     _ "github.com/OpenListTeam/OpenList/v4/drivers/degoo"
     _ "github.com/OpenListTeam/OpenList/v4/drivers/doubao"
@@ -364,4 +364,12 @@ func (d *BaiduNetdisk) uploadSlice(ctx context.Context, params map[string]string
     return nil
 }

+func (d *BaiduNetdisk) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
+    du, err := d.quota()
+    if err != nil {
+        return nil, err
+    }
+    return &model.StorageDetails{DiskUsage: *du}, nil
+}
+
 var _ driver.Driver = (*BaiduNetdisk)(nil)
@@ -189,3 +189,12 @@ type PrecreateResp struct {
     // return_type=2
     File File `json:"info"`
 }
+
+type QuotaResp struct {
+    Errno     int    `json:"errno"`
+    RequestId int64  `json:"request_id"`
+    Total     uint64 `json:"total"`
+    Used      uint64 `json:"used"`
+    //Free    uint64 `json:"free"`
+    //Expire  bool   `json:"expire"`
+}
@@ -381,6 +381,18 @@ func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 {
     return maxSliceSize
 }

+func (d *BaiduNetdisk) quota() (*model.DiskUsage, error) {
+    var resp QuotaResp
+    _, err := d.request("https://pan.baidu.com/api/quota", http.MethodGet, nil, &resp)
+    if err != nil {
+        return nil, err
+    }
+    return &model.DiskUsage{
+        TotalSpace: resp.Total,
+        FreeSpace:  resp.Total - resp.Used,
+    }, nil
+}
+
 // func encodeURIComponent(str string) string {
 //     r := url.QueryEscape(str)
 //     r = strings.ReplaceAll(r, "+", "%20")
488 drivers/chunk/driver.go Normal file
@ -0,0 +1,488 @@
package chunk

import (
    "bytes"
    "context"
    "errors"
    "fmt"
    "io"
    stdpath "path"
    "strconv"
    "strings"

    "github.com/OpenListTeam/OpenList/v4/internal/driver"
    "github.com/OpenListTeam/OpenList/v4/internal/errs"
    "github.com/OpenListTeam/OpenList/v4/internal/fs"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/internal/op"
    "github.com/OpenListTeam/OpenList/v4/internal/sign"
    "github.com/OpenListTeam/OpenList/v4/internal/stream"
    "github.com/OpenListTeam/OpenList/v4/pkg/http_range"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
    "github.com/OpenListTeam/OpenList/v4/server/common"
)

type Chunk struct {
    model.Storage
    Addition
}

func (d *Chunk) Config() driver.Config {
    return config
}

func (d *Chunk) GetAddition() driver.Additional {
    return &d.Addition
}

func (d *Chunk) Init(ctx context.Context) error {
    if d.PartSize <= 0 {
        return errors.New("part size must be positive")
    }
    d.RemotePath = utils.FixAndCleanPath(d.RemotePath)
    return nil
}

func (d *Chunk) Drop(ctx context.Context) error {
    return nil
}

func (d *Chunk) Get(ctx context.Context, path string) (model.Obj, error) {
    if utils.PathEqual(path, "/") {
        return &model.Object{
            Name:     "Root",
            IsFolder: true,
            Path:     "/",
        }, nil
    }
    remoteStorage, remoteActualPath, err := op.GetStorageAndActualPath(d.RemotePath)
    if err != nil {
        return nil, err
    }
    remoteActualPath = stdpath.Join(remoteActualPath, path)
    if remoteObj, err := op.Get(ctx, remoteStorage, remoteActualPath); err == nil {
        return &model.Object{
            Path:     path,
            Name:     remoteObj.GetName(),
            Size:     remoteObj.GetSize(),
            Modified: remoteObj.ModTime(),
            IsFolder: remoteObj.IsDir(),
            HashInfo: remoteObj.GetHash(),
        }, nil
    }

    remoteActualDir, name := stdpath.Split(remoteActualPath)
    chunkName := "[openlist_chunk]" + name
    chunkObjs, err := op.List(ctx, remoteStorage, stdpath.Join(remoteActualDir, chunkName), model.ListArgs{})
    if err != nil {
        return nil, err
    }
    var totalSize int64 = 0
    // part 0 must exist
    chunkSizes := []int64{-1}
    h := make(map[*utils.HashType]string)
    var first model.Obj
    for _, o := range chunkObjs {
        if o.IsDir() {
            continue
        }
        if after, ok := strings.CutPrefix(o.GetName(), "hash_"); ok {
            hn, value, ok := strings.Cut(strings.TrimSuffix(after, d.CustomExt), "_")
            if ok {
                ht, ok := utils.GetHashByName(hn)
                if ok {
                    h[ht] = value
                }
            }
            continue
        }
        idx, err := strconv.Atoi(strings.TrimSuffix(o.GetName(), d.CustomExt))
        if err != nil {
            continue
        }
        totalSize += o.GetSize()
        if len(chunkSizes) > idx {
            if idx == 0 {
                first = o
            }
            chunkSizes[idx] = o.GetSize()
        } else if len(chunkSizes) == idx {
            chunkSizes = append(chunkSizes, o.GetSize())
        } else {
            newChunkSizes := make([]int64, idx+1)
            copy(newChunkSizes, chunkSizes)
            chunkSizes = newChunkSizes
            chunkSizes[idx] = o.GetSize()
        }
    }
    // check that part 0 is not -1, so empty files are still supported
    // if there is more than one part, the last part cannot be 0
    // so only the middle parts are checked for 0
    for i, l := 0, len(chunkSizes)-2; ; i++ {
        if i == 0 {
            if chunkSizes[i] == -1 {
                return nil, fmt.Errorf("chunk part[%d] are missing", i)
            }
        } else if chunkSizes[i] == 0 {
            return nil, fmt.Errorf("chunk part[%d] are missing", i)
        }
        if i >= l {
            break
        }
    }
    reqDir, _ := stdpath.Split(path)
    objRes := chunkObject{
        Object: model.Object{
            Path:     stdpath.Join(reqDir, chunkName),
            Name:     name,
            Size:     totalSize,
            Modified: first.ModTime(),
            Ctime:    first.CreateTime(),
        },
        chunkSizes: chunkSizes,
    }
    if len(h) > 0 {
        objRes.HashInfo = utils.NewHashInfoByMap(h)
    }
    return &objRes, nil
}

func (d *Chunk) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
    remoteStorage, remoteActualPath, err := op.GetStorageAndActualPath(d.RemotePath)
    if err != nil {
        return nil, err
    }
    remoteActualDir := stdpath.Join(remoteActualPath, dir.GetPath())
    remoteObjs, err := op.List(ctx, remoteStorage, remoteActualDir, model.ListArgs{
        ReqPath: args.ReqPath,
        Refresh: args.Refresh,
    })
    if err != nil {
        return nil, err
    }
    result := make([]model.Obj, 0, len(remoteObjs))
    for _, obj := range remoteObjs {
        rawName := obj.GetName()
        if obj.IsDir() {
            if name, ok := strings.CutPrefix(rawName, "[openlist_chunk]"); ok {
                chunkObjs, err := op.List(ctx, remoteStorage, stdpath.Join(remoteActualDir, rawName), model.ListArgs{
                    ReqPath: stdpath.Join(args.ReqPath, rawName),
                    Refresh: args.Refresh,
                })
                if err != nil {
                    return nil, err
                }
                totalSize := int64(0)
                h := make(map[*utils.HashType]string)
                first := obj
                for _, o := range chunkObjs {
                    if o.IsDir() {
                        continue
                    }
                    if after, ok := strings.CutPrefix(strings.TrimSuffix(o.GetName(), d.CustomExt), "hash_"); ok {
                        hn, value, ok := strings.Cut(after, "_")
                        if ok {
                            ht, ok := utils.GetHashByName(hn)
                            if ok {
                                h[ht] = value
                            }
                            continue
                        }
                    }
                    idx, err := strconv.Atoi(strings.TrimSuffix(o.GetName(), d.CustomExt))
                    if err != nil {
                        continue
                    }
                    if idx == 0 {
                        first = o
                    }
                    totalSize += o.GetSize()
                }
                objRes := model.Object{
                    Name:     name,
                    Size:     totalSize,
                    Modified: first.ModTime(),
                    Ctime:    first.CreateTime(),
                }
                if len(h) > 0 {
                    objRes.HashInfo = utils.NewHashInfoByMap(h)
                }
                if !d.Thumbnail {
                    result = append(result, &objRes)
                } else {
                    thumbPath := stdpath.Join(args.ReqPath, ".thumbnails", name+".webp")
                    thumb := fmt.Sprintf("%s/d%s?sign=%s",
                        common.GetApiUrl(ctx),
                        utils.EncodePath(thumbPath, true),
                        sign.Sign(thumbPath))
                    result = append(result, &model.ObjThumb{
                        Object: objRes,
                        Thumbnail: model.Thumbnail{
                            Thumbnail: thumb,
                        },
                    })
                }
                continue
            }
        }

        if !d.ShowHidden && strings.HasPrefix(rawName, ".") {
            continue
        }
        thumb, ok := model.GetThumb(obj)
        objRes := model.Object{
            Name:     rawName,
            Size:     obj.GetSize(),
            Modified: obj.ModTime(),
            IsFolder: obj.IsDir(),
            HashInfo: obj.GetHash(),
        }
        if !ok {
            result = append(result, &objRes)
        } else {
            result = append(result, &model.ObjThumb{
                Object: objRes,
                Thumbnail: model.Thumbnail{
                    Thumbnail: thumb,
                },
            })
        }
    }
    return result, nil
}

func (d *Chunk) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
    remoteStorage, remoteActualPath, err := op.GetStorageAndActualPath(d.RemotePath)
    if err != nil {
        return nil, err
    }
    chunkFile, ok := file.(*chunkObject)
    remoteActualPath = stdpath.Join(remoteActualPath, file.GetPath())
    if !ok {
        l, _, err := op.Link(ctx, remoteStorage, remoteActualPath, args)
        if err != nil {
            return nil, err
        }
        resultLink := *l
        resultLink.SyncClosers = utils.NewSyncClosers(l)
        return &resultLink, nil
    }
    fileSize := chunkFile.GetSize()
    mergedRrf := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
        start := httpRange.Start
        length := httpRange.Length
        if length < 0 || start+length > fileSize {
            length = fileSize - start
        }
        if length == 0 {
            return io.NopCloser(strings.NewReader("")), nil
        }
        rs := make([]io.Reader, 0)
        cs := make(utils.Closers, 0)
        var (
            rc       io.ReadCloser
            readFrom bool
        )
        for idx, chunkSize := range chunkFile.chunkSizes {
            if readFrom {
                l, o, err := op.Link(ctx, remoteStorage, stdpath.Join(remoteActualPath, d.getPartName(idx)), args)
                if err != nil {
                    _ = cs.Close()
                    return nil, err
                }
                cs = append(cs, l)
                chunkSize2 := l.ContentLength
                if chunkSize2 <= 0 {
                    chunkSize2 = o.GetSize()
                }
                if chunkSize2 != chunkSize {
                    _ = cs.Close()
                    return nil, fmt.Errorf("chunk part[%d] size not match", idx)
                }
                rrf, err := stream.GetRangeReaderFromLink(chunkSize2, l)
                if err != nil {
                    _ = cs.Close()
                    return nil, err
                }
                newLength := length - chunkSize2
                if newLength >= 0 {
                    length = newLength
                    rc, err = rrf.RangeRead(ctx, http_range.Range{Length: -1})
                } else {
                    rc, err = rrf.RangeRead(ctx, http_range.Range{Length: length})
                }
                if err != nil {
                    _ = cs.Close()
                    return nil, err
                }
                rs = append(rs, rc)
                cs = append(cs, rc)
                if newLength <= 0 {
                    return utils.ReadCloser{
                        Reader: io.MultiReader(rs...),
                        Closer: &cs,
                    }, nil
                }
            } else if newStart := start - chunkSize; newStart >= 0 {
                start = newStart
            } else {
                l, o, err := op.Link(ctx, remoteStorage, stdpath.Join(remoteActualPath, d.getPartName(idx)), args)
                if err != nil {
                    _ = cs.Close()
                    return nil, err
                }
                cs = append(cs, l)
                chunkSize2 := l.ContentLength
                if chunkSize2 <= 0 {
                    chunkSize2 = o.GetSize()
                }
                if chunkSize2 != chunkSize {
                    _ = cs.Close()
                    return nil, fmt.Errorf("chunk part[%d] size not match", idx)
                }
                rrf, err := stream.GetRangeReaderFromLink(chunkSize2, l)
                if err != nil {
                    _ = cs.Close()
                    return nil, err
                }
                rc, err = rrf.RangeRead(ctx, http_range.Range{Start: start, Length: -1})
                if err != nil {
                    _ = cs.Close()
                    return nil, err
                }
                length -= chunkSize2 - start
                cs = append(cs, rc)
                if length <= 0 {
                    return utils.ReadCloser{
                        Reader: rc,
                        Closer: &cs,
                    }, nil
                }
                rs = append(rs, rc)
                readFrom = true
            }
        }
        return nil, fmt.Errorf("invalid range: start=%d,length=%d,fileSize=%d", httpRange.Start, httpRange.Length, fileSize)
    }
    return &model.Link{
        RangeReader: stream.RangeReaderFunc(mergedRrf),
    }, nil
}

func (d *Chunk) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
    path := stdpath.Join(d.RemotePath, parentDir.GetPath(), dirName)
    return fs.MakeDir(ctx, path)
}

func (d *Chunk) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
    src := stdpath.Join(d.RemotePath, srcObj.GetPath())
    dst := stdpath.Join(d.RemotePath, dstDir.GetPath())
    _, err := fs.Move(ctx, src, dst)
    return err
}

func (d *Chunk) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
    if _, ok := srcObj.(*chunkObject); ok {
        newName = "[openlist_chunk]" + newName
    }
    return fs.Rename(ctx, stdpath.Join(d.RemotePath, srcObj.GetPath()), newName)
}

func (d *Chunk) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
    dst := stdpath.Join(d.RemotePath, dstDir.GetPath())
    src := stdpath.Join(d.RemotePath, srcObj.GetPath())
    _, err := fs.Copy(ctx, src, dst)
    return err
}

func (d *Chunk) Remove(ctx context.Context, obj model.Obj) error {
    return fs.Remove(ctx, stdpath.Join(d.RemotePath, obj.GetPath()))
}

func (d *Chunk) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
    remoteStorage, remoteActualPath, err := op.GetStorageAndActualPath(d.RemotePath)
    if err != nil {
        return err
    }
    if d.Thumbnail && dstDir.GetName() == ".thumbnails" {
        return op.Put(ctx, remoteStorage, stdpath.Join(remoteActualPath, dstDir.GetPath()), file, up)
    }
    upReader := &driver.ReaderUpdatingProgress{
        Reader:         file,
        UpdateProgress: up,
    }
    dst := stdpath.Join(remoteActualPath, dstDir.GetPath(), "[openlist_chunk]"+file.GetName())
    if d.StoreHash {
        for ht, value := range file.GetHash().All() {
            _ = op.Put(ctx, remoteStorage, dst, &stream.FileStream{
                Obj: &model.Object{
                    Name:     fmt.Sprintf("hash_%s_%s%s", ht.Name, value, d.CustomExt),
                    Size:     1,
                    Modified: file.ModTime(),
                },
                Mimetype: "application/octet-stream",
                Reader:   bytes.NewReader([]byte{0}), // for compatibility with drivers that do not support empty files
            }, nil, true)
        }
    }
    fullPartCount := int(file.GetSize() / d.PartSize)
    tailSize := file.GetSize() % d.PartSize
    if tailSize == 0 && fullPartCount > 0 {
        fullPartCount--
        tailSize = d.PartSize
    }
    partIndex := 0
    for partIndex < fullPartCount {
        err = op.Put(ctx, remoteStorage, dst, &stream.FileStream{
            Obj: &model.Object{
                Name:     d.getPartName(partIndex),
                Size:     d.PartSize,
                Modified: file.ModTime(),
            },
            Mimetype: file.GetMimetype(),
            Reader:   io.LimitReader(upReader, d.PartSize),
        }, nil, true)
        if err != nil {
            _ = op.Remove(ctx, remoteStorage, dst)
            return err
        }
        partIndex++
    }
    err = op.Put(ctx, remoteStorage, dst, &stream.FileStream{
        Obj: &model.Object{
            Name:     d.getPartName(fullPartCount),
            Size:     tailSize,
            Modified: file.ModTime(),
        },
        Mimetype: file.GetMimetype(),
        Reader:   upReader,
    }, nil)
    if err != nil {
        _ = op.Remove(ctx, remoteStorage, dst)
    }
    return err
}

func (d *Chunk) getPartName(part int) string {
    return fmt.Sprintf("%d%s", part, d.CustomExt)
}

func (d *Chunk) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    remoteStorage, err := fs.GetStorage(d.RemotePath, &fs.GetStoragesArgs{})
    if err != nil {
        return nil, errs.NotImplement
    }
    wd, ok := remoteStorage.(driver.WithDetails)
    if !ok {
        return nil, errs.NotImplement
    }
    remoteDetails, err := wd.GetDetails(ctx)
    if err != nil {
        return nil, err
    }
    return &model.StorageDetails{
        DiskUsage: remoteDetails.DiskUsage,
    }, nil
}

var _ driver.Driver = (*Chunk)(nil)
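Put splits an upload into fullPartCount parts of PartSize bytes plus one tail part; when the size is an exact multiple of PartSize the last full part is folded into the tail so the final part is never empty. A standalone sketch of just that arithmetic, with assumed sample sizes (illustrative, not part of the driver):

package main

import "fmt"

// splitParts mirrors the arithmetic in Chunk.Put: how many full parts of
// partSize a file of fileSize bytes produces, and how large the tail part is.
func splitParts(fileSize, partSize int64) (fullParts int, tail int64) {
    fullParts = int(fileSize / partSize)
    tail = fileSize % partSize
    if tail == 0 && fullParts > 0 {
        // Exact multiple: reuse the last full part as the tail so the
        // final uploaded part is never zero bytes.
        fullParts--
        tail = partSize
    }
    return fullParts, tail
}

func main() {
    for _, size := range []int64{0, 10, 100, 250} {
        full, tail := splitParts(size, 100)
        fmt.Printf("size=%d -> %d full part(s) + tail of %d bytes\n", size, full, tail)
    }
}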
31 drivers/chunk/meta.go Normal file
@ -0,0 +1,31 @@
package chunk

import (
    "github.com/OpenListTeam/OpenList/v4/internal/driver"
    "github.com/OpenListTeam/OpenList/v4/internal/op"
)

type Addition struct {
    RemotePath string `json:"remote_path" required:"true"`
    PartSize   int64  `json:"part_size" required:"true" type:"number" help:"bytes"`
    CustomExt  string `json:"custom_ext" type:"string"`
    StoreHash  bool   `json:"store_hash" type:"bool" default:"true"`

    Thumbnail  bool `json:"thumbnail" required:"true" default:"false" help:"enable thumbnail which pre-generated under .thumbnails folder"`
    ShowHidden bool `json:"show_hidden" default:"true" required:"false" help:"show hidden directories and files"`
}

var config = driver.Config{
    Name:        "Chunk",
    LocalSort:   true,
    OnlyProxy:   true,
    NoCache:     true,
    DefaultRoot: "/",
    NoLinkURL:   true,
}

func init() {
    op.RegisterDriver(func() driver.Driver {
        return &Chunk{}
    })
}
8 drivers/chunk/obj.go Normal file
@ -0,0 +1,8 @@
package chunk

import "github.com/OpenListTeam/OpenList/v4/internal/model"

type chunkObject struct {
    model.Object
    chunkSizes []int64
}
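chunkObject.chunkSizes is what lets Link serve an arbitrary byte range without touching parts outside it: whole chunks before the range start are skipped, the first overlapping chunk is read from an offset, and following chunks are read until the requested length is exhausted. A minimal standalone sketch of that offset walk (illustrative only; the real reader wires in op.Link and range readers):

package main

import "fmt"

// segment describes how much of one chunk a range read needs.
type segment struct {
    idx    int   // chunk index
    offset int64 // byte offset inside that chunk
    length int64 // bytes to read from that chunk
}

// planRange maps a (start, length) request onto per-chunk reads, mirroring
// the skip / partial-first / full-following walk in Chunk.Link.
func planRange(chunkSizes []int64, start, length int64) []segment {
    var plan []segment
    for idx, size := range chunkSizes {
        if length <= 0 {
            break
        }
        if start >= size {
            start -= size // whole chunk lies before the range: skip it
            continue
        }
        n := size - start // bytes available in this chunk from the offset
        if n > length {
            n = length
        }
        plan = append(plan, segment{idx: idx, offset: start, length: n})
        start = 0
        length -= n
    }
    return plan
}

func main() {
    // Three chunks of 100 bytes; read 120 bytes starting at offset 150.
    fmt.Println(planRange([]int64{100, 100, 100}, 150, 120))
    // -> [{1 50 50} {2 0 70}]
}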
@ -339,6 +339,21 @@ func (d *CloudreveV4) ArchiveDecompress(ctx context.Context, srcObj, dstDir mode
    return nil, errs.NotImplement
}

func (d *CloudreveV4) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    // TODO return storage details (total space, free space, etc.)
    var r CapacityResp
    err := d.request(http.MethodGet, "/user/capacity", nil, &r)
    if err != nil {
        return nil, err
    }
    return &model.StorageDetails{
        DiskUsage: model.DiskUsage{
            TotalSpace: r.Total,
            FreeSpace:  r.Total - r.Used,
        },
    }, nil
}

//func (d *CloudreveV4) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
//	return nil, errs.NotSupport
//}
@ -204,3 +204,9 @@ type FolderSummaryResp struct {
        CalculatedAt time.Time `json:"calculated_at"`
    } `json:"folder_summary"`
}

type CapacityResp struct {
    Total uint64 `json:"total"`
    Used  uint64 `json:"used"`
    // StoragePackTotal uint64 `json:"storage_pack_total"`
}
230 drivers/cnb_releases/driver.go Normal file
@ -0,0 +1,230 @@
package cnb_releases

import (
    "bytes"
    "context"
    "fmt"
    "io"
    "mime/multipart"
    "net/http"
    "time"

    "github.com/OpenListTeam/OpenList/v4/drivers/base"
    "github.com/OpenListTeam/OpenList/v4/internal/driver"
    "github.com/OpenListTeam/OpenList/v4/internal/errs"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
    "github.com/go-resty/resty/v2"
)

type CnbReleases struct {
    model.Storage
    Addition
    ref *CnbReleases
}

func (d *CnbReleases) Config() driver.Config {
    return config
}

func (d *CnbReleases) GetAddition() driver.Additional {
    return &d.Addition
}

func (d *CnbReleases) Init(ctx context.Context) error {
    return nil
}

func (d *CnbReleases) InitReference(storage driver.Driver) error {
    refStorage, ok := storage.(*CnbReleases)
    if ok {
        d.ref = refStorage
        return nil
    }
    return fmt.Errorf("ref: storage is not CnbReleases")
}

func (d *CnbReleases) Drop(ctx context.Context) error {
    d.ref = nil
    return nil
}

func (d *CnbReleases) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
    if dir.GetPath() == "/" {
        // get all releases for root dir
        var resp ReleaseList

        err := d.Request(http.MethodGet, "/{repo}/-/releases", func(req *resty.Request) {
            req.SetPathParam("repo", d.Repo)
        }, &resp)
        if err != nil {
            return nil, err
        }

        return utils.SliceConvert(resp, func(src Release) (model.Obj, error) {
            name := src.Name
            if d.UseTagName {
                name = src.TagName
            }
            return &model.Object{
                ID:       src.ID,
                Name:     name,
                Size:     d.sumAssetsSize(src.Assets),
                Ctime:    src.CreatedAt,
                Modified: src.UpdatedAt,
                IsFolder: true,
            }, nil
        })
    } else {
        // get release info by release id
        releaseID := dir.GetID()
        if releaseID == "" {
            return nil, errs.ObjectNotFound
        }
        var resp Release
        err := d.Request(http.MethodGet, "/{repo}/-/releases/{release_id}", func(req *resty.Request) {
            req.SetPathParam("repo", d.Repo)
            req.SetPathParam("release_id", releaseID)
        }, &resp)
        if err != nil {
            return nil, err
        }

        return utils.SliceConvert(resp.Assets, func(src ReleaseAsset) (model.Obj, error) {
            return &Object{
                Object: model.Object{
                    ID:       src.ID,
                    Path:     src.Path,
                    Name:     src.Name,
                    Size:     src.Size,
                    Ctime:    src.CreatedAt,
                    Modified: src.UpdatedAt,
                    IsFolder: false,
                },
                ParentID: dir.GetID(),
            }, nil
        })
    }
}

func (d *CnbReleases) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
    return &model.Link{
        URL: "https://cnb.cool" + file.GetPath(),
    }, nil
}

func (d *CnbReleases) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
    if parentDir.GetPath() == "/" {
        // create a new release
        branch := d.DefaultBranch
        if branch == "" {
            branch = "main" // fallback to "main" if not set
        }
        return d.Request(http.MethodPost, "/{repo}/-/releases", func(req *resty.Request) {
            req.SetPathParam("repo", d.Repo)
            req.SetBody(base.Json{
                "name":             dirName,
                "tag_name":         dirName,
                "target_commitish": branch,
            })
        }, nil)
    }
    return errs.NotImplement
}

func (d *CnbReleases) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
    return nil, errs.NotImplement
}

func (d *CnbReleases) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
    if srcObj.IsDir() && !d.UseTagName {
        return d.Request(http.MethodPatch, "/{repo}/-/releases/{release_id}", func(req *resty.Request) {
            req.SetPathParam("repo", d.Repo)
            req.SetPathParam("release_id", srcObj.GetID())
            req.SetFormData(map[string]string{
                "name": newName,
            })
        }, nil)
    }
    return errs.NotImplement
}

func (d *CnbReleases) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
    return nil, errs.NotImplement
}

func (d *CnbReleases) Remove(ctx context.Context, obj model.Obj) error {
    if obj.IsDir() {
        return d.Request(http.MethodDelete, "/{repo}/-/releases/{release_id}", func(req *resty.Request) {
            req.SetPathParam("repo", d.Repo)
            req.SetPathParam("release_id", obj.GetID())
        }, nil)
    }
    if o, ok := obj.(*Object); ok {
        return d.Request(http.MethodDelete, "/{repo}/-/releases/{release_id}/assets/{asset_id}", func(req *resty.Request) {
            req.SetPathParam("repo", d.Repo)
            req.SetPathParam("release_id", o.ParentID)
            req.SetPathParam("asset_id", obj.GetID())
        }, nil)
    } else {
        return fmt.Errorf("unable to get release ID")
    }
}

func (d *CnbReleases) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
    // 1. get upload info
    var resp ReleaseAssetUploadURL
    err := d.Request(http.MethodPost, "/{repo}/-/releases/{release_id}/asset-upload-url", func(req *resty.Request) {
        req.SetPathParam("repo", d.Repo)
        req.SetPathParam("release_id", dstDir.GetID())
        req.SetBody(base.Json{
            "asset_name": file.GetName(),
            "overwrite":  true,
            "size":       file.GetSize(),
        })
    }, &resp)
    if err != nil {
        return err
    }

    // 2. upload file
    // use multipart to create form file
    var b bytes.Buffer
    w := multipart.NewWriter(&b)
    _, err = w.CreateFormFile("file", file.GetName())
    if err != nil {
        return err
    }
    headSize := b.Len()
    err = w.Close()
    if err != nil {
        return err
    }

    head := bytes.NewReader(b.Bytes()[:headSize])
    tail := bytes.NewReader(b.Bytes()[headSize:])
    rateLimitedRd := driver.NewLimitedUploadStream(ctx, io.MultiReader(head, file, tail))

    // use net/http to upload file
    ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Duration(resp.ExpiresInSec+1)*time.Second)
    defer cancel()
    req, err := http.NewRequestWithContext(ctxWithTimeout, http.MethodPost, resp.UploadURL, rateLimitedRd)
    if err != nil {
        return err
    }
    req.Header.Set("Content-Type", w.FormDataContentType())
    req.Header.Set("User-Agent", base.UserAgent)
    httpResp, err := base.HttpClient.Do(req)
    if err != nil {
        return err
    }
    defer httpResp.Body.Close()
    if httpResp.StatusCode != http.StatusNoContent {
        return fmt.Errorf("upload file failed: %s", httpResp.Status)
    }

    // 3. verify upload
    return d.Request(http.MethodPost, resp.VerifyURL, nil, nil)
}

var _ driver.Driver = (*CnbReleases)(nil)
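Put above avoids buffering the whole asset by building only the multipart prologue and epilogue in memory, then streaming head + file + tail as one reader. A standalone sketch of that pattern against a stand-in HTTP endpoint (hypothetical URL; error handling trimmed to the essentials):

package main

import (
    "bytes"
    "fmt"
    "io"
    "mime/multipart"
    "net/http"
    "strings"
)

// uploadStream sends body as the "file" form field without reading it all
// into memory: only the multipart head and closing boundary live in RAM.
func uploadStream(url, filename string, body io.Reader) error {
    var b bytes.Buffer
    w := multipart.NewWriter(&b)
    if _, err := w.CreateFormFile("file", filename); err != nil {
        return err
    }
    headSize := b.Len() // everything written so far is the part header
    if err := w.Close(); err != nil {
        return err
    }
    head := bytes.NewReader(b.Bytes()[:headSize])
    tail := bytes.NewReader(b.Bytes()[headSize:]) // closing boundary
    req, err := http.NewRequest(http.MethodPost, url, io.MultiReader(head, body, tail))
    if err != nil {
        return err
    }
    req.Header.Set("Content-Type", w.FormDataContentType())
    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        return err
    }
    defer resp.Body.Close()
    if resp.StatusCode >= 300 {
        return fmt.Errorf("upload failed: %s", resp.Status)
    }
    return nil
}

func main() {
    // Hypothetical endpoint; in practice this would be the signed upload URL.
    _ = uploadStream("http://127.0.0.1:8080/upload", "a.bin", strings.NewReader("payload"))
}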
26 drivers/cnb_releases/meta.go Normal file
@ -0,0 +1,26 @@
package cnb_releases

import (
    "github.com/OpenListTeam/OpenList/v4/internal/driver"
    "github.com/OpenListTeam/OpenList/v4/internal/op"
)

type Addition struct {
    driver.RootPath
    Repo          string `json:"repo" type:"string" required:"true"`
    Token         string `json:"token" type:"string" required:"true"`
    UseTagName    bool   `json:"use_tag_name" type:"bool" default:"false" help:"Use tag name instead of release name"`
    DefaultBranch string `json:"default_branch" type:"string" default:"main" help:"Default branch for new releases"`
}

var config = driver.Config{
    Name:        "CNB Releases",
    LocalSort:   true,
    DefaultRoot: "/",
}

func init() {
    op.RegisterDriver(func() driver.Driver {
        return &CnbReleases{}
    })
}
100 drivers/cnb_releases/types.go Normal file
@ -0,0 +1,100 @@
package cnb_releases

import (
    "time"

    "github.com/OpenListTeam/OpenList/v4/internal/model"
)

type Object struct {
    model.Object
    ParentID string
}

type TagList []Tag

type Tag struct {
    Commit struct {
        Author    UserInfo       `json:"author"`
        Commit    CommitObject   `json:"commit"`
        Committer UserInfo       `json:"committer"`
        Parents   []CommitParent `json:"parents"`
        Sha       string         `json:"sha"`
    } `json:"commit"`
    Name         string                `json:"name"`
    Target       string                `json:"target"`
    TargetType   string                `json:"target_type"`
    Verification TagObjectVerification `json:"verification"`
}

type UserInfo struct {
    Freeze   bool   `json:"freeze"`
    Nickname string `json:"nickname"`
    Username string `json:"username"`
}

type CommitObject struct {
    Author       Signature                `json:"author"`
    CommentCount int                      `json:"comment_count"`
    Committer    Signature                `json:"committer"`
    Message      string                   `json:"message"`
    Tree         CommitObjectTree         `json:"tree"`
    Verification CommitObjectVerification `json:"verification"`
}

type Signature struct {
    Date  time.Time `json:"date"`
    Email string    `json:"email"`
    Name  string    `json:"name"`
}

type CommitObjectTree struct {
    Sha string `json:"sha"`
}

type CommitObjectVerification struct {
    Payload    string `json:"payload"`
    Reason     string `json:"reason"`
    Signature  string `json:"signature"`
    Verified   bool   `json:"verified"`
    VerifiedAt string `json:"verified_at"`
}

type CommitParent = CommitObjectTree

type TagObjectVerification = CommitObjectVerification

type ReleaseList []Release

type Release struct {
    Assets       []ReleaseAsset `json:"assets"`
    Author       UserInfo       `json:"author"`
    Body         string         `json:"body"`
    CreatedAt    time.Time      `json:"created_at"`
    Draft        bool           `json:"draft"`
    ID           string         `json:"id"`
    IsLatest     bool           `json:"is_latest"`
    Name         string         `json:"name"`
    Prerelease   bool           `json:"prerelease"`
    PublishedAt  time.Time      `json:"published_at"`
    TagCommitish string         `json:"tag_commitish"`
    TagName      string         `json:"tag_name"`
    UpdatedAt    time.Time      `json:"updated_at"`
}

type ReleaseAsset struct {
    ContentType string    `json:"content_type"`
    CreatedAt   time.Time `json:"created_at"`
    ID          string    `json:"id"`
    Name        string    `json:"name"`
    Path        string    `json:"path"`
    Size        int64     `json:"size"`
    UpdatedAt   time.Time `json:"updated_at"`
    Uploader    UserInfo  `json:"uploader"`
}

type ReleaseAssetUploadURL struct {
    UploadURL    string `json:"upload_url"`
    ExpiresInSec int    `json:"expires_in_sec"`
    VerifyURL    string `json:"verify_url"`
}
58 drivers/cnb_releases/util.go Normal file
@ -0,0 +1,58 @@
package cnb_releases

import (
    "encoding/json"
    "fmt"
    "net/http"
    "strings"

    "github.com/OpenListTeam/OpenList/v4/drivers/base"
    log "github.com/sirupsen/logrus"
)

// do others that not defined in Driver interface

func (d *CnbReleases) Request(method string, path string, callback base.ReqCallback, resp any) error {
    if d.ref != nil {
        return d.ref.Request(method, path, callback, resp)
    }
    var url string
    if strings.HasPrefix(path, "http") {
        url = path
    } else {
        url = "https://api.cnb.cool" + path
    }
    req := base.RestyClient.R()
    req.SetHeader("Accept", "application/json")
    req.SetAuthScheme("Bearer")
    req.SetAuthToken(d.Token)

    if callback != nil {
        callback(req)
    }
    res, err := req.Execute(method, url)
    log.Debugln(res.String())
    if err != nil {
        return err
    }
    if res.StatusCode() != http.StatusOK && res.StatusCode() != http.StatusCreated && res.StatusCode() != http.StatusNoContent {
        return fmt.Errorf("failed to request %s, status code: %d, message: %s", url, res.StatusCode(), res.String())
    }

    if resp != nil {
        err = json.Unmarshal(res.Body(), resp)
        if err != nil {
            return err
        }
    }

    return nil
}

func (d *CnbReleases) sumAssetsSize(assets []ReleaseAsset) int64 {
    var size int64
    for _, asset := range assets {
        size += asset.Size
    }
    return size
}
@ -411,6 +411,20 @@ func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, streamer model.FileSt
    return nil
}

func (d *Crypt) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    wd, ok := d.remoteStorage.(driver.WithDetails)
    if !ok {
        return nil, errs.NotImplement
    }
    remoteDetails, err := wd.GetDetails(ctx)
    if err != nil {
        return nil, err
    }
    return &model.StorageDetails{
        DiskUsage: remoteDetails.DiskUsage,
    }, nil
}

//func (d *Safe) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
//	return nil, errs.NotSupport
//}
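Crypt does not know whether its backing storage can report usage, so it probes for the capability with a type assertion and falls back to "not implemented". A standalone sketch of that optional-interface pattern with generic names (not the OpenList types):

package main

import (
    "errors"
    "fmt"
)

var errNotImplemented = errors.New("not implemented")

// usageReporter is the optional capability: storages may or may not satisfy it.
type usageReporter interface {
    Usage() (total, free uint64, err error)
}

type plainStorage struct{} // no usage support

type statStorage struct{ total, free uint64 } // supports usage

func (s statStorage) Usage() (uint64, uint64, error) { return s.total, s.free, nil }

// usageOf probes a storage for the capability and delegates when present.
func usageOf(storage any) (uint64, uint64, error) {
    r, ok := storage.(usageReporter)
    if !ok {
        return 0, 0, errNotImplemented
    }
    return r.Usage()
}

func main() {
    if _, _, err := usageOf(plainStorage{}); err != nil {
        fmt.Println("plain:", err)
    }
    total, free, _ := usageOf(statStorage{total: 1 << 40, free: 1 << 39})
    fmt.Println("stat:", total, free)
}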
@ -51,7 +51,7 @@ func (d *Local) Config() driver.Config {

func (d *Local) Init(ctx context.Context) error {
    if d.MkdirPerm == "" {
        d.mkdirPerm = 0777
        d.mkdirPerm = 0o777
    } else {
        v, err := strconv.ParseUint(d.MkdirPerm, 8, 32)
        if err != nil {
@ -150,6 +150,7 @@ func (d *Local) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
    }
    return files, nil
}

func (d *Local) FileInfoToObj(ctx context.Context, f fs.FileInfo, reqPath string, fullPath string) model.Obj {
    thumb := ""
    if d.Thumbnail {
@ -198,7 +199,7 @@ func (d *Local) Get(ctx context.Context, path string) (model.Obj, error) {
    path = filepath.Join(d.GetRootPath(), path)
    f, err := os.Stat(path)
    if err != nil {
        if strings.Contains(err.Error(), "cannot find the file") {
        if os.IsNotExist(err) {
            return nil, errs.ObjectNotFound
        }
        return nil, err
@ -374,6 +375,13 @@ func (d *Local) Remove(ctx context.Context, obj model.Obj) error {
            err = os.Remove(obj.GetPath())
        }
    } else {
        if !utils.Exists(d.RecycleBinPath) {
            err = os.MkdirAll(d.RecycleBinPath, 0o755)
            if err != nil {
                return err
            }
        }

        dstPath := filepath.Join(d.RecycleBinPath, obj.GetName())
        if utils.Exists(dstPath) {
            dstPath = filepath.Join(d.RecycleBinPath, obj.GetName()+"_"+time.Now().Format("20060102150405"))
@ -427,4 +435,14 @@ func (d *Local) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
    return nil
}

func (d *Local) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    du, err := getDiskUsage(d.RootFolderPath)
    if err != nil {
        return nil, err
    }
    return &model.StorageDetails{
        DiskUsage: du,
    }, nil
}

var _ driver.Driver = (*Local)(nil)
@ -5,8 +5,25 @@ package local
import (
    "io/fs"
    "strings"
    "syscall"

    "github.com/OpenListTeam/OpenList/v4/internal/model"
)

func isHidden(f fs.FileInfo, _ string) bool {
    return strings.HasPrefix(f.Name(), ".")
}

func getDiskUsage(path string) (model.DiskUsage, error) {
    var stat syscall.Statfs_t
    err := syscall.Statfs(path, &stat)
    if err != nil {
        return model.DiskUsage{}, err
    }
    total := stat.Blocks * uint64(stat.Bsize)
    free := stat.Bfree * uint64(stat.Bsize)
    return model.DiskUsage{
        TotalSpace: total,
        FreeSpace:  free,
    }, nil
}
@ -3,9 +3,13 @@
package local

import (
    "errors"
    "io/fs"
    "path/filepath"
    "syscall"

    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "golang.org/x/sys/windows"
)

func isHidden(f fs.FileInfo, fullPath string) bool {
@ -20,3 +24,28 @@ func isHidden(f fs.FileInfo, fullPath string) bool {
    }
    return attrs&syscall.FILE_ATTRIBUTE_HIDDEN != 0
}

func getDiskUsage(path string) (model.DiskUsage, error) {
    abs, err := filepath.Abs(path)
    if err != nil {
        return model.DiskUsage{}, err
    }
    root := filepath.VolumeName(abs)
    if len(root) != 2 || root[1] != ':' {
        return model.DiskUsage{}, errors.New("cannot get disk label")
    }
    var freeBytes, totalBytes, totalFreeBytes uint64
    err = windows.GetDiskFreeSpaceEx(
        windows.StringToUTF16Ptr(root),
        &freeBytes,
        &totalBytes,
        &totalFreeBytes,
    )
    if err != nil {
        return model.DiskUsage{}, err
    }
    return model.DiskUsage{
        TotalSpace: totalBytes,
        FreeSpace:  freeBytes,
    }, nil
}
@ -4,6 +4,7 @@ import (
    "context"
    "os"
    "path"
    "strings"

    "github.com/OpenListTeam/OpenList/v4/internal/driver"
    "github.com/OpenListTeam/OpenList/v4/internal/errs"
@ -127,4 +128,22 @@ func (d *SFTP) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
    return err
}

func (d *SFTP) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    stat, err := d.client.StatVFS(d.RootFolderPath)
    if err != nil {
        if strings.Contains(err.Error(), "unimplemented") {
            return nil, errs.NotImplement
        }
        return nil, err
    }
    total := stat.Blocks * stat.Bsize
    free := stat.Bfree * stat.Bsize
    return &model.StorageDetails{
        DiskUsage: model.DiskUsage{
            TotalSpace: total,
            FreeSpace:  free,
        },
    }, nil
}

var _ driver.Driver = (*SFTP)(nil)
@ -205,6 +205,22 @@ func (d *SMB) Put(ctx context.Context, dstDir model.Obj, stream model.FileStream
    return nil
}

func (d *SMB) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    if err := d.checkConn(); err != nil {
        return nil, err
    }
    stat, err := d.fs.Statfs(d.RootFolderPath)
    if err != nil {
        return nil, err
    }
    return &model.StorageDetails{
        DiskUsage: model.DiskUsage{
            TotalSpace: stat.BlockSize() * stat.TotalBlockCount(),
            FreeSpace:  stat.BlockSize() * stat.AvailableBlockCount(),
        },
    }, nil
}

//func (d *SMB) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
//	return nil, errs.NotSupport
//}
@ -93,6 +93,11 @@ func (d *Template) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.O
    return nil, errs.NotImplement
}

func (d *Template) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    // TODO return storage details (total space, free space, etc.)
    return nil, errs.NotImplement
}

//func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
//	return nil, errs.NotSupport
//}
@ -1,10 +1,11 @@
package archives

import (
    "fmt"
    "io"
    "io/fs"
    "os"
    stdpath "path"
    "path/filepath"
    "strings"

    "github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
@ -107,7 +108,7 @@ func (Archives) Decompress(ss []*stream.SeekableStream, outputPath string, args
    }
    if stat.IsDir() {
        isDir = true
        outputPath = stdpath.Join(outputPath, stat.Name())
        outputPath = filepath.Join(outputPath, stat.Name())
        err = os.Mkdir(outputPath, 0700)
        if err != nil {
            return filterPassword(err)
@ -120,11 +121,14 @@ func (Archives) Decompress(ss []*stream.SeekableStream, outputPath string, args
        return err
    }
    relPath := strings.TrimPrefix(p, path+"/")
    dstPath := stdpath.Join(outputPath, relPath)
    dstPath := filepath.Join(outputPath, relPath)
    if !strings.HasPrefix(dstPath, outputPath+string(os.PathSeparator)) {
        return fmt.Errorf("illegal file path: %s", relPath)
    }
    if d.IsDir() {
        err = os.MkdirAll(dstPath, 0700)
    } else {
        dir := stdpath.Dir(dstPath)
        dir := filepath.Dir(dstPath)
        err = decompress(fsys, p, dir, func(_ float64) {})
    }
    return err
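The recurring pattern in these archive changes is a zip-slip guard: join the entry name onto the output directory with filepath.Join (which cleans "." and ".." segments) and reject any result that escapes the output root. A standalone sketch of that check as a helper (illustrative only, not the exact code above):

package main

import (
    "fmt"
    "os"
    "path/filepath"
    "strings"
)

// safeJoin joins an archive entry name onto root and refuses results that
// escape root, which is how "../../etc/passwd"-style entries are rejected.
func safeJoin(root, name string) (string, error) {
    dst := filepath.Join(root, name) // Join also cleans "." and ".." segments
    if !strings.HasPrefix(dst, root+string(os.PathSeparator)) {
        return "", fmt.Errorf("illegal file path: %s", name)
    }
    return dst, nil
}

func main() {
    fmt.Println(safeJoin("/tmp/out", "docs/readme.txt"))  // accepted
    fmt.Println(safeJoin("/tmp/out", "../../etc/passwd")) // rejected
}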
@ -1,10 +1,11 @@
package archives

import (
    "fmt"
    "io"
    fs2 "io/fs"
    "os"
    stdpath "path"
    "path/filepath"
    "strings"

    "github.com/OpenListTeam/OpenList/v4/internal/errs"
@ -69,7 +70,11 @@ func decompress(fsys fs2.FS, filePath, targetPath string, up model.UpdateProgres
    if err != nil {
        return err
    }
    f, err := os.OpenFile(stdpath.Join(targetPath, stat.Name()), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
    destPath := filepath.Join(targetPath, stat.Name())
    if !strings.HasPrefix(destPath, targetPath+string(os.PathSeparator)) {
        return fmt.Errorf("illegal file path: %s", stat.Name())
    }
    f, err := os.OpenFile(destPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
    if err != nil {
        return err
    }
@ -1,9 +1,11 @@
package iso9660

import (
    "fmt"
    "io"
    "os"
    stdpath "path"
    "path/filepath"
    "strings"

    "github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
    "github.com/OpenListTeam/OpenList/v4/internal/errs"
@ -79,7 +81,11 @@ func (ISO9660) Decompress(ss []*stream.SeekableStream, outputPath string, args m
    }
    if obj.IsDir() {
        if args.InnerPath != "/" {
            outputPath = stdpath.Join(outputPath, obj.Name())
            rootpath := outputPath
            outputPath = filepath.Join(outputPath, obj.Name())
            if !strings.HasPrefix(outputPath, rootpath+string(os.PathSeparator)) {
                return fmt.Errorf("illegal file path: %s", obj.Name())
            }
            if err = os.MkdirAll(outputPath, 0700); err != nil {
                return err
            }
@ -1,8 +1,9 @@
package iso9660

import (
    "fmt"
    "os"
    stdpath "path"
    "path/filepath"
    "strings"

    "github.com/OpenListTeam/OpenList/v4/internal/errs"
@ -62,7 +63,11 @@ func toModelObj(file *iso9660.File) model.Obj {
    }
}

func decompress(f *iso9660.File, path string, up model.UpdateProgress) error {
    file, err := os.OpenFile(stdpath.Join(path, f.Name()), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
    destPath := filepath.Join(path, f.Name())
    if !strings.HasPrefix(destPath, path+string(os.PathSeparator)) {
        return fmt.Errorf("illegal file path: %s", f.Name())
    }
    file, err := os.OpenFile(destPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
    if err != nil {
        return err
    }
@ -84,7 +89,10 @@ func decompressAll(children []*iso9660.File, path string) error {
    if err != nil {
        return err
    }
    nextPath := stdpath.Join(path, child.Name())
    nextPath := filepath.Join(path, child.Name())
    if !strings.HasPrefix(nextPath, path+string(os.PathSeparator)) {
        return fmt.Errorf("illegal file path: %s", child.Name())
    }
    if err = os.MkdirAll(nextPath, 0700); err != nil {
        return err
    }
@ -3,7 +3,7 @@ package rardecode
import (
    "io"
    "os"
    stdpath "path"
    "path/filepath"
    "strings"

    "github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
@ -93,7 +93,7 @@ func (RarDecoder) Decompress(ss []*stream.SeekableStream, outputPath string, arg
        }
    } else {
        innerPath := strings.TrimPrefix(args.InnerPath, "/")
        innerBase := stdpath.Base(innerPath)
        innerBase := filepath.Base(innerPath)
        createdBaseDir := false
        for {
            var header *rardecode.FileHeader
@ -115,7 +115,7 @@ func (RarDecoder) Decompress(ss []*stream.SeekableStream, outputPath string, arg
            }
            break
        } else if strings.HasPrefix(name, innerPath+"/") {
            targetPath := stdpath.Join(outputPath, innerBase)
            targetPath := filepath.Join(outputPath, innerBase)
            if !createdBaseDir {
                err = os.Mkdir(targetPath, 0700)
                if err != nil {
@ -5,7 +5,7 @@ import (
    "io"
    "io/fs"
    "os"
    stdpath "path"
    "path/filepath"
    "sort"
    "strings"
    "time"
@ -124,7 +124,7 @@ type WrapFileInfo struct {
}

func (f *WrapFileInfo) Name() string {
    return stdpath.Base(f.File.Name)
    return filepath.Base(f.File.Name)
}

func (f *WrapFileInfo) Size() int64 {
@ -183,13 +183,17 @@ func getReader(ss []*stream.SeekableStream, password string) (*rardecode.Reader,

func decompress(reader *rardecode.Reader, header *rardecode.FileHeader, filePath, outputPath string) error {
    targetPath := outputPath
    dir, base := stdpath.Split(filePath)
    dir, base := filepath.Split(filePath)
    if dir != "" {
        targetPath = stdpath.Join(targetPath, dir)
        targetPath = filepath.Join(targetPath, dir)
        if strings.HasPrefix(targetPath, outputPath+string(os.PathSeparator)) {
            err := os.MkdirAll(targetPath, 0700)
            if err != nil {
                return err
            }
        } else {
            targetPath = outputPath
        }
    }
    if base != "" {
        err := _decompress(reader, header, targetPath, func(_ float64) {})
@ -201,7 +205,11 @@ func decompress(reader *rardecode.Reader, header *rardecode.FileHeader, filePath
}

func _decompress(reader *rardecode.Reader, header *rardecode.FileHeader, targetPath string, up model.UpdateProgress) error {
    f, err := os.OpenFile(stdpath.Join(targetPath, stdpath.Base(header.Name)), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
    destPath := filepath.Join(targetPath, filepath.Base(header.Name))
    if !strings.HasPrefix(destPath, targetPath+string(os.PathSeparator)) {
        return fmt.Errorf("illegal file path: %s", filepath.Base(header.Name))
    }
    f, err := os.OpenFile(destPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
    if err != nil {
        return err
    }
@@ -1,10 +1,11 @@
 package tool
 
 import (
+    "fmt"
     "io"
     "io/fs"
     "os"
-    stdpath "path"
+    "path/filepath"
     "strings"
 
     "github.com/OpenListTeam/OpenList/v4/internal/model"
@@ -40,13 +41,13 @@ func GenerateMetaTreeFromFolderTraversal(r ArchiveReader) (bool, []model.ObjTree
         isNewFolder := false
         if !file.FileInfo().IsDir() {
             // add the file to its containing folder first
-            dir = stdpath.Dir(name)
+            dir = filepath.Dir(name)
             dirObj = dirMap[dir]
             if dirObj == nil {
                 isNewFolder = dir != "."
                 dirObj = &model.ObjectTree{}
                 dirObj.IsFolder = true
-                dirObj.Name = stdpath.Base(dir)
+                dirObj.Name = filepath.Base(dir)
                 dirObj.Modified = file.FileInfo().ModTime()
                 dirMap[dir] = dirObj
             }
@@ -64,28 +65,28 @@ func GenerateMetaTreeFromFolderTraversal(r ArchiveReader) (bool, []model.ObjTree
                 dirMap[dir] = dirObj
             }
             dirObj.IsFolder = true
-            dirObj.Name = stdpath.Base(dir)
+            dirObj.Name = filepath.Base(dir)
             dirObj.Modified = file.FileInfo().ModTime()
         }
         if isNewFolder {
             // add the folder to its parent folder
             // the archive may record only file paths, not folders
             // so create all missing parent folders in a loop
-            parentDir := stdpath.Dir(dir)
+            parentDir := filepath.Dir(dir)
             for {
                 parentDirObj := dirMap[parentDir]
                 if parentDirObj == nil {
                     parentDirObj = &model.ObjectTree{}
                     if parentDir != "." {
                         parentDirObj.IsFolder = true
-                        parentDirObj.Name = stdpath.Base(parentDir)
+                        parentDirObj.Name = filepath.Base(parentDir)
                         parentDirObj.Modified = file.FileInfo().ModTime()
                     }
                     dirMap[parentDir] = parentDirObj
                 }
                 parentDirObj.Children = append(parentDirObj.Children, dirObj)
 
-                parentDir = stdpath.Dir(parentDir)
+                parentDir = filepath.Dir(parentDir)
                 if dirMap[parentDir] != nil {
                     break
                 }
@@ -127,7 +128,7 @@ func DecompressFromFolderTraversal(r ArchiveReader, outputPath string, args mode
         }
     } else {
         innerPath := strings.TrimPrefix(args.InnerPath, "/")
-        innerBase := stdpath.Base(innerPath)
+        innerBase := filepath.Base(innerPath)
         createdBaseDir := false
         for _, file := range files {
             name := file.Name()
@@ -138,7 +139,7 @@ func DecompressFromFolderTraversal(r ArchiveReader, outputPath string, args mode
                 }
                 break
             } else if strings.HasPrefix(name, innerPath+"/") {
-                targetPath := stdpath.Join(outputPath, innerBase)
+                targetPath := filepath.Join(outputPath, innerBase)
                 if !createdBaseDir {
                     err = os.Mkdir(targetPath, 0700)
                     if err != nil {
@@ -159,13 +160,17 @@ func DecompressFromFolderTraversal(r ArchiveReader, outputPath string, args mode
 
 func decompress(file SubFile, filePath, outputPath, password string) error {
     targetPath := outputPath
-    dir, base := stdpath.Split(filePath)
+    dir, base := filepath.Split(filePath)
     if dir != "" {
-        targetPath = stdpath.Join(targetPath, dir)
+        targetPath = filepath.Join(targetPath, dir)
+        if strings.HasPrefix(targetPath, outputPath+string(os.PathSeparator)) {
             err := os.MkdirAll(targetPath, 0700)
             if err != nil {
                 return err
             }
+        } else {
+            targetPath = outputPath
+        }
     }
     if base != "" {
         err := _decompress(file, targetPath, password, func(_ float64) {})
@@ -185,7 +190,11 @@ func _decompress(file SubFile, targetPath, password string, up model.UpdateProgr
         return err
     }
     defer func() { _ = rc.Close() }()
-    f, err := os.OpenFile(stdpath.Join(targetPath, file.FileInfo().Name()), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
+    destPath := filepath.Join(targetPath, file.FileInfo().Name())
+    if !strings.HasPrefix(destPath, targetPath+string(os.PathSeparator)) {
+        return fmt.Errorf("illegal file path: %s", file.FileInfo().Name())
+    }
+    f, err := os.OpenFile(destPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
     if err != nil {
         return err
     }
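Note on the archive changes above: every extraction target is now built with filepath.Join and then rejected if the result does not stay inside the output directory (a zip-slip guard). A minimal, self-contained sketch of that pattern follows; the safeJoin helper name and the sample paths are illustrative only, not part of the patch.

package main

import (
    "fmt"
    "os"
    "path/filepath"
    "strings"
)

// safeJoin mirrors the guard used in the hunks above: the joined path must
// remain inside targetDir, otherwise the entry is rejected.
func safeJoin(targetDir, name string) (string, error) {
    dest := filepath.Join(targetDir, filepath.Base(name))
    if !strings.HasPrefix(dest, targetDir+string(os.PathSeparator)) {
        return "", fmt.Errorf("illegal file path: %s", name)
    }
    return dest, nil
}

func main() {
    fmt.Println(safeJoin("/tmp/out", "notes.txt")) // /tmp/out/notes.txt <nil>
    fmt.Println(safeJoin("/tmp/out", ".."))        // rejected: joins to /tmp, which is outside /tmp/out
}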
@@ -114,6 +114,7 @@ func InitialSettings() []model.SettingItem {
         {Key: "share_icon", Value: "🎁", Type: conf.TypeString, Group: model.STYLE},
         {Key: "home_container", Value: "max_980px", Type: conf.TypeSelect, Options: "max_980px,hope_container", Group: model.STYLE},
         {Key: "settings_layout", Value: "list", Type: conf.TypeSelect, Options: "list,responsive", Group: model.STYLE},
+        {Key: conf.HideStorageDetails, Value: "false", Type: conf.TypeBool, Group: model.STYLE, Flag: model.PRIVATE},
         // preview settings
         {Key: conf.TextTypes, Value: "txt,htm,html,xml,java,properties,sql,js,md,json,conf,ini,vue,php,py,bat,gitignore,yml,go,sh,c,cpp,h,hpp,tsx,vtt,srt,ass,rs,lrc,strm", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE},
         {Key: conf.AudioTypes, Value: "mp3,flac,ogg,m4a,wav,opus,wma", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE},
@@ -20,6 +20,7 @@ const (
     Logo      = "logo" // multi-lines text, L1: light, EOL: dark
     Favicon   = "favicon"
     MainColor = "main_color"
+    HideStorageDetails = "hide_storage_details"
 
     // preview
     TextTypes = "text_types"
@@ -210,6 +210,11 @@ type ArchiveDecompressResult interface {
     ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error)
 }
 
+type WithDetails interface {
+    // GetDetails get storage details (total space, free space, etc.)
+    GetDetails(ctx context.Context) (*model.StorageDetails, error)
+}
+
 type Reference interface {
     InitReference(storage Driver) error
 }
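The new driver.WithDetails interface above is what the storage-details plumbing later in this diff keys on. A hedged sketch of how a driver might satisfy it; the exampleDriver type and the hard-coded sizes are made up for illustration, and the model types are redeclared locally so the sketch compiles on its own.

package main

import (
    "context"
    "fmt"
)

// Local stand-ins mirroring the types added in this diff (model.DiskUsage,
// model.StorageDetails).
type DiskUsage struct {
    TotalSpace uint64 `json:"total_space"`
    FreeSpace  uint64 `json:"free_space"`
}

type StorageDetails struct {
    DiskUsage
}

// WithDetails is the capability interface a driver opts into (shape taken from the patch).
type WithDetails interface {
    GetDetails(ctx context.Context) (*StorageDetails, error)
}

// exampleDriver is hypothetical; a real driver would query its backend here.
type exampleDriver struct{}

func (d *exampleDriver) GetDetails(ctx context.Context) (*StorageDetails, error) {
    return &StorageDetails{DiskUsage: DiskUsage{TotalSpace: 1 << 40, FreeSpace: 1 << 39}}, nil
}

func main() {
    var d WithDetails = &exampleDriver{}
    details, _ := d.GetDetails(context.Background())
    fmt.Printf("total=%d free=%d\n", details.TotalSpace, details.FreeSpace)
}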
@@ -21,6 +21,7 @@ import (
 type ListArgs struct {
     Refresh bool
     NoLog   bool
+    WithStorageDetails bool
 }
 
 func List(ctx context.Context, path string, args *ListArgs) ([]model.Obj, error) {
@@ -36,10 +37,11 @@ func List(ctx context.Context, path string, args *ListArgs) ([]model.Obj, error)
 
 type GetArgs struct {
     NoLog bool
+    WithStorageDetails bool
 }
 
 func Get(ctx context.Context, path string, args *GetArgs) (model.Obj, error) {
-    res, err := get(ctx, path)
+    res, err := get(ctx, path, args)
     if err != nil {
         if !args.NoLog {
             log.Warnf("failed get %s: %s", path, err)
@@ -11,11 +11,11 @@ import (
     "github.com/pkg/errors"
 )
 
-func get(ctx context.Context, path string) (model.Obj, error) {
+func get(ctx context.Context, path string, args *GetArgs) (model.Obj, error) {
     path = utils.FixAndCleanPath(path)
     // maybe a virtual file
     if path != "/" {
-        virtualFiles := op.GetStorageVirtualFilesByPath(stdpath.Dir(path))
+        virtualFiles := op.GetStorageVirtualFilesWithDetailsByPath(ctx, stdpath.Dir(path), !args.WithStorageDetails)
         for _, f := range virtualFiles {
             if f.GetName() == stdpath.Base(path) {
                 return f, nil
@@ -15,7 +15,7 @@ import (
 func list(ctx context.Context, path string, args *ListArgs) ([]model.Obj, error) {
     meta, _ := ctx.Value(conf.MetaKey).(*model.Meta)
     user, _ := ctx.Value(conf.UserKey).(*model.User)
-    virtualFiles := op.GetStorageVirtualFilesByPath(path)
+    virtualFiles := op.GetStorageVirtualFilesWithDetailsByPath(ctx, path, !args.WithStorageDetails)
     storage, actualPath, err := op.GetStorageAndActualPath(path)
     if err != nil && len(virtualFiles) == 0 {
         return nil, errors.WithMessage(err, "failed get storage")
@@ -80,6 +80,10 @@ type SetPath interface {
     SetPath(path string)
 }
 
+type ObjWithProvider interface {
+    GetProvider() string
+}
+
 func SortFiles(objs []Obj, orderBy, orderDirection string) {
     if orderBy == "" {
         return
@@ -166,6 +170,16 @@ func GetUrl(obj Obj) (url string, ok bool) {
     return url, false
 }
 
+func GetProvider(obj Obj) (string, bool) {
+    if obj, ok := obj.(ObjWithProvider); ok {
+        return obj.GetProvider(), true
+    }
+    if unwrap, ok := obj.(ObjUnwrap); ok {
+        return GetProvider(unwrap.Unwrap())
+    }
+    return "unknown", false
+}
+
 func GetRawObject(obj Obj) *Object {
     switch v := obj.(type) {
     case *ObjThumbURL:
@@ -174,6 +188,8 @@ func GetRawObject(obj Obj) *Object {
         return &v.Object
     case *ObjectURL:
         return &v.Object
+    case *ObjectProvider:
+        return &v.Object
     case *Object:
         return v
     }
@@ -99,3 +99,16 @@ type ObjThumbURL struct {
     Thumbnail
     Url
 }
+
+type Provider struct {
+    Provider string
+}
+
+func (p Provider) GetProvider() string {
+    return p.Provider
+}
+
+type ObjectProvider struct {
+    Object
+    Provider
+}
@@ -55,3 +55,40 @@ func (p Proxy) Webdav302() bool {
 func (p Proxy) WebdavProxyURL() bool {
     return p.WebdavPolicy == "use_proxy_url"
 }
+
+type DiskUsage struct {
+    TotalSpace uint64 `json:"total_space"`
+    FreeSpace  uint64 `json:"free_space"`
+}
+
+type StorageDetails struct {
+    DiskUsage
+}
+
+type StorageDetailsWithName struct {
+    *StorageDetails
+    DriverName string `json:"driver_name"`
+}
+
+type ObjWithStorageDetails interface {
+    GetStorageDetails() *StorageDetailsWithName
+}
+
+type ObjStorageDetails struct {
+    Obj
+    StorageDetailsWithName
+}
+
+func (o ObjStorageDetails) GetStorageDetails() *StorageDetailsWithName {
+    return &o.StorageDetailsWithName
+}
+
+func GetStorageDetails(obj Obj) (*StorageDetailsWithName, bool) {
+    if obj, ok := obj.(ObjWithStorageDetails); ok {
+        return obj.GetStorageDetails(), true
+    }
+    if unwrap, ok := obj.(ObjUnwrap); ok {
+        return GetStorageDetails(unwrap.Unwrap())
+    }
+    return nil, false
+}
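The model helpers added above (GetProvider, GetStorageDetails) both follow the same lookup pattern: type-assert the capability interface, otherwise recurse through the unwrap interface so wrapper objects stay transparent. A compact stand-alone sketch of that pattern with local stand-in types; the names Obj, withProvider, unwrapper and the "Local" provider string are illustrative, not from the patch.

package main

import "fmt"

type Obj interface{ GetName() string }

// capability interface, analogous to ObjWithProvider in the patch
type withProvider interface{ GetProvider() string }

// unwrap interface, analogous to model.ObjUnwrap
type unwrapper interface{ Unwrap() Obj }

type base struct{ name string }

func (b base) GetName() string { return b.name }

type providerObj struct {
    base
    provider string
}

func (p providerObj) GetProvider() string { return p.provider }

// wrapped hides the inner object behind another Obj, exposing only Unwrap.
type wrapped struct{ inner Obj }

func (w wrapped) GetName() string { return w.inner.GetName() }
func (w wrapped) Unwrap() Obj     { return w.inner }

func getProvider(o Obj) (string, bool) {
    if p, ok := o.(withProvider); ok {
        return p.GetProvider(), true
    }
    if u, ok := o.(unwrapper); ok {
        return getProvider(u.Unwrap())
    }
    return "unknown", false
}

func main() {
    o := wrapped{inner: providerObj{base: base{name: "a.txt"}, provider: "Local"}}
    fmt.Println(getProvider(o)) // Local true
}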
@@ -125,7 +125,7 @@ type ConcurrencyLimit struct {
     Limit int // must be greater than 0
 }
 
-var ErrExceedMaxConcurrency = ErrorHttpStatusCode(http.StatusTooManyRequests)
+var ErrExceedMaxConcurrency = HttpStatusCodeError(http.StatusTooManyRequests)
 
 func (l *ConcurrencyLimit) sub() error {
     l._m.Lock()
@@ -403,7 +403,7 @@ var errInfiniteRetry = errors.New("infinite retry")
 func (d *downloader) tryDownloadChunk(params *HttpRequestParams, ch *chunk) (int64, error) {
     resp, err := d.cfg.HttpClient(d.ctx, params)
     if err != nil {
-        statusCode, ok := errors.Unwrap(err).(ErrorHttpStatusCode)
+        statusCode, ok := errors.Unwrap(err).(HttpStatusCodeError)
         if !ok {
             return 0, err
         }
@@ -114,7 +114,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
         reader, err := RangeReadCloser.RangeRead(ctx, http_range.Range{Length: -1})
         if err != nil {
             code = http.StatusRequestedRangeNotSatisfiable
-            if statusCode, ok := errors.Unwrap(err).(ErrorHttpStatusCode); ok {
+            if statusCode, ok := errors.Unwrap(err).(HttpStatusCodeError); ok {
                 code = int(statusCode)
             }
             http.Error(w, err.Error(), code)
@@ -137,7 +137,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
         sendContent, err = RangeReadCloser.RangeRead(ctx, ra)
         if err != nil {
             code = http.StatusRequestedRangeNotSatisfiable
-            if statusCode, ok := errors.Unwrap(err).(ErrorHttpStatusCode); ok {
+            if statusCode, ok := errors.Unwrap(err).(HttpStatusCodeError); ok {
                 code = int(statusCode)
             }
             http.Error(w, err.Error(), code)
@@ -199,7 +199,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
             log.Warnf("Maybe size incorrect or reader not giving correct/full data, or connection closed before finish. written bytes: %d ,sendSize:%d, ", written, sendSize)
         }
         code = http.StatusInternalServerError
-        if statusCode, ok := errors.Unwrap(err).(ErrorHttpStatusCode); ok {
+        if statusCode, ok := errors.Unwrap(err).(HttpStatusCodeError); ok {
             code = int(statusCode)
         }
         w.WriteHeader(code)
@@ -253,14 +253,14 @@ func RequestHttp(ctx context.Context, httpMethod string, headerOverride http.Hea
         _ = res.Body.Close()
         msg := string(all)
         log.Debugln(msg)
-        return nil, fmt.Errorf("http request [%s] failure,status: %w response:%s", URL, ErrorHttpStatusCode(res.StatusCode), msg)
+        return nil, fmt.Errorf("http request [%s] failure,status: %w response:%s", URL, HttpStatusCodeError(res.StatusCode), msg)
     }
     return res, nil
 }
 
-type ErrorHttpStatusCode int
+type HttpStatusCodeError int
 
-func (e ErrorHttpStatusCode) Error() string {
+func (e HttpStatusCodeError) Error() string {
     return fmt.Sprintf("%d|%s", e, http.StatusText(int(e)))
 }
 
@@ -405,11 +405,8 @@ func DriverExtract(ctx context.Context, storage driver.Driver, path string, args
         return nil
     })
     link, err, _ := extractG.Do(key, fn)
-    if err == nil && !link.AcquireReference() {
+    for err == nil && !link.AcquireReference() {
         link, err, _ = extractG.Do(key, fn)
-        if err == nil {
-            link.AcquireReference()
-        }
     }
     if err == errLinkMFileCache {
         if linkM != nil {
@@ -184,6 +184,9 @@ func Get(ctx context.Context, storage driver.Driver, path string) (model.Obj, er
         if err == nil {
             return model.WrapObjName(obj), nil
         }
+        if !errs.IsNotImplement(err) {
+            return nil, errors.WithMessage(err, "failed to get obj")
+        }
     }
 
     // is root folder
@@ -327,11 +330,8 @@ func Link(ctx context.Context, storage driver.Driver, path string, args model.Li
         return nil
     })
     link, err, _ := linkG.Do(key, fn)
-    if err == nil && !link.AcquireReference() {
+    for err == nil && !link.AcquireReference() {
         link, err, _ = linkG.Do(key, fn)
-        if err == nil {
-            link.AcquireReference()
-        }
     }
 
     if err == errLinkMFileCache {
@@ -630,6 +630,11 @@ func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file mod
         up = func(p float64) {}
     }
 
+    // if the size is less than 0, get the full size via the cache; this can happen with streaming uploads
+    if file.GetSize() < 0 {
+        log.Warnf("file size < 0, try to get full size from cache")
+        file.CacheFullAndWriter(nil, nil)
+    }
     switch s := storage.(type) {
     case driver.PutResult:
         var newObj model.Obj
@@ -15,7 +15,6 @@ import (
     "github.com/OpenListTeam/OpenList/v4/internal/model"
     "github.com/OpenListTeam/OpenList/v4/pkg/generic_sync"
     "github.com/OpenListTeam/OpenList/v4/pkg/utils"
-    mapset "github.com/deckarep/golang-set/v2"
     "github.com/pkg/errors"
     log "github.com/sirupsen/logrus"
 )
@@ -335,6 +334,40 @@ func getStoragesByPath(path string) []driver.Driver {
 // for example, there are: /a/b,/a/c,/a/d/e,/a/b.balance1,/av
 // GetStorageVirtualFilesByPath(/a) => b,c,d
 func GetStorageVirtualFilesByPath(prefix string) []model.Obj {
+    return getStorageVirtualFilesByPath(prefix, func(_ driver.Driver, obj model.Obj) model.Obj {
+        return obj
+    })
+}
+
+func GetStorageVirtualFilesWithDetailsByPath(ctx context.Context, prefix string, hideDetails ...bool) []model.Obj {
+    if utils.IsBool(hideDetails...) {
+        return GetStorageVirtualFilesByPath(prefix)
+    }
+    return getStorageVirtualFilesByPath(prefix, func(d driver.Driver, obj model.Obj) model.Obj {
+        ret := &model.ObjStorageDetails{
+            Obj: obj,
+            StorageDetailsWithName: model.StorageDetailsWithName{
+                StorageDetails: nil,
+                DriverName:     d.Config().Name,
+            },
+        }
+        storage, ok := d.(driver.WithDetails)
+        if !ok {
+            return ret
+        }
+        details, err := storage.GetDetails(ctx)
+        if err != nil {
+            if !errors.Is(err, errs.NotImplement) {
+                log.Errorf("failed get %s storage details: %+v", d.GetStorage().MountPath, err)
+            }
+            return ret
+        }
+        ret.StorageDetails = details
+        return ret
+    })
+}
+
+func getStorageVirtualFilesByPath(prefix string, rootCallback func(driver.Driver, model.Obj) model.Obj) []model.Obj {
     files := make([]model.Obj, 0)
     storages := storagesMap.Values()
     sort.Slice(storages, func(i, j int) bool {
@@ -345,21 +378,30 @@ func GetStorageVirtualFilesByPath(prefix string) []model.Obj {
     })
 
     prefix = utils.FixAndCleanPath(prefix)
-    set := mapset.NewSet[string]()
+    set := make(map[string]int)
     for _, v := range storages {
         mountPath := utils.GetActualMountPath(v.GetStorage().MountPath)
         // Exclude prefix itself and non prefix
         if len(prefix) >= len(mountPath) || !utils.IsSubPath(prefix, mountPath) {
             continue
         }
-        name := strings.SplitN(strings.TrimPrefix(mountPath[len(prefix):], "/"), "/", 2)[0]
-        if set.Add(name) {
-            files = append(files, &model.Object{
-                Name:     name,
+        names := strings.SplitN(strings.TrimPrefix(mountPath[len(prefix):], "/"), "/", 2)
+        idx, ok := set[names[0]]
+        if !ok {
+            set[names[0]] = len(files)
+            obj := &model.Object{
+                Name:     names[0],
                 Size:     0,
                 Modified: v.GetStorage().Modified,
                 IsFolder: true,
-            })
+            }
+            if len(names) == 1 {
+                files = append(files, rootCallback(v, obj))
+            } else {
+                files = append(files, obj)
+            }
+        } else if len(names) == 1 {
+            files[idx] = rootCallback(v, files[idx])
         }
     }
     return files
@@ -137,6 +137,60 @@ func (f *FileStream) CacheFullAndWriter(up *model.UpdateProgress, writer io.Writ
     if writer != nil {
         reader = io.TeeReader(reader, writer)
     }
+
+    if f.GetSize() < 0 {
+        if f.peekBuff == nil {
+            f.peekBuff = &buffer.Reader{}
+        }
+        // check whether there is any data at all
+        buf := []byte{0}
+        n, err := io.ReadFull(reader, buf)
+        if n > 0 {
+            f.peekBuff.Append(buf[:n])
+        }
+        if err == io.ErrUnexpectedEOF {
+            f.size = f.peekBuff.Size()
+            f.Reader = f.peekBuff
+            return f.peekBuff, nil
+        } else if err != nil {
+            return nil, err
+        }
+        if conf.MaxBufferLimit-n > conf.MmapThreshold && conf.MmapThreshold > 0 {
+            m, err := mmap.Alloc(conf.MaxBufferLimit - n)
+            if err == nil {
+                f.Add(utils.CloseFunc(func() error {
+                    return mmap.Free(m)
+                }))
+                n, err = io.ReadFull(reader, m)
+                if n > 0 {
+                    f.peekBuff.Append(m[:n])
+                }
+                if err == io.ErrUnexpectedEOF {
+                    f.size = f.peekBuff.Size()
+                    f.Reader = f.peekBuff
+                    return f.peekBuff, nil
+                } else if err != nil {
+                    return nil, err
+                }
+            }
+        }
+
+        tmpF, err := utils.CreateTempFile(reader, 0)
+        if err != nil {
+            return nil, err
+        }
+        f.Add(utils.CloseFunc(func() error {
+            return errors.Join(tmpF.Close(), os.RemoveAll(tmpF.Name()))
+        }))
+        peekF, err := buffer.NewPeekFile(f.peekBuff, tmpF)
+        if err != nil {
+            return nil, err
+        }
+        f.size = peekF.Size()
+        f.Reader = peekF
+        return peekF, nil
+    }
+
     f.Reader = reader
     return f.cache(f.GetSize())
 }
@@ -162,7 +216,7 @@ func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
     }
 
     size := httpRange.Start + httpRange.Length
-    if f.peekBuff != nil && size <= int64(f.peekBuff.Len()) {
+    if f.peekBuff != nil && size <= int64(f.peekBuff.Size()) {
         return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil
     }
 
@@ -194,7 +248,7 @@ func (f *FileStream) cache(maxCacheSize int64) (model.File, error) {
         f.peekBuff = &buffer.Reader{}
         f.oriReader = f.Reader
     }
-    bufSize := maxCacheSize - int64(f.peekBuff.Len())
+    bufSize := maxCacheSize - int64(f.peekBuff.Size())
     var buf []byte
     if conf.MmapThreshold > 0 && bufSize >= int64(conf.MmapThreshold) {
         m, err := mmap.Alloc(int(bufSize))
@@ -213,7 +267,7 @@ func (f *FileStream) cache(maxCacheSize int64) (model.File, error) {
         return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", bufSize, n, err)
     }
     f.peekBuff.Append(buf)
-    if int64(f.peekBuff.Len()) >= f.GetSize() {
+    if int64(f.peekBuff.Size()) >= f.GetSize() {
         f.Reader = f.peekBuff
         f.oriReader = nil
     } else {
@@ -77,7 +77,7 @@ func GetRangeReaderFromLink(size int64, link *model.Link) (model.RangeReaderIF,
 
         response, err := net.RequestHttp(ctx, "GET", header, link.URL)
         if err != nil {
-            if _, ok := errors.Unwrap(err).(net.ErrorHttpStatusCode); ok {
+            if _, ok := errors.Unwrap(err).(net.HttpStatusCodeError); ok {
                 return nil, err
             }
             return nil, fmt.Errorf("http request failure, err:%w", err)
@@ -8,83 +8,86 @@ import (
 // stores []byte buffers that are not reused
 type Reader struct {
     bufs   [][]byte
-    length int
-    offset int
+    size   int64
+    offset int64
 }
 
-func (r *Reader) Len() int {
-    return r.length
+func (r *Reader) Size() int64 {
+    return r.size
 }
 
 func (r *Reader) Append(buf []byte) {
-    r.length += len(buf)
+    r.size += int64(len(buf))
     r.bufs = append(r.bufs, buf)
 }
 
 func (r *Reader) Read(p []byte) (int, error) {
-    n, err := r.ReadAt(p, int64(r.offset))
+    n, err := r.ReadAt(p, r.offset)
     if n > 0 {
-        r.offset += n
+        r.offset += int64(n)
     }
     return n, err
 }
 
 func (r *Reader) ReadAt(p []byte, off int64) (int, error) {
-    if off < 0 || off >= int64(r.length) {
+    if off < 0 || off >= r.size {
         return 0, io.EOF
     }
 
-    n, length := 0, int64(0)
+    n := 0
     readFrom := false
     for _, buf := range r.bufs {
-        newLength := length + int64(len(buf))
         if readFrom {
-            w := copy(p[n:], buf)
-            n += w
-        } else if off < newLength {
-            readFrom = true
-            w := copy(p[n:], buf[int(off-length):])
-            n += w
-        }
-        if n == len(p) {
-            return n, nil
+            nn := copy(p[n:], buf)
+            n += nn
+            if n == len(p) {
+                return n, nil
+            }
+        } else if newOff := off - int64(len(buf)); newOff >= 0 {
+            off = newOff
+        } else {
+            nn := copy(p, buf[off:])
+            if nn == len(p) {
+                return nn, nil
+            }
+            n += nn
+            readFrom = true
         }
-        length = newLength
     }
     return n, io.EOF
 }
 
 func (r *Reader) Seek(offset int64, whence int) (int64, error) {
-    var abs int
     switch whence {
     case io.SeekStart:
-        abs = int(offset)
     case io.SeekCurrent:
-        abs = r.offset + int(offset)
+        offset = r.offset + offset
     case io.SeekEnd:
-        abs = r.length + int(offset)
+        offset = r.size + offset
     default:
         return 0, errors.New("Seek: invalid whence")
     }
 
-    if abs < 0 || abs > r.length {
+    if offset < 0 || offset > r.size {
         return 0, errors.New("Seek: invalid offset")
     }
 
-    r.offset = abs
-    return int64(abs), nil
+    r.offset = offset
+    return offset, nil
 }
 
 func (r *Reader) Reset() {
     clear(r.bufs)
     r.bufs = nil
-    r.length = 0
+    r.size = 0
     r.offset = 0
 }
 
 func NewReader(buf ...[]byte) *Reader {
-    b := &Reader{}
+    b := &Reader{
+        bufs: make([][]byte, 0, len(buf)),
+    }
     for _, b1 := range buf {
         b.Append(b1)
     }
@@ -13,8 +13,7 @@ func TestReader_ReadAt(t *testing.T) {
     }
     bs := &Reader{}
     bs.Append([]byte("github.com"))
-    bs.Append([]byte("/"))
-    bs.Append([]byte("OpenList"))
+    bs.Append([]byte("/OpenList"))
     bs.Append([]byte("Team/"))
     bs.Append([]byte("OpenList"))
     tests := []struct {
@@ -71,7 +70,7 @@ func TestReader_ReadAt(t *testing.T) {
             off: 24,
         },
         want: func(a args, n int, err error) error {
-            if n != bs.Len()-int(a.off) {
+            if n != int(bs.Size()-a.off) {
                 return errors.New("read length not match")
             }
             if string(a.p[:n]) != "OpenList" {
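For context on the pkg/buffer changes above: Reader now reports size and offset as int64, and ReadAt walks the appended slices without rescanning from the start. A small usage sketch against the post-patch API; it assumes this module version is on the import path, and only the method names shown in the hunk (NewReader, Size, ReadAt, Seek, Read) are taken from the patch.

package main

import (
    "fmt"
    "io"

    "github.com/OpenListTeam/OpenList/v4/pkg/buffer"
)

func main() {
    // Two backing slices; Reader never copies them into one buffer.
    r := buffer.NewReader([]byte("hello, "), []byte("world"))
    fmt.Println(r.Size()) // 12

    // ReadAt can start inside the first slice and continue into the second.
    p := make([]byte, 5)
    n, err := r.ReadAt(p, 3)
    fmt.Println(n, err, string(p)) // 5 <nil> lo, w

    // Seek positions the sequential Read cursor.
    if _, err := r.Seek(7, io.SeekStart); err == nil {
        q := make([]byte, 5)
        n, _ = r.Read(q)
        fmt.Println(string(q[:n])) // world
    }
}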
88
pkg/buffer/file.go
Normal file
@@ -0,0 +1,88 @@
+package buffer
+
+import (
+    "errors"
+    "io"
+    "os"
+)
+
+type PeekFile struct {
+    peek   *Reader
+    file   *os.File
+    offset int64
+    size   int64
+}
+
+func (p *PeekFile) Read(b []byte) (n int, err error) {
+    n, err = p.ReadAt(b, p.offset)
+    if n > 0 {
+        p.offset += int64(n)
+    }
+    return n, err
+}
+
+func (p *PeekFile) ReadAt(b []byte, off int64) (n int, err error) {
+    if off < p.peek.Size() {
+        n, err = p.peek.ReadAt(b, off)
+        if err == nil || n == len(b) {
+            return n, nil
+        }
+        // EOF
+    }
+    var nn int
+    nn, err = p.file.ReadAt(b[n:], off+int64(n)-p.peek.Size())
+    return n + nn, err
+}
+
+func (p *PeekFile) Seek(offset int64, whence int) (int64, error) {
+    switch whence {
+    case io.SeekStart:
+    case io.SeekCurrent:
+        if offset == 0 {
+            return p.offset, nil
+        }
+        offset = p.offset + offset
+    case io.SeekEnd:
+        offset = p.size + offset
+    default:
+        return 0, errors.New("Seek: invalid whence")
+    }
+
+    if offset < 0 || offset > p.size {
+        return 0, errors.New("Seek: invalid offset")
+    }
+    if offset <= p.peek.Size() {
+        _, err := p.peek.Seek(offset, io.SeekStart)
+        if err != nil {
+            return 0, err
+        }
+        _, err = p.file.Seek(0, io.SeekStart)
+        if err != nil {
+            return 0, err
+        }
+    } else {
+        _, err := p.peek.Seek(p.peek.Size(), io.SeekStart)
+        if err != nil {
+            return 0, err
+        }
+        _, err = p.file.Seek(offset-p.peek.Size(), io.SeekStart)
+        if err != nil {
+            return 0, err
+        }
+    }
+
+    p.offset = offset
+    return offset, nil
+}
+
+func (p *PeekFile) Size() int64 {
+    return p.size
+}
+
+func NewPeekFile(peek *Reader, file *os.File) (*PeekFile, error) {
+    stat, err := file.Stat()
+    if err == nil {
+        return &PeekFile{peek: peek, file: file, size: stat.Size() + peek.Size()}, nil
+    }
+    return nil, err
+}
@@ -57,6 +57,11 @@
     Supported []*HashType
 )
 
+func GetHashByName(name string) (ht *HashType, ok bool) {
+    ht, ok = name2hash[name]
+    return
+}
+
 // RegisterHash adds a new Hash to the list and returns its Type
 func RegisterHash(name, alias string, width int, newFunc func() hash.Hash) *HashType {
     return RegisterHashWithParam(name, alias, width, func(a ...any) hash.Hash { return newFunc() })
@@ -200,26 +200,37 @@ type SyncClosers struct {
 var _ SyncClosersIF = (*SyncClosers)(nil)
 
 func (c *SyncClosers) AcquireReference() bool {
-    ref := atomic.AddInt32(&c.ref, 1)
-    if ref > 0 {
-        // log.Debugf("SyncClosers.AcquireReference %p,ref=%d\n", c, ref)
+    for {
+        ref := atomic.LoadInt32(&c.ref)
+        if ref < 0 {
+            return false
+        }
+        newRef := ref + 1
+        if atomic.CompareAndSwapInt32(&c.ref, ref, newRef) {
+            log.Debugf("AcquireReference %p: %d", c, newRef)
             return true
         }
-    atomic.StoreInt32(&c.ref, math.MinInt16)
-    return false
+    }
 }
 
 func (c *SyncClosers) Close() error {
-    ref := atomic.AddInt32(&c.ref, -1)
-    if ref < -1 {
-        atomic.StoreInt32(&c.ref, math.MinInt16)
+    for {
+        ref := atomic.LoadInt32(&c.ref)
+        if ref < 0 {
             return nil
         }
-    // log.Debugf("SyncClosers.Close %p,ref=%d\n", c, ref+1)
-    if ref > 0 {
+        newRef := ref - 1
+        if newRef <= 0 {
+            newRef = math.MinInt16
+        }
+        if atomic.CompareAndSwapInt32(&c.ref, ref, newRef) {
+            log.Debugf("Close %p: %d", c, ref)
+            if newRef > 0 {
                 return nil
             }
-    atomic.StoreInt32(&c.ref, math.MinInt16)
+            break
+        }
+    }
 
     var errs []error
     for _, closer := range c.closers {
@@ -147,7 +147,7 @@ func proxy(c *gin.Context, link *model.Link, file model.Obj, proxyRange bool) {
         if Writer.IsWritten() {
             log.Errorf("%s %s local proxy error: %+v", c.Request.Method, c.Request.URL.Path, err)
         } else {
-            if statusCode, ok := errors.Unwrap(err).(net.ErrorHttpStatusCode); ok {
+            if statusCode, ok := errors.Unwrap(err).(net.HttpStatusCodeError); ok {
                 common.ErrorPage(c, err, int(statusCode), true)
             } else {
                 common.ErrorPage(c, err, 500, true)
@@ -45,6 +45,7 @@ type ObjResp struct {
     Type        int                        `json:"type"`
     HashInfoStr string                     `json:"hashinfo"`
     HashInfo    map[*utils.HashType]string `json:"hash_info"`
+    MountDetails *model.StorageDetailsWithName `json:"mount_details,omitempty"`
 }
 
 type FsListResp struct {
@@ -98,7 +99,10 @@ func FsList(c *gin.Context, req *ListReq, user *model.User) {
         common.ErrorStrResp(c, "Refresh without permission", 403)
         return
     }
-    objs, err := fs.List(c.Request.Context(), reqPath, &fs.ListArgs{Refresh: req.Refresh})
+    objs, err := fs.List(c.Request.Context(), reqPath, &fs.ListArgs{
+        Refresh:            req.Refresh,
+        WithStorageDetails: !user.IsGuest() && !setting.GetBool(conf.HideStorageDetails),
+    })
     if err != nil {
         common.ErrorResp(c, err, 500)
         return
@@ -224,6 +228,7 @@ func toObjsResp(objs []model.Obj, parent string, encrypt bool) []ObjResp {
     var resp []ObjResp
     for _, obj := range objs {
         thumb, _ := model.GetThumb(obj)
+        mountDetails, _ := model.GetStorageDetails(obj)
         resp = append(resp, ObjResp{
             Id:   obj.GetID(),
             Path: obj.GetPath(),
@@ -237,6 +242,7 @@ func toObjsResp(objs []model.Obj, parent string, encrypt bool) []ObjResp {
             Sign:  common.Sign(obj, parent, encrypt),
             Thumb: thumb,
             Type:  utils.GetObjType(obj.GetName(), obj.IsDir()),
+            MountDetails: mountDetails,
         })
     }
     return resp
@@ -293,7 +299,9 @@ func FsGet(c *gin.Context, req *FsGetReq, user *model.User) {
         common.ErrorStrResp(c, "password is incorrect or you have no permission", 403)
         return
     }
-    obj, err := fs.Get(c.Request.Context(), reqPath, &fs.GetArgs{})
+    obj, err := fs.Get(c.Request.Context(), reqPath, &fs.GetArgs{
+        WithStorageDetails: !user.IsGuest() && !setting.GetBool(conf.HideStorageDetails),
+    })
     if err != nil {
         common.ErrorResp(c, err, 500)
         return
@@ -301,8 +309,8 @@ func FsGet(c *gin.Context, req *FsGetReq, user *model.User) {
     var rawURL string
 
     storage, err := fs.GetStorage(reqPath, &fs.GetStoragesArgs{})
-    provider := "unknown"
-    if err == nil {
+    provider, ok := model.GetProvider(obj)
+    if !ok && err == nil {
         provider = storage.Config().Name
     }
     if !obj.IsDir() {
@@ -350,6 +358,7 @@ func FsGet(c *gin.Context, req *FsGetReq, user *model.User) {
     }
     parentMeta, _ := op.GetNearestMeta(parentPath)
     thumb, _ := model.GetThumb(obj)
+    mountDetails, _ := model.GetStorageDetails(obj)
     common.SuccessResp(c, FsGetResp{
         ObjResp: ObjResp{
             Id: obj.GetID(),
@@ -364,6 +373,7 @@ func FsGet(c *gin.Context, req *FsGetReq, user *model.User) {
             Sign:  common.Sign(obj, parentPath, isEncrypt(meta, reqPath)),
             Type:  utils.GetFileType(obj.GetName()),
             Thumb: thumb,
+            MountDetails: mountDetails,
         },
         RawURL: rawURL,
         Readme: getReadme(meta, reqPath),
@@ -56,15 +56,18 @@ func FsStream(c *gin.Context) {
         }
     }
     dir, name := stdpath.Split(path)
-    sizeStr := c.GetHeader("Content-Length")
-    if sizeStr == "" {
-        sizeStr = "0"
-    }
-    size, err := strconv.ParseInt(sizeStr, 10, 64)
+    // if neither Content-Length nor X-File-Size is present, size = -1, i.e. a streaming upload of unknown size
+    size := c.Request.ContentLength
+    if size < 0 {
+        sizeStr := c.GetHeader("X-File-Size")
+        if sizeStr != "" {
+            size, err = strconv.ParseInt(sizeStr, 10, 64)
             if err != nil {
                 common.ErrorResp(c, err, 400)
                 return
             }
+        }
+    }
     h := make(map[*utils.HashType]string)
     if md5 := c.GetHeader("X-File-Md5"); md5 != "" {
         h[utils.MD5] = md5
@@ -3,9 +3,11 @@ package handles
 import (
     "context"
     "strconv"
+    "sync"
 
     "github.com/OpenListTeam/OpenList/v4/internal/conf"
     "github.com/OpenListTeam/OpenList/v4/internal/db"
+    "github.com/OpenListTeam/OpenList/v4/internal/driver"
     "github.com/OpenListTeam/OpenList/v4/internal/model"
     "github.com/OpenListTeam/OpenList/v4/internal/op"
     "github.com/OpenListTeam/OpenList/v4/server/common"
@@ -13,6 +15,42 @@ import (
     log "github.com/sirupsen/logrus"
 )
 
+type StorageResp struct {
+    model.Storage
+    MountDetails *model.StorageDetails `json:"mount_details,omitempty"`
+}
+
+func makeStorageResp(c *gin.Context, storages []model.Storage) []*StorageResp {
+    ret := make([]*StorageResp, len(storages))
+    var wg sync.WaitGroup
+    for i, s := range storages {
+        ret[i] = &StorageResp{
+            Storage:      s,
+            MountDetails: nil,
+        }
+        d, err := op.GetStorageByMountPath(s.MountPath)
+        if err != nil {
+            continue
+        }
+        wd, ok := d.(driver.WithDetails)
+        if !ok {
+            continue
+        }
+        wg.Add(1)
+        go func() {
+            defer wg.Done()
+            details, err := wd.GetDetails(c)
+            if err != nil {
+                log.Errorf("failed get %s details: %+v", s.MountPath, err)
+                return
+            }
+            ret[i].MountDetails = details
+        }()
+    }
+    wg.Wait()
+    return ret
+}
+
 func ListStorages(c *gin.Context) {
     var req model.PageReq
     if err := c.ShouldBind(&req); err != nil {
@@ -27,7 +65,7 @@ func ListStorages(c *gin.Context) {
         return
     }
     common.SuccessResp(c, common.PageResp{
-        Content: storages,
+        Content: makeStorageResp(c, storages),
         Total:   total,
     })
 }
@@ -14,6 +14,7 @@ import (
     "net/url"
     "os"
     "path"
+    "strconv"
     "strings"
     "time"
 
@@ -271,7 +272,7 @@ func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (sta
     }
     err = common.Proxy(w, r, link, fi)
     if err != nil {
-        if statusCode, ok := errors.Unwrap(err).(net.ErrorHttpStatusCode); ok {
+        if statusCode, ok := errors.Unwrap(err).(net.HttpStatusCodeError); ok {
             return int(statusCode), err
         }
         return http.StatusInternalServerError, fmt.Errorf("webdav proxy error: %+v", err)
@@ -341,9 +342,19 @@ func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int,
     if err != nil {
         return http.StatusForbidden, err
     }
+    size := r.ContentLength
+    if size < 0 {
+        sizeStr := r.Header.Get("X-File-Size")
+        if sizeStr != "" {
+            size, err = strconv.ParseInt(sizeStr, 10, 64)
+            if err != nil {
+                return http.StatusBadRequest, err
+            }
+        }
+    }
     obj := model.Object{
         Name:     path.Base(reqPath),
-        Size:     r.ContentLength,
+        Size:     size,
         Modified: h.getModTime(r),
         Ctime:    h.getCreateTime(r),
     }
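Closing note on the FsStream and WebDAV PUT changes above: when the request body length is unknown (r.ContentLength < 0), the server now falls back to an X-File-Size header before treating the upload as a stream of unknown size. A hedged client-side sketch of supplying that header; the URL, pipe-based body, and payload are placeholders, and only the X-File-Size header name comes from the patch.

package main

import (
    "fmt"
    "io"
    "net/http"
    "strconv"
)

func upload(client *http.Client, url string, body io.Reader, totalSize int64) error {
    req, err := http.NewRequest(http.MethodPut, url, body)
    if err != nil {
        return err
    }
    // The body is a plain io.Reader, so net/http sends it chunked without a
    // Content-Length; X-File-Size still tells the server the real size.
    req.Header.Set("X-File-Size", strconv.FormatInt(totalSize, 10))
    resp, err := client.Do(req)
    if err != nil {
        return err
    }
    defer resp.Body.Close()
    fmt.Println("status:", resp.Status)
    return nil
}

func main() {
    payload := []byte("streamed payload")
    pr, pw := io.Pipe()
    go func() {
        pw.Write(payload)
        pw.Close()
    }()
    // Placeholder endpoint; replace with the real upload URL.
    _ = upload(http.DefaultClient, "http://localhost:5244/upload", pr, int64(len(payload)))
}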