Mirror of https://github.com/OpenListTeam/OpenList.git (synced 2025-09-20 12:46:17 +08:00)

Compare commits: renovate/g...beta (16 commits)

Commits:

- 3936e736e6
- 68433d4f5b
- cc16cb35bf
- d3bc6321f4
- cbbb5ad231
- c1d03c5bcc
- 61a8ed515f
- bbb7c06504
- 8bbdb272d4
- c15ae94307
- f1a5048558
- 1fe26bff9a
- 433dcd156b
- e97f0a289e
- 89f35170b3
- 8188fb2d7d

.github/workflows/beta_release.yml (vendored, 2 changes)

@@ -87,7 +87,7 @@ jobs:
      - name: Setup Go
        uses: actions/setup-go@v5
        with:
          go-version: "1.24.5"
          go-version: "1.25.0"

      - name: Setup web
        run: bash build.sh dev web

.github/workflows/build.yml (vendored, 2 changes)

@@ -33,7 +33,7 @@ jobs:
      - name: Setup Go
        uses: actions/setup-go@v5
        with:
          go-version: "1.24.5"
          go-version: "1.25.0"

      - name: Setup web
        run: bash build.sh dev web

.github/workflows/release.yml (vendored, 3 changes)

@@ -46,7 +46,7 @@ jobs:
      - name: Setup Go
        uses: actions/setup-go@v5
        with:
          go-version: '1.24'
          go-version: '1.25.0'

      - name: Checkout
        uses: actions/checkout@v4

@@ -73,4 +73,5 @@ jobs:
        with:
          files: build/compress/*
          prerelease: false
          tag_name: ${{ github.event.release.tag_name }}

.github/workflows/release_docker.yml (vendored, 4 changes)

@@ -47,7 +47,7 @@ jobs:

      - uses: actions/setup-go@v5
        with:
          go-version: 'stable'
          go-version: '1.25.0'

      - name: Cache Musl
        id: cache-musl

@@ -87,7 +87,7 @@ jobs:

      - uses: actions/setup-go@v5
        with:
          go-version: 'stable'
          go-version: '1.25.0'

      - name: Cache Musl
        id: cache-musl

.github/workflows/test_docker.yml (vendored, 2 changes)

@@ -36,7 +36,7 @@ jobs:

      - uses: actions/setup-go@v5
        with:
          go-version: 'stable'
          go-version: '1.25.0'

      - name: Cache Musl
        id: cache-musl

README.md (11 changes)

@@ -74,7 +74,6 @@ Thank you for your support and understanding of the OpenList project.
- [x] [Thunder](https://pan.xunlei.com)
- [x] [Lanzou](https://www.lanzou.com)
- [x] [ILanzou](https://www.ilanzou.com)
- [x] [Aliyundrive share](https://www.alipan.com)
- [x] [Google photo](https://photos.google.com)
- [x] [Mega.nz](https://mega.nz)
- [x] [Baidu photo](https://photo.baidu.com)

@@ -85,6 +84,16 @@ Thank you for your support and understanding of the OpenList project.
- [x] [FeijiPan](https://www.feijipan.com)
- [x] [dogecloud](https://www.dogecloud.com/product/oss)
- [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
- [x] [Chaoxing](https://www.chaoxing.com)
- [x] [CNB](https://cnb.cool/)
- [x] [Degoo](https://degoo.com)
- [x] [Doubao](https://www.doubao.com)
- [x] [Febbox](https://www.febbox.com)
- [x] [GitHub](https://github.com)
- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
- [x] [Teldrive](https://github.com/tgdrive/teldrive)
- [x] [Weiyun](https://www.weiyun.com)

- [x] Easy to deploy and out-of-the-box
- [x] File preview (PDF, markdown, code, plain text, ...)
- [x] Image preview in gallery mode

README_cn.md (10 changes)

@@ -74,7 +74,6 @@ OpenList 是一个由 OpenList 团队独立维护的开源项目,遵循 AGPL-3
- [x] [迅雷网盘](https://pan.xunlei.com)
- [x] [蓝奏云](https://www.lanzou.com)
- [x] [蓝奏云优享版](https://www.ilanzou.com)
- [x] [阿里云盘分享](https://www.alipan.com)
- [x] [Google 相册](https://photos.google.com)
- [x] [Mega.nz](https://mega.nz)
- [x] [百度相册](https://photo.baidu.com)

@@ -85,6 +84,15 @@ OpenList 是一个由 OpenList 团队独立维护的开源项目,遵循 AGPL-3
- [x] [飞机盘](https://www.feijipan.com)
- [x] [多吉云](https://www.dogecloud.com/product/oss)
- [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
- [x] [超星](https://www.chaoxing.com)
- [x] [CNB](https://cnb.cool/)
- [x] [Degoo](https://degoo.com)
- [x] [豆包](https://www.doubao.com)
- [x] [Febbox](https://www.febbox.com)
- [x] [GitHub](https://github.com)
- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
- [x] [Teldrive](https://github.com/tgdrive/teldrive)
- [x] [微云](https://www.weiyun.com)
- [x] 部署方便,开箱即用
- [x] 文件预览(PDF、markdown、代码、纯文本等)
- [x] 画廊模式下的图片预览

README_ja.md (10 changes)

@@ -74,7 +74,6 @@ OpenListプロジェクトへのご支援とご理解をありがとうございい
- [x] [Thunder](https://pan.xunlei.com)
- [x] [Lanzou](https://www.lanzou.com)
- [x] [ILanzou](https://www.ilanzou.com)
- [x] [Aliyundrive share](https://www.alipan.com)
- [x] [Google photo](https://photos.google.com)
- [x] [Mega.nz](https://mega.nz)
- [x] [Baidu photo](https://photo.baidu.com)

@@ -85,6 +84,15 @@ OpenListプロジェクトへのご支援とご理解をありがとうございい
- [x] [FeijiPan](https://www.feijipan.com)
- [x] [dogecloud](https://www.dogecloud.com/product/oss)
- [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
- [x] [Chaoxing](https://www.chaoxing.com)
- [x] [CNB](https://cnb.cool/)
- [x] [Degoo](https://degoo.com)
- [x] [Doubao](https://www.doubao.com)
- [x] [Febbox](https://www.febbox.com)
- [x] [GitHub](https://github.com)
- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
- [x] [Teldrive](https://github.com/tgdrive/teldrive)
- [x] [Weiyun](https://www.weiyun.com)
- [x] 簡単にデプロイでき、すぐに使える
- [x] ファイルプレビュー(PDF、markdown、コード、テキストなど)
- [x] ギャラリーモードでの画像プレビュー

README_nl.md (10 changes)

@@ -74,7 +74,6 @@ Dank u voor uw ondersteuning en begrip
- [x] [Thunder](https://pan.xunlei.com)
- [x] [Lanzou](https://www.lanzou.com)
- [x] [ILanzou](https://www.ilanzou.com)
- [x] [Aliyundrive share](https://www.alipan.com)
- [x] [Google photo](https://photos.google.com)
- [x] [Mega.nz](https://mega.nz)
- [x] [Baidu photo](https://photo.baidu.com)

@@ -85,6 +84,15 @@ Dank u voor uw ondersteuning en begrip
- [x] [FeijiPan](https://www.feijipan.com)
- [x] [dogecloud](https://www.dogecloud.com/product/oss)
- [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
- [x] [Chaoxing](https://www.chaoxing.com)
- [x] [CNB](https://cnb.cool/)
- [x] [Degoo](https://degoo.com)
- [x] [Doubao](https://www.doubao.com)
- [x] [Febbox](https://www.febbox.com)
- [x] [GitHub](https://github.com)
- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
- [x] [Teldrive](https://github.com/tgdrive/teldrive)
- [x] [Weiyun](https://www.weiyun.com)
- [x] Eenvoudig te implementeren en direct te gebruiken
- [x] Bestandsvoorbeeld (PDF, markdown, code, platte tekst, ...)
- [x] Afbeeldingsvoorbeeld in galerijweergave

build.sh (6 changes)

@@ -236,7 +236,7 @@ BuildRelease() {
BuildLoongGLIBC() {
    local target_abi="$2"
    local output_file="$1"
    local oldWorldGoVersion="1.24.3"
    local oldWorldGoVersion="1.25.0"

    if [ "$target_abi" = "abi1.0" ]; then
        echo building for linux-loong64-abi1.0

@@ -254,13 +254,13 @@ BuildLoongGLIBC() {

    # Download and setup patched Go compiler for old-world
    if ! curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
        "https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/go${oldWorldGoVersion}.linux-amd64.tar.gz" \
        "https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250821/go${oldWorldGoVersion}.linux-amd64.tar.gz" \
        -o go-loong64-abi1.0.tar.gz; then
        echo "Error: Failed to download patched Go compiler for old-world ABI1.0"
        if [ -n "$GITHUB_TOKEN" ]; then
            echo "Error output from curl:"
            curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
                "https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/go${oldWorldGoVersion}.linux-amd64.tar.gz" \
                "https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250821/go${oldWorldGoVersion}.linux-amd64.tar.gz" \
                -o go-loong64-abi1.0.tar.gz || true
        fi
        return 1

@@ -337,6 +337,27 @@ func (d *Open115) OfflineList(ctx context.Context) (*sdk.OfflineTaskListResp, er
    return resp, nil
}

func (d *Open115) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    userInfo, err := d.client.UserInfo(ctx)
    if err != nil {
        return nil, err
    }
    total, err := userInfo.RtSpaceInfo.AllTotal.Size.Int64()
    if err != nil {
        return nil, err
    }
    free, err := userInfo.RtSpaceInfo.AllRemain.Size.Int64()
    if err != nil {
        return nil, err
    }
    return &model.StorageDetails{
        DiskUsage: model.DiskUsage{
            TotalSpace: uint64(total),
            FreeSpace: uint64(free),
        },
    }, nil
}

// func (d *Open115) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
// 	// TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
// 	return nil, errs.NotImplement

@@ -28,7 +28,7 @@ func (f File) CreateTime() time.Time {
}

func (f File) GetHash() utils.HashInfo {
    return utils.HashInfo{}
    return utils.NewHashInfo(utils.MD5, f.Etag)
}

func (f File) GetPath() string {

@@ -17,6 +17,7 @@ import (
type Open123 struct {
    model.Storage
    Addition
    UID uint64
}

func (d *Open123) Config() driver.Config {

@@ -83,7 +84,7 @@ func (d *Open123) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
        }, nil
    }

    u, err := d.getUserInfo()
    uid, err := d.getUID()
    if err != nil {
        return nil, err
    }

@@ -91,7 +92,7 @@ func (d *Open123) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
    duration := time.Duration(d.DirectLinkValidDuration) * time.Minute

    newURL, err := d.SignURL(res.Data.URL, d.DirectLinkPrivateKey,
        u.Data.UID, duration)
        uid, duration)
    if err != nil {
        return nil, err
    }

@@ -213,5 +214,20 @@ func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
    return nil, fmt.Errorf("upload complete timeout")
}

func (d *Open123) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    userInfo, err := d.getUserInfo()
    if err != nil {
        return nil, err
    }
    total := userInfo.Data.SpacePermanent + userInfo.Data.SpaceTemp
    free := total - userInfo.Data.SpaceUsed
    return &model.StorageDetails{
        DiskUsage: model.DiskUsage{
            TotalSpace: total,
            FreeSpace: free,
        },
    }, nil
}

var _ driver.Driver = (*Open123)(nil)
var _ driver.PutResult = (*Open123)(nil)

@@ -127,19 +127,19 @@ type RefreshTokenResp struct {
type UserInfoResp struct {
    BaseResp
    Data struct {
        UID uint64 `json:"uid"`
        Username string `json:"username"`
        DisplayName string `json:"displayName"`
        HeadImage string `json:"headImage"`
        Passport string `json:"passport"`
        Mail string `json:"mail"`
        SpaceUsed int64 `json:"spaceUsed"`
        SpacePermanent int64 `json:"spacePermanent"`
        SpaceTemp int64 `json:"spaceTemp"`
        SpaceTempExpr string `json:"spaceTempExpr"`
        Vip bool `json:"vip"`
        DirectTraffic int64 `json:"directTraffic"`
        IsHideUID bool `json:"isHideUID"`
        UID uint64 `json:"uid"`
        // Username string `json:"username"`
        // DisplayName string `json:"displayName"`
        // HeadImage string `json:"headImage"`
        // Passport string `json:"passport"`
        // Mail string `json:"mail"`
        SpaceUsed uint64 `json:"spaceUsed"`
        SpacePermanent uint64 `json:"spacePermanent"`
        SpaceTemp uint64 `json:"spaceTemp"`
        // SpaceTempExpr int64 `json:"spaceTempExpr"`
        // Vip bool `json:"vip"`
        // DirectTraffic int64 `json:"directTraffic"`
        // IsHideUID bool `json:"isHideUID"`
    } `json:"data"`
}

@@ -158,6 +158,18 @@ func (d *Open123) getUserInfo() (*UserInfoResp, error) {
    return &resp, nil
}

func (d *Open123) getUID() (uint64, error) {
    if d.UID != 0 {
        return d.UID, nil
    }
    resp, err := d.getUserInfo()
    if err != nil {
        return 0, err
    }
    d.UID = resp.Data.UID
    return resp.Data.UID, nil
}

func (d *Open123) getFiles(parentFileId int64, limit int, lastFileId int64) (*FileListResp, error) {
    var resp FileListResp

@@ -200,7 +212,7 @@ func (d *Open123) getDirectLink(fileId int64) (*DirectLinkResp, error) {

    _, err := d.Request(DirectLink, http.MethodGet, func(req *resty.Request) {
        req.SetQueryParams(map[string]string{
            "fileId": strconv.FormatInt(fileId, 10),
            "fileID": strconv.FormatInt(fileId, 10),
        })
    }, &resp)
    if err != nil {

@@ -24,7 +24,7 @@ type File struct {
}

func (f File) GetHash() utils.HashInfo {
    return utils.HashInfo{}
    return utils.NewHashInfo(utils.MD5, f.Etag)
}

func (f File) GetPath() string {

@@ -1,7 +1,6 @@
package _189_tv

import (
    "container/ring"
    "context"
    "net/http"
    "strconv"

@@ -12,18 +11,20 @@ import (
    "github.com/OpenListTeam/OpenList/v4/internal/driver"
    "github.com/OpenListTeam/OpenList/v4/internal/errs"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/pkg/cron"
    "github.com/go-resty/resty/v2"
)

type Cloud189TV struct {
    model.Storage
    Addition
    client *resty.Client
    tokenInfo *AppSessionResp
    uploadThread int
    familyTransferFolder *ring.Ring
    cleanFamilyTransferFile func()
    storageConfig driver.Config
    client *resty.Client
    tokenInfo *AppSessionResp
    uploadThread int
    storageConfig driver.Config

    TempUuid string
    cron *cron.Cron // 新增 cron 字段
}

func (y *Cloud189TV) Config() driver.Config {

@@ -79,10 +80,17 @@ func (y *Cloud189TV) Init(ctx context.Context) (err error) {
        }
    }

    y.cron = cron.NewCron(time.Minute * 5)
    y.cron.Do(y.keepAlive)

    return
}

func (y *Cloud189TV) Drop(ctx context.Context) error {
    if y.cron != nil {
        y.cron.Stop()
        y.cron = nil
    }
    return nil
}

@@ -8,7 +8,6 @@ import (
type Addition struct {
    driver.RootID
    AccessToken string `json:"access_token"`
    TempUuid string
    OrderBy string `json:"order_by" type:"select" options:"filename,filesize,lastOpTime" default:"filename"`
    OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
    Type string `json:"type" type:"select" options:"personal,family" default:"personal"`

@@ -66,6 +66,10 @@ func (y *Cloud189TV) AppKeySignatureHeader(url, method string) map[string]string
}

func (y *Cloud189TV) request(url, method string, callback base.ReqCallback, params map[string]string, resp interface{}, isFamily ...bool) ([]byte, error) {
    return y.requestWithRetry(url, method, callback, params, resp, 0, isFamily...)
}

func (y *Cloud189TV) requestWithRetry(url, method string, callback base.ReqCallback, params map[string]string, resp interface{}, retryCount int, isFamily ...bool) ([]byte, error) {
    req := y.client.R().SetQueryParams(clientSuffix())

    if params != nil {

@@ -91,7 +95,22 @@ func (y *Cloud189TV) request(url, method string, callback base.ReqCallback, para

    if strings.Contains(res.String(), "userSessionBO is null") ||
        strings.Contains(res.String(), "InvalidSessionKey") {
        return nil, errors.New("session expired")
        // 限制重试次数,避免无限递归
        if retryCount >= 3 {
            y.Addition.AccessToken = ""
            op.MustSaveDriverStorage(y)
            return nil, errors.New("session expired after retry")
        }

        // 尝试刷新会话
        if err := y.refreshSession(); err != nil {
            // 如果刷新失败,说明AccessToken也已过期,需要重新登录
            y.Addition.AccessToken = ""
            op.MustSaveDriverStorage(y)
            return nil, errors.New("session expired")
        }
        // 如果刷新成功,则重试原始请求(增加重试计数)
        return y.requestWithRetry(url, method, callback, params, resp, retryCount+1, isFamily...)
    }

    // 处理错误

@@ -211,7 +230,7 @@ func (y *Cloud189TV) login() (err error) {
    var erron RespErr
    var tokenInfo AppSessionResp
    if y.Addition.AccessToken == "" {
        if y.Addition.TempUuid == "" {
        if y.TempUuid == "" {
            // 获取登录参数
            var uuidInfo UuidInfoResp
            req.SetResult(&uuidInfo).SetError(&erron)

@@ -230,7 +249,7 @@ func (y *Cloud189TV) login() (err error) {
            if uuidInfo.Uuid == "" {
                return errors.New("uuidInfo is empty")
            }
            y.Addition.TempUuid = uuidInfo.Uuid
            y.TempUuid = uuidInfo.Uuid
            op.MustSaveDriverStorage(y)

            // 展示二维码

@@ -258,7 +277,7 @@ func (y *Cloud189TV) login() (err error) {
            // Signature
            req.SetHeaders(y.AppKeySignatureHeader(ApiUrl+"/family/manage/qrcodeLoginResult.action",
                http.MethodGet))
            req.SetQueryParam("uuid", y.Addition.TempUuid)
            req.SetQueryParam("uuid", y.TempUuid)
            _, err = req.Execute(http.MethodGet, ApiUrl+"/family/manage/qrcodeLoginResult.action")
            if err != nil {
                return

@@ -270,7 +289,6 @@ func (y *Cloud189TV) login() (err error) {
                return errors.New("E189AccessToken is empty")
            }
            y.Addition.AccessToken = accessTokenResp.E189AccessToken
            y.Addition.TempUuid = ""
        }
    }
    // 获取SessionKey 和 SessionSecret

@@ -294,6 +312,44 @@ func (y *Cloud189TV) login() (err error) {
    return
}

// refreshSession 尝试使用现有的 AccessToken 刷新会话
func (y *Cloud189TV) refreshSession() (err error) {
    var erron RespErr
    var tokenInfo AppSessionResp
    reqb := y.client.R().SetQueryParams(clientSuffix())
    reqb.SetResult(&tokenInfo).SetError(&erron)
    // Signature
    reqb.SetHeaders(y.AppKeySignatureHeader(ApiUrl+"/family/manage/loginFamilyMerge.action",
        http.MethodGet))
    reqb.SetQueryParam("e189AccessToken", y.Addition.AccessToken)
    _, err = reqb.Execute(http.MethodGet, ApiUrl+"/family/manage/loginFamilyMerge.action")
    if err != nil {
        return
    }

    if erron.HasError() {
        return &erron
    }

    y.tokenInfo = &tokenInfo
    return nil
}

func (y *Cloud189TV) keepAlive() {
    _, err := y.get(ApiUrl+"/keepUserSession.action", func(r *resty.Request) {
        r.SetQueryParams(clientSuffix())
    }, nil)
    if err != nil {
        utils.Log.Warnf("189tv: Failed to keep user session alive: %v", err)
        // 如果keepAlive失败,尝试刷新session
        if refreshErr := y.refreshSession(); refreshErr != nil {
            utils.Log.Errorf("189tv: Failed to refresh session after keepAlive error: %v", refreshErr)
        }
    } else {
        utils.Log.Debugf("189tv: User session kept alive successfully.")
    }
}

func (y *Cloud189TV) RapidUpload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, isFamily bool, overwrite bool) (model.Obj, error) {
    fileMd5 := stream.GetHash().GetHash(utils.MD5)
    if len(fileMd5) < utils.MD5.Width {

@@ -12,6 +12,7 @@ import (
    "github.com/OpenListTeam/OpenList/v4/internal/driver"
    "github.com/OpenListTeam/OpenList/v4/internal/errs"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/pkg/cron"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
    "github.com/go-resty/resty/v2"
    "github.com/google/uuid"

@@ -21,12 +22,12 @@ type Cloud189PC struct {
    model.Storage
    Addition

    identity string

    client *resty.Client

    loginParam *LoginParam
    tokenInfo *AppSessionResp
    loginParam *LoginParam
    qrcodeParam *QRLoginParam

    tokenInfo *AppSessionResp

    uploadThread int

@@ -35,6 +36,7 @@ type Cloud189PC struct {

    storageConfig driver.Config
    ref *Cloud189PC
    cron *cron.Cron
}

func (y *Cloud189PC) Config() driver.Config {

@@ -84,14 +86,22 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) {
        })
    }

    // 避免重复登陆
    identity := utils.GetMD5EncodeStr(y.Username + y.Password)
    if !y.isLogin() || y.identity != identity {
        y.identity = identity
        // 先尝试用Token刷新,之后尝试登陆
        if y.Addition.RefreshToken != "" {
            y.tokenInfo = &AppSessionResp{RefreshToken: y.Addition.RefreshToken}
            if err = y.refreshToken(); err != nil {
                return
            }
        } else {
            if err = y.login(); err != nil {
                return
            }
        }

        // 初始化并启动 cron 任务
        y.cron = cron.NewCron(time.Duration(time.Minute * 5))
        // 每5分钟执行一次 keepAlive
        y.cron.Do(y.keepAlive)
    }

    // 处理家庭云ID

@@ -128,6 +138,10 @@ func (d *Cloud189PC) InitReference(storage driver.Driver) error {

func (y *Cloud189PC) Drop(ctx context.Context) error {
    y.ref = nil
    if y.cron != nil {
        y.cron.Stop()
        y.cron = nil
    }
    return nil
}

@@ -80,6 +80,20 @@ func timestamp() int64 {
    return time.Now().UTC().UnixNano() / 1e6
}

// formatDate formats a time.Time object into the "YYYY-MM-DDHH:mm:ssSSS" format.
func formatDate(t time.Time) string {
    // The layout string "2006-01-0215:04:05.000" corresponds to:
    // 2006 -> Year (YYYY)
    // 01 -> Month (MM)
    // 02 -> Day (DD)
    // 15 -> Hour (HH)
    // 04 -> Minute (mm)
    // 05 -> Second (ss)
    // 000 -> Millisecond (SSS) with leading zeros
    // Note the lack of a separator between the date and hour, matching the desired output.
    return t.Format("2006-01-0215:04:05.000")
}

func MustParseTime(str string) *time.Time {
    lastOpTime, _ := time.ParseInLocation("2006-01-02 15:04:05 -07", str+" +08", time.Local)
    return &lastOpTime

@@ -6,9 +6,11 @@ import (
)

type Addition struct {
    Username string `json:"username" required:"true"`
    Password string `json:"password" required:"true"`
    VCode string `json:"validate_code"`
    LoginType string `json:"login_type" type:"select" options:"password,qrcode" default:"password" required:"true"`
    Username string `json:"username" required:"true"`
    Password string `json:"password" required:"true"`
    VCode string `json:"validate_code"`
    RefreshToken string `json:"refresh_token" help:"To switch accounts, please clear this field"`
    driver.RootID
    OrderBy string `json:"order_by" type:"select" options:"filename,filesize,lastOpTime" default:"filename"`
    OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`

@@ -68,15 +68,7 @@ func (e *RespErr) Error() string {
    return ""
}

// 登陆需要的参数
type LoginParam struct {
    // 加密后的用户名和密码
    RsaUsername string
    RsaPassword string

    // rsa密钥
    jRsaKey string

type BaseLoginParam struct {
    // 请求头参数
    Lt string
    ReqId string

@@ -88,6 +80,27 @@ type LoginParam struct {
    CaptchaToken string
}

// QRLoginParam 用于暂存二维码登录过程中的参数
type QRLoginParam struct {
    BaseLoginParam

    UUID string `json:"uuid"`
    EncodeUUID string `json:"encodeuuid"`
    EncryUUID string `json:"encryuuid"`
}

// 登陆需要的参数
type LoginParam struct {
    // 加密后的用户名和密码
    RsaUsername string
    RsaPassword string

    // rsa密钥
    jRsaKey string

    BaseLoginParam
}

// 登陆加密相关
type EncryptConfResp struct {
    Result int `json:"result"`

@@ -29,6 +29,7 @@ import (
    "github.com/OpenListTeam/OpenList/v4/internal/stream"
    "github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
    "github.com/skip2/go-qrcode"

    "github.com/avast/retry-go"
    "github.com/go-resty/resty/v2"

@@ -54,6 +55,9 @@ const (
    MAC = "TELEMAC"

    CHANNEL_ID = "web_cloud.189.cn"

    // Error codes
    UserInvalidOpenTokenError = "UserInvalidOpenToken"
)

func (y *Cloud189PC) SignatureHeader(url, method, params string, isFamily bool) map[string]string {

@@ -264,7 +268,14 @@ func (y *Cloud189PC) findFileByName(ctx context.Context, searchName string, fold
    }
}

func (y *Cloud189PC) login() (err error) {
func (y *Cloud189PC) login() error {
    if y.LoginType == "qrcode" {
        return y.loginByQRCode()
    }
    return y.loginByPassword()
}

func (y *Cloud189PC) loginByPassword() (err error) {
    // 初始化登陆所需参数
    if y.loginParam == nil {
        if err = y.initLoginParam(); err != nil {

@@ -278,10 +289,15 @@ func (y *Cloud189PC) login() (err error) {
        // 销毁登陆参数
        y.loginParam = nil
        // 遇到错误,重新加载登陆参数(刷新验证码)
        if err != nil && y.NoUseOcr {
            if err1 := y.initLoginParam(); err1 != nil {
                err = fmt.Errorf("err1: %s \nerr2: %s", err, err1)
        if err != nil {
            if y.NoUseOcr {
                if err1 := y.initLoginParam(); err1 != nil {
                    err = fmt.Errorf("err1: %s \nerr2: %s", err, err1)
                }
            }

            y.Status = err.Error()
            op.MustSaveDriverStorage(y)
        }
    }()

@@ -336,14 +352,105 @@ func (y *Cloud189PC) login() (err error) {
        err = fmt.Errorf(tokenInfo.ResMessage)
        return
    }
    y.Addition.RefreshToken = tokenInfo.RefreshToken
    y.tokenInfo = &tokenInfo
    op.MustSaveDriverStorage(y)
    return
}

/* 初始化登陆需要的参数
 * 如果遇到验证码返回错误
 */
func (y *Cloud189PC) initLoginParam() error {
func (y *Cloud189PC) loginByQRCode() error {
    if y.qrcodeParam == nil {
        if err := y.initQRCodeParam(); err != nil {
            // 二维码也通过错误返回
            return err
        }
    }

    var state struct {
        Status int `json:"status"`
        RedirectUrl string `json:"redirectUrl"`
        Msg string `json:"msg"`
    }

    now := time.Now()
    _, err := y.client.R().
        SetHeaders(map[string]string{
            "Referer": AUTH_URL,
            "Reqid": y.qrcodeParam.ReqId,
            "lt": y.qrcodeParam.Lt,
        }).
        SetFormData(map[string]string{
            "appId": APP_ID,
            "clientType": CLIENT_TYPE,
            "returnUrl": RETURN_URL,
            "paramId": y.qrcodeParam.ParamId,
            "uuid": y.qrcodeParam.UUID,
            "encryuuid": y.qrcodeParam.EncryUUID,
            "date": formatDate(now),
            "timeStamp": fmt.Sprint(now.UTC().UnixNano() / 1e6),
        }).
        ForceContentType("application/json;charset=UTF-8").
        SetResult(&state).
        Post(AUTH_URL + "/api/logbox/oauth2/qrcodeLoginState.do")
    if err != nil {
        return fmt.Errorf("failed to check QR code state: %w", err)
    }

    switch state.Status {
    case 0: // 登录成功
        var tokenInfo AppSessionResp
        _, err = y.client.R().
            SetResult(&tokenInfo).
            SetQueryParams(clientSuffix()).
            SetQueryParam("redirectURL", state.RedirectUrl).
            Post(API_URL + "/getSessionForPC.action")
        if err != nil {
            return err
        }
        if tokenInfo.ResCode != 0 {
            return fmt.Errorf(tokenInfo.ResMessage)
        }
        y.Addition.RefreshToken = tokenInfo.RefreshToken
        y.tokenInfo = &tokenInfo
        op.MustSaveDriverStorage(y)
        return nil
    case -11001: // 二维码过期
        y.qrcodeParam = nil
        return errors.New("QR code expired, please try again")
    case -106: // 等待扫描
        return y.genQRCode("QR code has not been scanned yet, please scan and save again")
    case -11002: // 等待确认
        return y.genQRCode("QR code has been scanned, please confirm the login on your phone and save again")
    default: // 其他错误
        y.qrcodeParam = nil
        return fmt.Errorf("QR code login failed with status %d: %s", state.Status, state.Msg)
    }
}

func (y *Cloud189PC) genQRCode(text string) error {
    // 展示二维码
    qrTemplate := `<body>
        state: %s
        <br><img src="data:image/jpeg;base64,%s"/>
        <br>Or Click here: <a href="%s">Login</a>
</body>`

    // Generate QR code
    qrCode, err := qrcode.Encode(y.qrcodeParam.UUID, qrcode.Medium, 256)
    if err != nil {
        return fmt.Errorf("failed to generate QR code: %v", err)
    }

    // Encode QR code to base64
    qrCodeBase64 := base64.StdEncoding.EncodeToString(qrCode)

    // Create the HTML page
    qrPage := fmt.Sprintf(qrTemplate, text, qrCodeBase64, y.qrcodeParam.UUID)
    return fmt.Errorf("need verify: \n%s", qrPage)

}

func (y *Cloud189PC) initBaseParams() (*BaseLoginParam, error) {
    // 清除cookie
    jar, _ := cookiejar.New(nil)
    y.client.SetCookieJar(jar)

@@ -357,17 +464,30 @@ func (y *Cloud189PC) initLoginParam() error {
        }).
        Get(WEB_URL + "/api/portal/unifyLoginForPC.action")
    if err != nil {
        return err
        return nil, err
    }

    param := LoginParam{
    return &BaseLoginParam{
        CaptchaToken: regexp.MustCompile(`'captchaToken' value='(.+?)'`).FindStringSubmatch(res.String())[1],
        Lt: regexp.MustCompile(`lt = "(.+?)"`).FindStringSubmatch(res.String())[1],
        ParamId: regexp.MustCompile(`paramId = "(.+?)"`).FindStringSubmatch(res.String())[1],
        ReqId: regexp.MustCompile(`reqId = "(.+?)"`).FindStringSubmatch(res.String())[1],
        // jRsaKey: regexp.MustCompile(`"j_rsaKey" value="(.+?)"`).FindStringSubmatch(res.String())[1],
    }, nil
}

/* 初始化登陆需要的参数
 * 如果遇到验证码返回错误
 */
func (y *Cloud189PC) initLoginParam() error {
    y.loginParam = nil

    baseParam, err := y.initBaseParams()
    if err != nil {
        return err
    }

    y.loginParam = &LoginParam{BaseLoginParam: *baseParam}

    // 获取rsa公钥
    var encryptConf EncryptConfResp
    _, err = y.client.R().

@@ -378,18 +498,17 @@ func (y *Cloud189PC) initLoginParam() error {
        return err
    }

    param.jRsaKey = fmt.Sprintf("-----BEGIN PUBLIC KEY-----\n%s\n-----END PUBLIC KEY-----", encryptConf.Data.PubKey)
    param.RsaUsername = encryptConf.Data.Pre + RsaEncrypt(param.jRsaKey, y.Username)
    param.RsaPassword = encryptConf.Data.Pre + RsaEncrypt(param.jRsaKey, y.Password)
    y.loginParam = &param
    y.loginParam.jRsaKey = fmt.Sprintf("-----BEGIN PUBLIC KEY-----\n%s\n-----END PUBLIC KEY-----", encryptConf.Data.PubKey)
    y.loginParam.RsaUsername = encryptConf.Data.Pre + RsaEncrypt(y.loginParam.jRsaKey, y.Username)
    y.loginParam.RsaPassword = encryptConf.Data.Pre + RsaEncrypt(y.loginParam.jRsaKey, y.Password)

    // 判断是否需要验证码
    resp, err := y.client.R().
        SetHeader("REQID", param.ReqId).
        SetHeader("REQID", y.loginParam.ReqId).
        SetFormData(map[string]string{
            "appKey": APP_ID,
            "accountType": ACCOUNT_TYPE,
            "userName": param.RsaUsername,
            "userName": y.loginParam.RsaUsername,
        }).Post(AUTH_URL + "/api/logbox/oauth2/needcaptcha.do")
    if err != nil {
        return err

@@ -401,8 +520,8 @@ func (y *Cloud189PC) initLoginParam() error {
    // 拉取验证码
    imgRes, err := y.client.R().
        SetQueryParams(map[string]string{
            "token": param.CaptchaToken,
            "REQID": param.ReqId,
            "token": y.loginParam.CaptchaToken,
            "REQID": y.loginParam.ReqId,
            "rnd": fmt.Sprint(timestamp()),
        }).
        Get(AUTH_URL + "/api/logbox/oauth2/picCaptcha.do")

@@ -429,10 +548,38 @@ func (y *Cloud189PC) initLoginParam() error {
    return nil
}

// getQRCode 获取并返回二维码
func (y *Cloud189PC) initQRCodeParam() (err error) {
    y.qrcodeParam = nil

    baseParam, err := y.initBaseParams()
    if err != nil {
        return err
    }

    var qrcodeParam QRLoginParam
    _, err = y.client.R().
        SetFormData(map[string]string{"appId": APP_ID}).
        ForceContentType("application/json;charset=UTF-8").
        SetResult(&qrcodeParam).
        Post(AUTH_URL + "/api/logbox/oauth2/getUUID.do")
    if err != nil {
        return err
    }
    qrcodeParam.BaseLoginParam = *baseParam
    y.qrcodeParam = &qrcodeParam

    return y.genQRCode("please scan the QR code with the 189 Cloud app, then save the settings again.")
}

// 刷新会话
func (y *Cloud189PC) refreshSession() (err error) {
    return y.refreshSessionWithRetry(0)
}

func (y *Cloud189PC) refreshSessionWithRetry(retryCount int) (err error) {
    if y.ref != nil {
        return y.ref.refreshSession()
        return y.ref.refreshSessionWithRetry(retryCount)
    }
    var erron RespErr
    var userSessionResp UserSessionResp

@@ -449,24 +596,87 @@ func (y *Cloud189PC) refreshSession() (err error) {
        return err
    }

    // 错误影响正常访问,下线该储存
    defer func() {
        if err != nil {
            y.GetStorage().SetStatus(fmt.Sprintf("%+v", err.Error()))
            op.MustSaveDriverStorage(y)
        }
    }()

    // token生效刷新token
    if erron.HasError() {
        if erron.ResCode == "UserInvalidOpenToken" {
            if err = y.login(); err != nil {
                return err
            }
        if erron.ResCode == UserInvalidOpenTokenError {
            return y.refreshTokenWithRetry(retryCount)
        }
        return &erron
    }
    y.tokenInfo.UserSessionResp = userSessionResp
    return
    return nil
}

// refreshToken 刷新token,失败时返回错误,不再直接调用login
func (y *Cloud189PC) refreshToken() (err error) {
    return y.refreshTokenWithRetry(0)
}

func (y *Cloud189PC) refreshTokenWithRetry(retryCount int) (err error) {
    if y.ref != nil {
        return y.ref.refreshTokenWithRetry(retryCount)
    }

    // 限制重试次数,避免无限递归
    if retryCount >= 3 {
        if y.Addition.RefreshToken != "" {
            y.Addition.RefreshToken = ""
            op.MustSaveDriverStorage(y)
        }
        return errors.New("refresh token failed after maximum retries")
    }

    var erron RespErr
    var tokenInfo AppSessionResp
    _, err = y.client.R().
        SetResult(&tokenInfo).
        ForceContentType("application/json;charset=UTF-8").
        SetError(&erron).
        SetFormData(map[string]string{
            "clientId": APP_ID,
            "refreshToken": y.tokenInfo.RefreshToken,
            "grantType": "refresh_token",
            "format": "json",
        }).
        Post(AUTH_URL + "/api/oauth2/refreshToken.do")
    if err != nil {
        return err
    }

    // 如果刷新失败,返回错误给上层处理
    if erron.HasError() {
        if y.Addition.RefreshToken != "" {
            y.Addition.RefreshToken = ""
            op.MustSaveDriverStorage(y)
        }

        // 根据登录类型决定下一步行为
        if y.LoginType == "qrcode" {
            return errors.New("QR code session has expired, please re-scan the code to log in")
        }
        // 密码登录模式下,尝试回退到完整登录
        return y.login()
    }

    y.Addition.RefreshToken = tokenInfo.RefreshToken
    y.tokenInfo = &tokenInfo
    op.MustSaveDriverStorage(y)
    return y.refreshSessionWithRetry(retryCount + 1)
}

func (y *Cloud189PC) keepAlive() {
    _, err := y.get(API_URL+"/keepUserSession.action", func(r *resty.Request) {
        r.SetQueryParams(clientSuffix())
    }, nil)
    if err != nil {
        utils.Log.Warnf("189pc: Failed to keep user session alive: %v", err)
        // 如果keepAlive失败,尝试刷新session
        if refreshErr := y.refreshSession(); refreshErr != nil {
            utils.Log.Errorf("189pc: Failed to refresh session after keepAlive error: %v", refreshErr)
        }
    } else {
        utils.Log.Debugf("189pc: User session kept alive successfully.")
    }
}

// 普通上传

@@ -575,8 +785,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo

    // step.4 上传切片
    uploadUrl := uploadUrls[0]
    _, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false,
        driver.NewLimitedUploadStream(ctx, rateLimitedRd), isFamily)
    _, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, rateLimitedRd, isFamily)
    if err != nil {
        return err
    }

@@ -79,21 +79,45 @@ func (d *Alias) Get(ctx context.Context, path string) (model.Obj, error) {
    if !ok {
        return nil, errs.ObjectNotFound
    }
    var ret *model.Object
    provider := ""
    for _, dst := range dsts {
        obj, err := fs.Get(ctx, stdpath.Join(dst, sub), &fs.GetArgs{NoLog: true})
        rawPath := stdpath.Join(dst, sub)
        obj, err := fs.Get(ctx, rawPath, &fs.GetArgs{NoLog: true})
        if err != nil {
            continue
        }
        return &model.Object{
            Path: path,
            Name: obj.GetName(),
            Size: obj.GetSize(),
            Modified: obj.ModTime(),
            IsFolder: obj.IsDir(),
            HashInfo: obj.GetHash(),
        storage, err := fs.GetStorage(rawPath, &fs.GetStoragesArgs{})
        if ret == nil {
            ret = &model.Object{
                Path: path,
                Name: obj.GetName(),
                Size: obj.GetSize(),
                Modified: obj.ModTime(),
                IsFolder: obj.IsDir(),
                HashInfo: obj.GetHash(),
            }
            if !d.ProviderPassThrough || err != nil {
                break
            }
            provider = storage.Config().Name
        } else if err != nil || provider != storage.GetStorage().Driver {
            provider = ""
            break
        }
    }
    if ret == nil {
        return nil, errs.ObjectNotFound
    }
    if provider != "" {
        return &model.ObjectProvider{
            Object: *ret,
            Provider: model.Provider{
                Provider: provider,
            },
        }, nil
    }
    return nil, errs.ObjectNotFound
    return ret, nil
}

func (d *Alias) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {

@@ -186,6 +210,35 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
    return nil, errs.ObjectNotFound
}

func (d *Alias) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
    root, sub := d.getRootAndPath(args.Obj.GetPath())
    dsts, ok := d.pathMap[root]
    if !ok {
        return nil, errs.ObjectNotFound
    }
    for _, dst := range dsts {
        rawPath := stdpath.Join(dst, sub)
        storage, actualPath, err := op.GetStorageAndActualPath(rawPath)
        if err != nil {
            continue
        }
        other, ok := storage.(driver.Other)
        if !ok {
            continue
        }
        obj, err := op.GetUnwrap(ctx, storage, actualPath)
        if err != nil {
            continue
        }
        return other.Other(ctx, model.OtherArgs{
            Obj: obj,
            Method: args.Method,
            Data: args.Data,
        })
    }
    return nil, errs.NotImplement
}

func (d *Alias) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
    if !d.Writable {
        return errs.PermissionDenied

@@ -15,6 +15,7 @@ type Addition struct {
    DownloadConcurrency int `json:"download_concurrency" default:"0" required:"false" type:"number" help:"Need to enable proxy"`
    DownloadPartSize int `json:"download_part_size" default:"0" type:"number" required:"false" help:"Need to enable proxy. Unit: KB"`
    Writable bool `json:"writable" type:"bool" default:"false"`
    ProviderPassThrough bool `json:"provider_pass_through" type:"bool" default:"false"`
}

var config = driver.Config{

@@ -291,6 +291,21 @@ func (d *AliyundriveOpen) Other(ctx context.Context, args model.OtherArgs) (inte
    return resp, nil
}

func (d *AliyundriveOpen) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    res, err := d.request(ctx, limiterOther, "/adrive/v1.0/user/getSpaceInfo", http.MethodPost, nil)
    if err != nil {
        return nil, err
    }
    total := utils.Json.Get(res, "personal_space_info", "total_size").ToUint64()
    used := utils.Json.Get(res, "personal_space_info", "used_size").ToUint64()
    return &model.StorageDetails{
        DiskUsage: model.DiskUsage{
            TotalSpace: total,
            FreeSpace: total - used,
        },
    }, nil
}

var _ driver.Driver = (*AliyundriveOpen)(nil)
var _ driver.MkdirResult = (*AliyundriveOpen)(nil)
var _ driver.MoveResult = (*AliyundriveOpen)(nil)

@@ -20,8 +20,10 @@ import (
    _ "github.com/OpenListTeam/OpenList/v4/drivers/baidu_netdisk"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/baidu_photo"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/chaoxing"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/chunk"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/cloudreve"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/cloudreve_v4"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/cnb_releases"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/crypt"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/degoo"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/doubao"

@@ -364,4 +364,12 @@ func (d *BaiduNetdisk) uploadSlice(ctx context.Context, params map[string]string
    return nil
}

func (d *BaiduNetdisk) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    du, err := d.quota()
    if err != nil {
        return nil, err
    }
    return &model.StorageDetails{DiskUsage: *du}, nil
}

var _ driver.Driver = (*BaiduNetdisk)(nil)

@@ -189,3 +189,12 @@ type PrecreateResp struct {
    // return_type=2
    File File `json:"info"`
}

type QuotaResp struct {
    Errno int `json:"errno"`
    RequestId int64 `json:"request_id"`
    Total uint64 `json:"total"`
    Used uint64 `json:"used"`
    //Free uint64 `json:"free"`
    //Expire bool `json:"expire"`
}

@@ -381,6 +381,18 @@ func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 {
    return maxSliceSize
}

func (d *BaiduNetdisk) quota() (*model.DiskUsage, error) {
    var resp QuotaResp
    _, err := d.request("https://pan.baidu.com/api/quota", http.MethodGet, nil, &resp)
    if err != nil {
        return nil, err
    }
    return &model.DiskUsage{
        TotalSpace: resp.Total,
        FreeSpace: resp.Total - resp.Used,
    }, nil
}

// func encodeURIComponent(str string) string {
// 	r := url.QueryEscape(str)
// 	r = strings.ReplaceAll(r, "+", "%20")

drivers/chunk/driver.go (new file, 488 lines)

@@ -0,0 +1,488 @@
package chunk

import (
    "bytes"
    "context"
    "errors"
    "fmt"
    "io"
    stdpath "path"
    "strconv"
    "strings"

    "github.com/OpenListTeam/OpenList/v4/internal/driver"
    "github.com/OpenListTeam/OpenList/v4/internal/errs"
    "github.com/OpenListTeam/OpenList/v4/internal/fs"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/internal/op"
    "github.com/OpenListTeam/OpenList/v4/internal/sign"
    "github.com/OpenListTeam/OpenList/v4/internal/stream"
    "github.com/OpenListTeam/OpenList/v4/pkg/http_range"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
    "github.com/OpenListTeam/OpenList/v4/server/common"
)

type Chunk struct {
    model.Storage
    Addition
}

func (d *Chunk) Config() driver.Config {
    return config
}

func (d *Chunk) GetAddition() driver.Additional {
    return &d.Addition
}

func (d *Chunk) Init(ctx context.Context) error {
    if d.PartSize <= 0 {
        return errors.New("part size must be positive")
    }
    d.RemotePath = utils.FixAndCleanPath(d.RemotePath)
    return nil
}

func (d *Chunk) Drop(ctx context.Context) error {
    return nil
}

func (d *Chunk) Get(ctx context.Context, path string) (model.Obj, error) {
    if utils.PathEqual(path, "/") {
        return &model.Object{
            Name: "Root",
            IsFolder: true,
            Path: "/",
        }, nil
    }
    remoteStorage, remoteActualPath, err := op.GetStorageAndActualPath(d.RemotePath)
    if err != nil {
        return nil, err
    }
    remoteActualPath = stdpath.Join(remoteActualPath, path)
    if remoteObj, err := op.Get(ctx, remoteStorage, remoteActualPath); err == nil {
        return &model.Object{
            Path: path,
            Name: remoteObj.GetName(),
            Size: remoteObj.GetSize(),
            Modified: remoteObj.ModTime(),
            IsFolder: remoteObj.IsDir(),
            HashInfo: remoteObj.GetHash(),
        }, nil
    }

    remoteActualDir, name := stdpath.Split(remoteActualPath)
    chunkName := "[openlist_chunk]" + name
    chunkObjs, err := op.List(ctx, remoteStorage, stdpath.Join(remoteActualDir, chunkName), model.ListArgs{})
    if err != nil {
        return nil, err
    }
    var totalSize int64 = 0
    // 0号块必须存在
    chunkSizes := []int64{-1}
    h := make(map[*utils.HashType]string)
    var first model.Obj
    for _, o := range chunkObjs {
        if o.IsDir() {
            continue
        }
        if after, ok := strings.CutPrefix(o.GetName(), "hash_"); ok {
            hn, value, ok := strings.Cut(strings.TrimSuffix(after, d.CustomExt), "_")
            if ok {
                ht, ok := utils.GetHashByName(hn)
                if ok {
                    h[ht] = value
                }
            }
            continue
        }
        idx, err := strconv.Atoi(strings.TrimSuffix(o.GetName(), d.CustomExt))
        if err != nil {
            continue
        }
        totalSize += o.GetSize()
        if len(chunkSizes) > idx {
            if idx == 0 {
                first = o
            }
            chunkSizes[idx] = o.GetSize()
        } else if len(chunkSizes) == idx {
            chunkSizes = append(chunkSizes, o.GetSize())
        } else {
            newChunkSizes := make([]int64, idx+1)
            copy(newChunkSizes, chunkSizes)
            chunkSizes = newChunkSizes
            chunkSizes[idx] = o.GetSize()
        }
    }
    // 检查0号块不等于-1 以支持空文件
    // 如果块数量大于1 最后一块不可能为0
    // 只检查中间块是否有0
    for i, l := 0, len(chunkSizes)-2; ; i++ {
        if i == 0 {
            if chunkSizes[i] == -1 {
                return nil, fmt.Errorf("chunk part[%d] are missing", i)
            }
        } else if chunkSizes[i] == 0 {
            return nil, fmt.Errorf("chunk part[%d] are missing", i)
        }
        if i >= l {
            break
        }
    }
    reqDir, _ := stdpath.Split(path)
    objRes := chunkObject{
        Object: model.Object{
            Path: stdpath.Join(reqDir, chunkName),
            Name: name,
            Size: totalSize,
            Modified: first.ModTime(),
            Ctime: first.CreateTime(),
        },
        chunkSizes: chunkSizes,
    }
    if len(h) > 0 {
        objRes.HashInfo = utils.NewHashInfoByMap(h)
    }
    return &objRes, nil
}

func (d *Chunk) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
    remoteStorage, remoteActualPath, err := op.GetStorageAndActualPath(d.RemotePath)
    if err != nil {
        return nil, err
    }
    remoteActualDir := stdpath.Join(remoteActualPath, dir.GetPath())
    remoteObjs, err := op.List(ctx, remoteStorage, remoteActualDir, model.ListArgs{
        ReqPath: args.ReqPath,
        Refresh: args.Refresh,
    })
    if err != nil {
        return nil, err
    }
    result := make([]model.Obj, 0, len(remoteObjs))
    for _, obj := range remoteObjs {
        rawName := obj.GetName()
        if obj.IsDir() {
            if name, ok := strings.CutPrefix(rawName, "[openlist_chunk]"); ok {
                chunkObjs, err := op.List(ctx, remoteStorage, stdpath.Join(remoteActualDir, rawName), model.ListArgs{
                    ReqPath: stdpath.Join(args.ReqPath, rawName),
                    Refresh: args.Refresh,
                })
                if err != nil {
                    return nil, err
                }
                totalSize := int64(0)
                h := make(map[*utils.HashType]string)
                first := obj
                for _, o := range chunkObjs {
                    if o.IsDir() {
                        continue
                    }
                    if after, ok := strings.CutPrefix(strings.TrimSuffix(o.GetName(), d.CustomExt), "hash_"); ok {
                        hn, value, ok := strings.Cut(after, "_")
                        if ok {
                            ht, ok := utils.GetHashByName(hn)
                            if ok {
                                h[ht] = value
                            }
                            continue
                        }
                    }
                    idx, err := strconv.Atoi(strings.TrimSuffix(o.GetName(), d.CustomExt))
                    if err != nil {
                        continue
                    }
                    if idx == 0 {
                        first = o
                    }
                    totalSize += o.GetSize()
                }
                objRes := model.Object{
                    Name: name,
                    Size: totalSize,
                    Modified: first.ModTime(),
                    Ctime: first.CreateTime(),
                }
                if len(h) > 0 {
                    objRes.HashInfo = utils.NewHashInfoByMap(h)
                }
                if !d.Thumbnail {
                    result = append(result, &objRes)
                } else {
                    thumbPath := stdpath.Join(args.ReqPath, ".thumbnails", name+".webp")
                    thumb := fmt.Sprintf("%s/d%s?sign=%s",
                        common.GetApiUrl(ctx),
                        utils.EncodePath(thumbPath, true),
                        sign.Sign(thumbPath))
                    result = append(result, &model.ObjThumb{
                        Object: objRes,
                        Thumbnail: model.Thumbnail{
                            Thumbnail: thumb,
                        },
                    })
                }
                continue
            }
        }

        if !d.ShowHidden && strings.HasPrefix(rawName, ".") {
            continue
        }
        thumb, ok := model.GetThumb(obj)
        objRes := model.Object{
            Name: rawName,
            Size: obj.GetSize(),
            Modified: obj.ModTime(),
            IsFolder: obj.IsDir(),
            HashInfo: obj.GetHash(),
        }
        if !ok {
            result = append(result, &objRes)
        } else {
            result = append(result, &model.ObjThumb{
                Object: objRes,
                Thumbnail: model.Thumbnail{
                    Thumbnail: thumb,
                },
            })
        }
    }
    return result, nil
}

func (d *Chunk) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
    remoteStorage, remoteActualPath, err := op.GetStorageAndActualPath(d.RemotePath)
    if err != nil {
        return nil, err
    }
    chunkFile, ok := file.(*chunkObject)
    remoteActualPath = stdpath.Join(remoteActualPath, file.GetPath())
    if !ok {
        l, _, err := op.Link(ctx, remoteStorage, remoteActualPath, args)
        if err != nil {
            return nil, err
        }
        resultLink := *l
        resultLink.SyncClosers = utils.NewSyncClosers(l)
        return &resultLink, nil
    }
    fileSize := chunkFile.GetSize()
    mergedRrf := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
        start := httpRange.Start
        length := httpRange.Length
        if length < 0 || start+length > fileSize {
            length = fileSize - start
        }
        if length == 0 {
            return io.NopCloser(strings.NewReader("")), nil
        }
        rs := make([]io.Reader, 0)
        cs := make(utils.Closers, 0)
        var (
            rc io.ReadCloser
            readFrom bool
        )
        for idx, chunkSize := range chunkFile.chunkSizes {
            if readFrom {
                l, o, err := op.Link(ctx, remoteStorage, stdpath.Join(remoteActualPath, d.getPartName(idx)), args)
                if err != nil {
                    _ = cs.Close()
                    return nil, err
                }
                cs = append(cs, l)
                chunkSize2 := l.ContentLength
                if chunkSize2 <= 0 {
                    chunkSize2 = o.GetSize()
                }
                if chunkSize2 != chunkSize {
                    _ = cs.Close()
                    return nil, fmt.Errorf("chunk part[%d] size not match", idx)
                }
                rrf, err := stream.GetRangeReaderFromLink(chunkSize2, l)
                if err != nil {
                    _ = cs.Close()
                    return nil, err
                }
                newLength := length - chunkSize2
                if newLength >= 0 {
                    length = newLength
                    rc, err = rrf.RangeRead(ctx, http_range.Range{Length: -1})
                } else {
                    rc, err = rrf.RangeRead(ctx, http_range.Range{Length: length})
                }
                if err != nil {
                    _ = cs.Close()
                    return nil, err
                }
                rs = append(rs, rc)
                cs = append(cs, rc)
                if newLength <= 0 {
                    return utils.ReadCloser{
                        Reader: io.MultiReader(rs...),
                        Closer: &cs,
                    }, nil
                }
            } else if newStart := start - chunkSize; newStart >= 0 {
                start = newStart
            } else {
                l, o, err := op.Link(ctx, remoteStorage, stdpath.Join(remoteActualPath, d.getPartName(idx)), args)
                if err != nil {
                    _ = cs.Close()
                    return nil, err
                }
                cs = append(cs, l)
                chunkSize2 := l.ContentLength
                if chunkSize2 <= 0 {
                    chunkSize2 = o.GetSize()
                }
                if chunkSize2 != chunkSize {
                    _ = cs.Close()
                    return nil, fmt.Errorf("chunk part[%d] size not match", idx)
                }
                rrf, err := stream.GetRangeReaderFromLink(chunkSize2, l)
                if err != nil {
                    _ = cs.Close()
                    return nil, err
                }
                rc, err = rrf.RangeRead(ctx, http_range.Range{Start: start, Length: -1})
                if err != nil {
                    _ = cs.Close()
                    return nil, err
                }
                length -= chunkSize2 - start
                cs = append(cs, rc)
                if length <= 0 {
                    return utils.ReadCloser{
                        Reader: rc,
                        Closer: &cs,
                    }, nil
                }
                rs = append(rs, rc)
                readFrom = true
            }
        }
        return nil, fmt.Errorf("invalid range: start=%d,length=%d,fileSize=%d", httpRange.Start, httpRange.Length, fileSize)
    }
    return &model.Link{
        RangeReader: stream.RangeReaderFunc(mergedRrf),
    }, nil
}

func (d *Chunk) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
|
||||
path := stdpath.Join(d.RemotePath, parentDir.GetPath(), dirName)
|
||||
return fs.MakeDir(ctx, path)
|
||||
}
|
||||
|
||||
func (d *Chunk) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
|
||||
src := stdpath.Join(d.RemotePath, srcObj.GetPath())
|
||||
dst := stdpath.Join(d.RemotePath, dstDir.GetPath())
|
||||
_, err := fs.Move(ctx, src, dst)
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *Chunk) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
|
||||
if _, ok := srcObj.(*chunkObject); ok {
|
||||
newName = "[openlist_chunk]" + newName
|
||||
}
|
||||
return fs.Rename(ctx, stdpath.Join(d.RemotePath, srcObj.GetPath()), newName)
|
||||
}
|
||||
|
||||
func (d *Chunk) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
|
||||
dst := stdpath.Join(d.RemotePath, dstDir.GetPath())
|
||||
src := stdpath.Join(d.RemotePath, srcObj.GetPath())
|
||||
_, err := fs.Copy(ctx, src, dst)
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *Chunk) Remove(ctx context.Context, obj model.Obj) error {
|
||||
return fs.Remove(ctx, stdpath.Join(d.RemotePath, obj.GetPath()))
|
||||
}
|
||||
|
||||
func (d *Chunk) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
|
||||
remoteStorage, remoteActualPath, err := op.GetStorageAndActualPath(d.RemotePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if d.Thumbnail && dstDir.GetName() == ".thumbnails" {
|
||||
return op.Put(ctx, remoteStorage, stdpath.Join(remoteActualPath, dstDir.GetPath()), file, up)
|
||||
}
|
||||
upReader := &driver.ReaderUpdatingProgress{
|
||||
Reader: file,
|
||||
UpdateProgress: up,
|
||||
}
|
||||
dst := stdpath.Join(remoteActualPath, dstDir.GetPath(), "[openlist_chunk]"+file.GetName())
|
||||
if d.StoreHash {
|
||||
for ht, value := range file.GetHash().All() {
|
||||
_ = op.Put(ctx, remoteStorage, dst, &stream.FileStream{
|
||||
Obj: &model.Object{
|
||||
Name: fmt.Sprintf("hash_%s_%s%s", ht.Name, value, d.CustomExt),
|
||||
Size: 1,
|
||||
Modified: file.ModTime(),
|
||||
},
|
||||
Mimetype: "application/octet-stream",
|
||||
Reader: bytes.NewReader([]byte{0}), // for compatibility with drivers that do not support empty files
|
||||
}, nil, true)
|
||||
}
|
||||
}
|
||||
fullPartCount := int(file.GetSize() / d.PartSize)
|
||||
tailSize := file.GetSize() % d.PartSize
|
||||
if tailSize == 0 && fullPartCount > 0 {
|
||||
fullPartCount--
|
||||
tailSize = d.PartSize
|
||||
}
|
||||
partIndex := 0
|
||||
for partIndex < fullPartCount {
|
||||
err = op.Put(ctx, remoteStorage, dst, &stream.FileStream{
|
||||
Obj: &model.Object{
|
||||
Name: d.getPartName(partIndex),
|
||||
Size: d.PartSize,
|
||||
Modified: file.ModTime(),
|
||||
},
|
||||
Mimetype: file.GetMimetype(),
|
||||
Reader: io.LimitReader(upReader, d.PartSize),
|
||||
}, nil, true)
|
||||
if err != nil {
|
||||
_ = op.Remove(ctx, remoteStorage, dst)
|
||||
return err
|
||||
}
|
||||
partIndex++
|
||||
}
|
||||
err = op.Put(ctx, remoteStorage, dst, &stream.FileStream{
|
||||
Obj: &model.Object{
|
||||
Name: d.getPartName(fullPartCount),
|
||||
Size: tailSize,
|
||||
Modified: file.ModTime(),
|
||||
},
|
||||
Mimetype: file.GetMimetype(),
|
||||
Reader: upReader,
|
||||
}, nil)
|
||||
if err != nil {
|
||||
_ = op.Remove(ctx, remoteStorage, dst)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *Chunk) getPartName(part int) string {
|
||||
return fmt.Sprintf("%d%s", part, d.CustomExt)
|
||||
}
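The part-splitting arithmetic in Put above is easy to misread: when the file size divides evenly by PartSize, the last full part is folded back into the tail, so every upload still ends with exactly one tail part. A minimal standalone sketch of that arithmetic (partSizes is an illustrative helper, not part of the driver):

package main

import "fmt"

// partSizes mirrors the fullPartCount/tailSize computation in Chunk.Put.
func partSizes(fileSize, partSize int64) []int64 {
    fullPartCount := int(fileSize / partSize)
    tailSize := fileSize % partSize
    if tailSize == 0 && fullPartCount > 0 {
        // an evenly divisible file reuses the last full part as the tail
        fullPartCount--
        tailSize = partSize
    }
    sizes := make([]int64, 0, fullPartCount+1)
    for i := 0; i < fullPartCount; i++ {
        sizes = append(sizes, partSize)
    }
    return append(sizes, tailSize)
}

func main() {
    fmt.Println(partSizes(10<<20, 4<<20)) // [4194304 4194304 2097152] -> parts "0", "1", "2"
    fmt.Println(partSizes(8<<20, 4<<20))  // [4194304 4194304] -> parts "0", "1"
}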
|
||||
|
||||
func (d *Chunk) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||
remoteStorage, err := fs.GetStorage(d.RemotePath, &fs.GetStoragesArgs{})
|
||||
if err != nil {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
wd, ok := remoteStorage.(driver.WithDetails)
|
||||
if !ok {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
remoteDetails, err := wd.GetDetails(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &model.StorageDetails{
|
||||
DiskUsage: remoteDetails.DiskUsage,
|
||||
}, nil
|
||||
}
|
||||
|
||||
var _ driver.Driver = (*Chunk)(nil)
|
31 drivers/chunk/meta.go Normal file
@ -0,0 +1,31 @@
|
||||
package chunk
|
||||
|
||||
import (
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||
)
|
||||
|
||||
type Addition struct {
|
||||
RemotePath string `json:"remote_path" required:"true"`
|
||||
PartSize int64 `json:"part_size" required:"true" type:"number" help:"bytes"`
|
||||
CustomExt string `json:"custom_ext" type:"string"`
|
||||
StoreHash bool `json:"store_hash" type:"bool" default:"true"`
|
||||
|
||||
Thumbnail bool `json:"thumbnail" required:"true" default:"false" help:"enable thumbnail which pre-generated under .thumbnails folder"`
|
||||
ShowHidden bool `json:"show_hidden" default:"true" required:"false" help:"show hidden directories and files"`
|
||||
}
|
||||
|
||||
var config = driver.Config{
|
||||
Name: "Chunk",
|
||||
LocalSort: true,
|
||||
OnlyProxy: true,
|
||||
NoCache: true,
|
||||
DefaultRoot: "/",
|
||||
NoLinkURL: true,
|
||||
}
|
||||
|
||||
func init() {
|
||||
op.RegisterDriver(func() driver.Driver {
|
||||
return &Chunk{}
|
||||
})
|
||||
}
|
8 drivers/chunk/obj.go Normal file
@ -0,0 +1,8 @@
|
||||
package chunk
|
||||
|
||||
import "github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
|
||||
type chunkObject struct {
|
||||
model.Object
|
||||
chunkSizes []int64
|
||||
}
|
@ -20,7 +20,9 @@ import (
|
||||
type CloudreveV4 struct {
|
||||
model.Storage
|
||||
Addition
|
||||
ref *CloudreveV4
|
||||
ref *CloudreveV4
|
||||
AccessExpires string
|
||||
RefreshExpires string
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) Config() driver.Config {
|
||||
@ -44,13 +46,17 @@ func (d *CloudreveV4) Init(ctx context.Context) error {
|
||||
if d.ref != nil {
|
||||
return nil
|
||||
}
|
||||
if d.AccessToken == "" && d.RefreshToken != "" {
|
||||
return d.refreshToken()
|
||||
}
|
||||
if d.Username != "" {
|
||||
if d.canLogin() {
|
||||
return d.login()
|
||||
}
|
||||
return nil
|
||||
if d.RefreshToken != "" {
|
||||
return d.refreshToken()
|
||||
}
|
||||
if d.AccessToken == "" {
|
||||
return errors.New("no way to authenticate. At least AccessToken is required")
|
||||
}
|
||||
// ensure AccessToken is valid
|
||||
return d.parseJWT(d.AccessToken, &AccessJWT{})
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) InitReference(storage driver.Driver) error {
|
||||
@ -333,6 +339,21 @@ func (d *CloudreveV4) ArchiveDecompress(ctx context.Context, srcObj, dstDir mode
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||
// TODO return storage details (total space, free space, etc.)
|
||||
var r CapacityResp
|
||||
err := d.request(http.MethodGet, "/user/capacity", nil, &r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &model.StorageDetails{
|
||||
DiskUsage: model.DiskUsage{
|
||||
TotalSpace: r.Total,
|
||||
FreeSpace: r.Total - r.Used,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
//func (d *CloudreveV4) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
|
||||
// return nil, errs.NotSupport
|
||||
//}
|
||||
|
@ -66,11 +66,27 @@ type CaptchaResp struct {
|
||||
Ticket string `json:"ticket"`
|
||||
}
|
||||
|
||||
type AccessJWT struct {
|
||||
TokenType string `json:"token_type"`
|
||||
Sub string `json:"sub"`
|
||||
Exp int64 `json:"exp"`
|
||||
Nbf int64 `json:"nbf"`
|
||||
}
|
||||
|
||||
type RefreshJWT struct {
|
||||
TokenType string `json:"token_type"`
|
||||
Sub string `json:"sub"`
|
||||
Exp int `json:"exp"`
|
||||
Nbf int `json:"nbf"`
|
||||
StateHash string `json:"state_hash"`
|
||||
RootTokenID string `json:"root_token_id"`
|
||||
}
|
||||
|
||||
type Token struct {
|
||||
AccessToken string `json:"access_token"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
AccessExpires time.Time `json:"access_expires"`
|
||||
RefreshExpires time.Time `json:"refresh_expires"`
|
||||
AccessToken string `json:"access_token"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
AccessExpires string `json:"access_expires"`
|
||||
RefreshExpires string `json:"refresh_expires"`
|
||||
}
|
||||
|
||||
type TokenResponse struct {
|
||||
@ -188,3 +204,9 @@ type FolderSummaryResp struct {
|
||||
CalculatedAt time.Time `json:"calculated_at"`
|
||||
} `json:"folder_summary"`
|
||||
}
|
||||
|
||||
type CapacityResp struct {
|
||||
Total uint64 `json:"total"`
|
||||
Used uint64 `json:"used"`
|
||||
// StoragePackTotal uint64 `json:"storage_pack_total"`
|
||||
}
|
||||
|
@ -28,6 +28,15 @@ import (
|
||||
|
||||
// do other things that are not defined in the Driver interface
|
||||
|
||||
const (
|
||||
CodeLoginRequired = http.StatusUnauthorized
|
||||
CodeCredentialInvalid = 40020 // Failed to issue token
|
||||
)
|
||||
|
||||
var (
|
||||
ErrorIssueToken = errors.New("failed to issue token")
|
||||
)
|
||||
|
||||
func (d *CloudreveV4) getUA() string {
|
||||
if d.CustomUA != "" {
|
||||
return d.CustomUA
|
||||
@ -39,6 +48,23 @@ func (d *CloudreveV4) request(method string, path string, callback base.ReqCallb
|
||||
if d.ref != nil {
|
||||
return d.ref.request(method, path, callback, out)
|
||||
}
|
||||
|
||||
// ensure token
|
||||
if d.isTokenExpired() {
|
||||
err := d.refreshToken()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return d._request(method, path, callback, out)
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) _request(method string, path string, callback base.ReqCallback, out any) error {
|
||||
if d.ref != nil {
|
||||
return d.ref._request(method, path, callback, out)
|
||||
}
|
||||
|
||||
u := d.Address + "/api/v4" + path
|
||||
req := base.RestyClient.R()
|
||||
req.SetHeaders(map[string]string{
|
||||
@ -65,15 +91,17 @@ func (d *CloudreveV4) request(method string, path string, callback base.ReqCallb
|
||||
}
|
||||
|
||||
if r.Code != 0 {
|
||||
if r.Code == 401 && d.RefreshToken != "" && path != "/session/token/refresh" {
|
||||
// try to refresh token
|
||||
err = d.refreshToken()
|
||||
if r.Code == CodeLoginRequired && d.canLogin() && path != "/session/token/refresh" {
|
||||
err = d.login()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return d.request(method, path, callback, out)
|
||||
}
|
||||
return errors.New(r.Msg)
|
||||
if r.Code == CodeCredentialInvalid {
|
||||
return ErrorIssueToken
|
||||
}
|
||||
return fmt.Errorf("%d: %s", r.Code, r.Msg)
|
||||
}
|
||||
|
||||
if out != nil && r.Data != nil {
|
||||
@ -91,14 +119,18 @@ func (d *CloudreveV4) request(method string, path string, callback base.ReqCallb
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) canLogin() bool {
|
||||
return d.Username != "" && d.Password != ""
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) login() error {
|
||||
var siteConfig SiteLoginConfigResp
|
||||
err := d.request(http.MethodGet, "/site/config/login", nil, &siteConfig)
|
||||
err := d._request(http.MethodGet, "/site/config/login", nil, &siteConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var prepareLogin PrepareLoginResp
|
||||
err = d.request(http.MethodGet, "/session/prepare?email="+d.Addition.Username, nil, &prepareLogin)
|
||||
err = d._request(http.MethodGet, "/session/prepare?email="+d.Addition.Username, nil, &prepareLogin)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -128,7 +160,7 @@ func (d *CloudreveV4) doLogin(needCaptcha bool) error {
|
||||
}
|
||||
if needCaptcha {
|
||||
var config BasicConfigResp
|
||||
err = d.request(http.MethodGet, "/site/config/basic", nil, &config)
|
||||
err = d._request(http.MethodGet, "/site/config/basic", nil, &config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -136,7 +168,7 @@ func (d *CloudreveV4) doLogin(needCaptcha bool) error {
|
||||
return fmt.Errorf("captcha type %s not support", config.CaptchaType)
|
||||
}
|
||||
var captcha CaptchaResp
|
||||
err = d.request(http.MethodGet, "/site/captcha", nil, &captcha)
|
||||
err = d._request(http.MethodGet, "/site/captcha", nil, &captcha)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -162,20 +194,22 @@ func (d *CloudreveV4) doLogin(needCaptcha bool) error {
|
||||
loginBody["captcha"] = captchaCode
|
||||
}
|
||||
var token TokenResponse
|
||||
err = d.request(http.MethodPost, "/session/token", func(req *resty.Request) {
|
||||
err = d._request(http.MethodPost, "/session/token", func(req *resty.Request) {
|
||||
req.SetBody(loginBody)
|
||||
}, &token)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.AccessToken, d.RefreshToken = token.Token.AccessToken, token.Token.RefreshToken
|
||||
d.AccessExpires, d.RefreshExpires = token.Token.AccessExpires, token.Token.RefreshExpires
|
||||
op.MustSaveDriverStorage(d)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) refreshToken() error {
|
||||
// if no refresh token, try to login if possible
|
||||
if d.RefreshToken == "" {
|
||||
if d.Username != "" {
|
||||
if d.canLogin() {
|
||||
err := d.login()
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot login to get refresh token, error: %s", err)
|
||||
@ -183,20 +217,127 @@ func (d *CloudreveV4) refreshToken() error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// parse jwt to check if refresh token is valid
|
||||
var jwt RefreshJWT
|
||||
err := d.parseJWT(d.RefreshToken, &jwt)
|
||||
if err != nil {
|
||||
// if refresh token is invalid, try to login if possible
|
||||
if d.canLogin() {
|
||||
return d.login()
|
||||
}
|
||||
d.GetStorage().SetStatus(fmt.Sprintf("Invalid RefreshToken: %s", err.Error()))
|
||||
op.MustSaveDriverStorage(d)
|
||||
return fmt.Errorf("invalid refresh token: %w", err)
|
||||
}
|
||||
|
||||
// do refresh token
|
||||
var token Token
|
||||
err := d.request(http.MethodPost, "/session/token/refresh", func(req *resty.Request) {
|
||||
err = d._request(http.MethodPost, "/session/token/refresh", func(req *resty.Request) {
|
||||
req.SetBody(base.Json{
|
||||
"refresh_token": d.RefreshToken,
|
||||
})
|
||||
}, &token)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrorIssueToken) {
|
||||
if d.canLogin() {
|
||||
// try to login again
|
||||
return d.login()
|
||||
}
|
||||
d.GetStorage().SetStatus("This session is no longer valid")
|
||||
op.MustSaveDriverStorage(d)
|
||||
return ErrorIssueToken
|
||||
}
|
||||
return err
|
||||
}
|
||||
d.AccessToken, d.RefreshToken = token.AccessToken, token.RefreshToken
|
||||
d.AccessExpires, d.RefreshExpires = token.AccessExpires, token.RefreshExpires
|
||||
op.MustSaveDriverStorage(d)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) parseJWT(token string, jwt any) error {
|
||||
split := strings.Split(token, ".")
|
||||
if len(split) != 3 {
|
||||
return fmt.Errorf("invalid token length: %d, ensure the token is a valid JWT", len(split))
|
||||
}
|
||||
data, err := base64.RawURLEncoding.DecodeString(split[1])
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid token encoding: %w, ensure the token is a valid JWT", err)
|
||||
}
|
||||
err = json.Unmarshal(data, &jwt)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid token content: %w, ensure the token is a valid JWT", err)
|
||||
}
|
||||
return nil
|
||||
}
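parseJWT above only inspects the middle segment of the token: base64url-decode it and unmarshal the JSON claims. A self-contained sketch with a made-up payload (the token below is illustrative, not a real Cloudreve token):

package main

import (
    "encoding/base64"
    "encoding/json"
    "fmt"
    "strings"
)

// accessJWT mirrors the AccessJWT claims used by the driver.
type accessJWT struct {
    TokenType string `json:"token_type"`
    Sub       string `json:"sub"`
    Exp       int64  `json:"exp"`
    Nbf       int64  `json:"nbf"`
}

func main() {
    // illustrative payload; a real token is issued and signed by the Cloudreve server
    payload, _ := json.Marshal(accessJWT{TokenType: "access", Sub: "user-1", Exp: 1893456000})
    token := "header." + base64.RawURLEncoding.EncodeToString(payload) + ".signature"

    // same steps as parseJWT: split on ".", decode the middle segment, unmarshal
    var claims accessJWT
    data, err := base64.RawURLEncoding.DecodeString(strings.Split(token, ".")[1])
    if err != nil {
        panic(err)
    }
    if err := json.Unmarshal(data, &claims); err != nil {
        panic(err)
    }
    fmt.Println(claims.Exp) // 1893456000
}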
|
||||
|
||||
// check if token is expired
|
||||
// https://github.com/cloudreve/frontend/blob/ddfacc1c31c49be03beb71de4cc114c8811038d6/src/session/index.ts#L177-L200
|
||||
func (d *CloudreveV4) isTokenExpired() bool {
|
||||
if d.RefreshToken == "" {
|
||||
// login again if username and password are set
|
||||
if d.canLogin() {
|
||||
return true
|
||||
}
|
||||
// no refresh token, cannot refresh
|
||||
return false
|
||||
}
|
||||
if d.AccessToken == "" {
|
||||
return true
|
||||
}
|
||||
var (
|
||||
err error
|
||||
expires time.Time
|
||||
)
|
||||
// check if token is expired
|
||||
if d.AccessExpires != "" {
|
||||
// use expires field if possible to prevent timezone issue
|
||||
// only available after login or refresh token
|
||||
// 2025-08-28T02:43:07.645109985+08:00
|
||||
expires, err = time.Parse(time.RFC3339Nano, d.AccessExpires)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
// fallback to parse jwt
|
||||
// if failed, disable the storage
|
||||
var jwt AccessJWT
|
||||
err = d.parseJWT(d.AccessToken, &jwt)
|
||||
if err != nil {
|
||||
d.GetStorage().SetStatus(fmt.Sprintf("Invalid AccessToken: %s", err.Error()))
|
||||
op.MustSaveDriverStorage(d)
|
||||
return false
|
||||
}
|
||||
// may have timezone issues
|
||||
expires = time.Unix(jwt.Exp, 0)
|
||||
}
|
||||
// add a 10 minutes safe margin
|
||||
ddl := time.Now().Add(10 * time.Minute)
|
||||
if expires.Before(ddl) {
|
||||
// current access token expired, check if refresh token is expired
|
||||
// warning: cannot parse refresh token from jwt, because the exp field is not standard
|
||||
if d.RefreshExpires != "" {
|
||||
refreshExpires, err := time.Parse(time.RFC3339Nano, d.RefreshExpires)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if refreshExpires.Before(time.Now()) {
|
||||
// This session is no longer valid
|
||||
if d.canLogin() {
|
||||
// try to login again
|
||||
return true
|
||||
}
|
||||
d.GetStorage().SetStatus("This session is no longer valid")
|
||||
op.MustSaveDriverStorage(d)
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
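The core of isTokenExpired above is: parse the RFC 3339 expiry recorded at login and treat the token as expired once it falls within the ten-minute safety margin. A compact sketch under that assumption (expiresSoon is an illustrative helper; the timestamp is the example value from the comment above):

package main

import (
    "fmt"
    "time"
)

// expiresSoon reports whether an RFC 3339 expiry falls within the given safety margin.
func expiresSoon(expires string, margin time.Duration) (bool, error) {
    t, err := time.Parse(time.RFC3339Nano, expires)
    if err != nil {
        return false, err
    }
    return t.Before(time.Now().Add(margin)), nil
}

func main() {
    // illustrative value in the same format Cloudreve returns after login or refresh
    soon, err := expiresSoon("2025-08-28T02:43:07.645109985+08:00", 10*time.Minute)
    fmt.Println(soon, err)
}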
|
||||
|
||||
func (d *CloudreveV4) upLocal(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
|
||||
var finish int64 = 0
|
||||
var chunk int = 0
|
||||
|
230 drivers/cnb_releases/driver.go Normal file
@ -0,0 +1,230 @@
|
||||
package cnb_releases
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/drivers/base"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
"github.com/go-resty/resty/v2"
|
||||
)
|
||||
|
||||
type CnbReleases struct {
|
||||
model.Storage
|
||||
Addition
|
||||
ref *CnbReleases
|
||||
}
|
||||
|
||||
func (d *CnbReleases) Config() driver.Config {
|
||||
return config
|
||||
}
|
||||
|
||||
func (d *CnbReleases) GetAddition() driver.Additional {
|
||||
return &d.Addition
|
||||
}
|
||||
|
||||
func (d *CnbReleases) Init(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *CnbReleases) InitReference(storage driver.Driver) error {
|
||||
refStorage, ok := storage.(*CnbReleases)
|
||||
if ok {
|
||||
d.ref = refStorage
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("ref: storage is not CnbReleases")
|
||||
}
|
||||
|
||||
func (d *CnbReleases) Drop(ctx context.Context) error {
|
||||
d.ref = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *CnbReleases) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
||||
if dir.GetPath() == "/" {
|
||||
// get all releases for root dir
|
||||
var resp ReleaseList
|
||||
|
||||
err := d.Request(http.MethodGet, "/{repo}/-/releases", func(req *resty.Request) {
|
||||
req.SetPathParam("repo", d.Repo)
|
||||
}, &resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return utils.SliceConvert(resp, func(src Release) (model.Obj, error) {
|
||||
name := src.Name
|
||||
if d.UseTagName {
|
||||
name = src.TagName
|
||||
}
|
||||
return &model.Object{
|
||||
ID: src.ID,
|
||||
Name: name,
|
||||
Size: d.sumAssetsSize(src.Assets),
|
||||
Ctime: src.CreatedAt,
|
||||
Modified: src.UpdatedAt,
|
||||
IsFolder: true,
|
||||
}, nil
|
||||
})
|
||||
} else {
|
||||
// get release info by release id
|
||||
releaseID := dir.GetID()
|
||||
if releaseID == "" {
|
||||
return nil, errs.ObjectNotFound
|
||||
}
|
||||
var resp Release
|
||||
err := d.Request(http.MethodGet, "/{repo}/-/releases/{release_id}", func(req *resty.Request) {
|
||||
req.SetPathParam("repo", d.Repo)
|
||||
req.SetPathParam("release_id", releaseID)
|
||||
}, &resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return utils.SliceConvert(resp.Assets, func(src ReleaseAsset) (model.Obj, error) {
|
||||
return &Object{
|
||||
Object: model.Object{
|
||||
ID: src.ID,
|
||||
Path: src.Path,
|
||||
Name: src.Name,
|
||||
Size: src.Size,
|
||||
Ctime: src.CreatedAt,
|
||||
Modified: src.UpdatedAt,
|
||||
IsFolder: false,
|
||||
},
|
||||
ParentID: dir.GetID(),
|
||||
}, nil
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (d *CnbReleases) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
||||
return &model.Link{
|
||||
URL: "https://cnb.cool" + file.GetPath(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (d *CnbReleases) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
|
||||
if parentDir.GetPath() == "/" {
|
||||
// create a new release
|
||||
branch := d.DefaultBranch
|
||||
if branch == "" {
|
||||
branch = "main" // fallback to "main" if not set
|
||||
}
|
||||
return d.Request(http.MethodPost, "/{repo}/-/releases", func(req *resty.Request) {
|
||||
req.SetPathParam("repo", d.Repo)
|
||||
req.SetBody(base.Json{
|
||||
"name": dirName,
|
||||
"tag_name": dirName,
|
||||
"target_commitish": branch,
|
||||
})
|
||||
}, nil)
|
||||
}
|
||||
return errs.NotImplement
|
||||
}
|
||||
|
||||
func (d *CnbReleases) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
||||
func (d *CnbReleases) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
|
||||
if srcObj.IsDir() && !d.UseTagName {
|
||||
return d.Request(http.MethodPatch, "/{repo}/-/releases/{release_id}", func(req *resty.Request) {
|
||||
req.SetPathParam("repo", d.Repo)
|
||||
req.SetPathParam("release_id", srcObj.GetID())
|
||||
req.SetFormData(map[string]string{
|
||||
"name": newName,
|
||||
})
|
||||
}, nil)
|
||||
}
|
||||
return errs.NotImplement
|
||||
}
|
||||
|
||||
func (d *CnbReleases) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
||||
func (d *CnbReleases) Remove(ctx context.Context, obj model.Obj) error {
|
||||
if obj.IsDir() {
|
||||
return d.Request(http.MethodDelete, "/{repo}/-/releases/{release_id}", func(req *resty.Request) {
|
||||
req.SetPathParam("repo", d.Repo)
|
||||
req.SetPathParam("release_id", obj.GetID())
|
||||
}, nil)
|
||||
}
|
||||
if o, ok := obj.(*Object); ok {
|
||||
return d.Request(http.MethodDelete, "/{repo}/-/releases/{release_id}/assets/{asset_id}", func(req *resty.Request) {
|
||||
req.SetPathParam("repo", d.Repo)
|
||||
req.SetPathParam("release_id", o.ParentID)
|
||||
req.SetPathParam("asset_id", obj.GetID())
|
||||
}, nil)
|
||||
} else {
|
||||
return fmt.Errorf("unable to get release ID")
|
||||
}
|
||||
}
|
||||
|
||||
func (d *CnbReleases) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
|
||||
// 1. get upload info
|
||||
var resp ReleaseAssetUploadURL
|
||||
err := d.Request(http.MethodPost, "/{repo}/-/releases/{release_id}/asset-upload-url", func(req *resty.Request) {
|
||||
req.SetPathParam("repo", d.Repo)
|
||||
req.SetPathParam("release_id", dstDir.GetID())
|
||||
req.SetBody(base.Json{
|
||||
"asset_name": file.GetName(),
|
||||
"overwrite": true,
|
||||
"size": file.GetSize(),
|
||||
})
|
||||
}, &resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// 2. upload file
|
||||
// use multipart to create form file
|
||||
var b bytes.Buffer
|
||||
w := multipart.NewWriter(&b)
|
||||
_, err = w.CreateFormFile("file", file.GetName())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
headSize := b.Len()
|
||||
err = w.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
head := bytes.NewReader(b.Bytes()[:headSize])
|
||||
tail := bytes.NewReader(b.Bytes()[headSize:])
|
||||
rateLimitedRd := driver.NewLimitedUploadStream(ctx, io.MultiReader(head, file, tail))
|
||||
|
||||
// use net/http to upload file
|
||||
ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Duration(resp.ExpiresInSec+1)*time.Second)
|
||||
defer cancel()
|
||||
req, err := http.NewRequestWithContext(ctxWithTimeout, http.MethodPost, resp.UploadURL, rateLimitedRd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.Header.Set("Content-Type", w.FormDataContentType())
|
||||
req.Header.Set("User-Agent", base.UserAgent)
|
||||
httpResp, err := base.HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer httpResp.Body.Close()
|
||||
if httpResp.StatusCode != http.StatusNoContent {
|
||||
return fmt.Errorf("upload file failed: %s", httpResp.Status)
|
||||
}
|
||||
|
||||
// 3. verify upload
|
||||
return d.Request(http.MethodPost, resp.VerifyURL, nil, nil)
|
||||
}
|
||||
|
||||
var _ driver.Driver = (*CnbReleases)(nil)
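Put above avoids buffering the whole upload by pre-building the multipart prologue and epilogue in memory and streaming the file body between them. A minimal sketch of that technique in isolation (multipartStream and the field/file names are illustrative):

package main

import (
    "bytes"
    "io"
    "mime/multipart"
    "os"
    "strings"
)

// multipartStream returns a reader that emits a complete multipart/form-data body
// while streaming the file content, plus the matching Content-Type value.
func multipartStream(fieldName, fileName string, file io.Reader) (io.Reader, string, error) {
    var b bytes.Buffer
    w := multipart.NewWriter(&b)
    if _, err := w.CreateFormFile(fieldName, fileName); err != nil {
        return nil, "", err
    }
    headSize := b.Len() // everything written so far precedes the file content
    if err := w.Close(); err != nil {
        return nil, "", err
    }
    head := bytes.NewReader(b.Bytes()[:headSize])
    tail := bytes.NewReader(b.Bytes()[headSize:]) // closing boundary
    return io.MultiReader(head, file, tail), w.FormDataContentType(), nil
}

func main() {
    body, contentType, err := multipartStream("file", "example.txt", strings.NewReader("hello"))
    if err != nil {
        panic(err)
    }
    _ = contentType                 // would be sent as the Content-Type header
    _, _ = io.Copy(os.Stdout, body) // prints the full multipart body
}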
|
26 drivers/cnb_releases/meta.go Normal file
@ -0,0 +1,26 @@
|
||||
package cnb_releases
|
||||
|
||||
import (
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||
)
|
||||
|
||||
type Addition struct {
|
||||
driver.RootPath
|
||||
Repo string `json:"repo" type:"string" required:"true"`
|
||||
Token string `json:"token" type:"string" required:"true"`
|
||||
UseTagName bool `json:"use_tag_name" type:"bool" default:"false" help:"Use tag name instead of release name"`
|
||||
DefaultBranch string `json:"default_branch" type:"string" default:"main" help:"Default branch for new releases"`
|
||||
}
|
||||
|
||||
var config = driver.Config{
|
||||
Name: "CNB Releases",
|
||||
LocalSort: true,
|
||||
DefaultRoot: "/",
|
||||
}
|
||||
|
||||
func init() {
|
||||
op.RegisterDriver(func() driver.Driver {
|
||||
return &CnbReleases{}
|
||||
})
|
||||
}
|
100 drivers/cnb_releases/types.go Normal file
@ -0,0 +1,100 @@
|
||||
package cnb_releases
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
)
|
||||
|
||||
type Object struct {
|
||||
model.Object
|
||||
ParentID string
|
||||
}
|
||||
|
||||
type TagList []Tag
|
||||
|
||||
type Tag struct {
|
||||
Commit struct {
|
||||
Author UserInfo `json:"author"`
|
||||
Commit CommitObject `json:"commit"`
|
||||
Committer UserInfo `json:"committer"`
|
||||
Parents []CommitParent `json:"parents"`
|
||||
Sha string `json:"sha"`
|
||||
} `json:"commit"`
|
||||
Name string `json:"name"`
|
||||
Target string `json:"target"`
|
||||
TargetType string `json:"target_type"`
|
||||
Verification TagObjectVerification `json:"verification"`
|
||||
}
|
||||
|
||||
type UserInfo struct {
|
||||
Freeze bool `json:"freeze"`
|
||||
Nickname string `json:"nickname"`
|
||||
Username string `json:"username"`
|
||||
}
|
||||
|
||||
type CommitObject struct {
|
||||
Author Signature `json:"author"`
|
||||
CommentCount int `json:"comment_count"`
|
||||
Committer Signature `json:"committer"`
|
||||
Message string `json:"message"`
|
||||
Tree CommitObjectTree `json:"tree"`
|
||||
Verification CommitObjectVerification `json:"verification"`
|
||||
}
|
||||
|
||||
type Signature struct {
|
||||
Date time.Time `json:"date"`
|
||||
Email string `json:"email"`
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
type CommitObjectTree struct {
|
||||
Sha string `json:"sha"`
|
||||
}
|
||||
|
||||
type CommitObjectVerification struct {
|
||||
Payload string `json:"payload"`
|
||||
Reason string `json:"reason"`
|
||||
Signature string `json:"signature"`
|
||||
Verified bool `json:"verified"`
|
||||
VerifiedAt string `json:"verified_at"`
|
||||
}
|
||||
|
||||
type CommitParent = CommitObjectTree
|
||||
|
||||
type TagObjectVerification = CommitObjectVerification
|
||||
|
||||
type ReleaseList []Release
|
||||
|
||||
type Release struct {
|
||||
Assets []ReleaseAsset `json:"assets"`
|
||||
Author UserInfo `json:"author"`
|
||||
Body string `json:"body"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
Draft bool `json:"draft"`
|
||||
ID string `json:"id"`
|
||||
IsLatest bool `json:"is_latest"`
|
||||
Name string `json:"name"`
|
||||
Prerelease bool `json:"prerelease"`
|
||||
PublishedAt time.Time `json:"published_at"`
|
||||
TagCommitish string `json:"tag_commitish"`
|
||||
TagName string `json:"tag_name"`
|
||||
UpdatedAt time.Time `json:"updated_at"`
|
||||
}
|
||||
|
||||
type ReleaseAsset struct {
|
||||
ContentType string `json:"content_type"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Path string `json:"path"`
|
||||
Size int64 `json:"size"`
|
||||
UpdatedAt time.Time `json:"updated_at"`
|
||||
Uploader UserInfo `json:"uploader"`
|
||||
}
|
||||
|
||||
type ReleaseAssetUploadURL struct {
|
||||
UploadURL string `json:"upload_url"`
|
||||
ExpiresInSec int `json:"expires_in_sec"`
|
||||
VerifyURL string `json:"verify_url"`
|
||||
}
|
58 drivers/cnb_releases/util.go Normal file
@ -0,0 +1,58 @@
|
||||
package cnb_releases
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/drivers/base"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// do other things that are not defined in the Driver interface
|
||||
|
||||
func (d *CnbReleases) Request(method string, path string, callback base.ReqCallback, resp any) error {
|
||||
if d.ref != nil {
|
||||
return d.ref.Request(method, path, callback, resp)
|
||||
}
|
||||
var url string
|
||||
if strings.HasPrefix(path, "http") {
|
||||
url = path
|
||||
} else {
|
||||
url = "https://api.cnb.cool" + path
|
||||
}
|
||||
req := base.RestyClient.R()
|
||||
req.SetHeader("Accept", "application/json")
|
||||
req.SetAuthScheme("Bearer")
|
||||
req.SetAuthToken(d.Token)
|
||||
|
||||
if callback != nil {
|
||||
callback(req)
|
||||
}
|
||||
res, err := req.Execute(method, url)
|
||||
log.Debugln(res.String())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if res.StatusCode() != http.StatusOK && res.StatusCode() != http.StatusCreated && res.StatusCode() != http.StatusNoContent {
|
||||
return fmt.Errorf("failed to request %s, status code: %d, message: %s", url, res.StatusCode(), res.String())
|
||||
}
|
||||
|
||||
if resp != nil {
|
||||
err = json.Unmarshal(res.Body(), resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *CnbReleases) sumAssetsSize(assets []ReleaseAsset) int64 {
|
||||
var size int64
|
||||
for _, asset := range assets {
|
||||
size += asset.Size
|
||||
}
|
||||
return size
|
||||
}
|
@ -411,6 +411,20 @@ func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, streamer model.FileSt
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Crypt) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||
wd, ok := d.remoteStorage.(driver.WithDetails)
|
||||
if !ok {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
remoteDetails, err := wd.GetDetails(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &model.StorageDetails{
|
||||
DiskUsage: remoteDetails.DiskUsage,
|
||||
}, nil
|
||||
}
|
||||
|
||||
//func (d *Safe) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
|
||||
// return nil, errs.NotSupport
|
||||
//}
|
||||
|
@ -51,7 +51,7 @@ func (d *Local) Config() driver.Config {
|
||||
|
||||
func (d *Local) Init(ctx context.Context) error {
|
||||
if d.MkdirPerm == "" {
|
||||
d.mkdirPerm = 0777
|
||||
d.mkdirPerm = 0o777
|
||||
} else {
|
||||
v, err := strconv.ParseUint(d.MkdirPerm, 8, 32)
|
||||
if err != nil {
|
||||
@ -150,6 +150,7 @@ func (d *Local) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
|
||||
}
|
||||
return files, nil
|
||||
}
|
||||
|
||||
func (d *Local) FileInfoToObj(ctx context.Context, f fs.FileInfo, reqPath string, fullPath string) model.Obj {
|
||||
thumb := ""
|
||||
if d.Thumbnail {
|
||||
@ -198,7 +199,7 @@ func (d *Local) Get(ctx context.Context, path string) (model.Obj, error) {
|
||||
path = filepath.Join(d.GetRootPath(), path)
|
||||
f, err := os.Stat(path)
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "cannot find the file") {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, errs.ObjectNotFound
|
||||
}
|
||||
return nil, err
|
||||
@ -374,6 +375,13 @@ func (d *Local) Remove(ctx context.Context, obj model.Obj) error {
|
||||
err = os.Remove(obj.GetPath())
|
||||
}
|
||||
} else {
|
||||
if !utils.Exists(d.RecycleBinPath) {
|
||||
err = os.MkdirAll(d.RecycleBinPath, 0o755)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
dstPath := filepath.Join(d.RecycleBinPath, obj.GetName())
|
||||
if utils.Exists(dstPath) {
|
||||
dstPath = filepath.Join(d.RecycleBinPath, obj.GetName()+"_"+time.Now().Format("20060102150405"))
|
||||
@ -427,4 +435,14 @@ func (d *Local) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Local) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||
du, err := getDiskUsage(d.RootFolderPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &model.StorageDetails{
|
||||
DiskUsage: du,
|
||||
}, nil
|
||||
}
|
||||
|
||||
var _ driver.Driver = (*Local)(nil)
|
||||
|
@ -5,8 +5,25 @@ package local
|
||||
import (
|
||||
"io/fs"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
)
|
||||
|
||||
func isHidden(f fs.FileInfo, _ string) bool {
|
||||
return strings.HasPrefix(f.Name(), ".")
|
||||
}
|
||||
|
||||
func getDiskUsage(path string) (model.DiskUsage, error) {
|
||||
var stat syscall.Statfs_t
|
||||
err := syscall.Statfs(path, &stat)
|
||||
if err != nil {
|
||||
return model.DiskUsage{}, err
|
||||
}
|
||||
total := stat.Blocks * uint64(stat.Bsize)
|
||||
free := stat.Bfree * uint64(stat.Bsize)
|
||||
return model.DiskUsage{
|
||||
TotalSpace: total,
|
||||
FreeSpace: free,
|
||||
}, nil
|
||||
}
|
||||
|
@ -3,9 +3,13 @@
|
||||
package local
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io/fs"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
func isHidden(f fs.FileInfo, fullPath string) bool {
|
||||
@ -20,3 +24,28 @@ func isHidden(f fs.FileInfo, fullPath string) bool {
|
||||
}
|
||||
return attrs&syscall.FILE_ATTRIBUTE_HIDDEN != 0
|
||||
}
|
||||
|
||||
func getDiskUsage(path string) (model.DiskUsage, error) {
|
||||
abs, err := filepath.Abs(path)
|
||||
if err != nil {
|
||||
return model.DiskUsage{}, err
|
||||
}
|
||||
root := filepath.VolumeName(abs)
|
||||
if len(root) != 2 || root[1] != ':' {
|
||||
return model.DiskUsage{}, errors.New("cannot get disk label")
|
||||
}
|
||||
var freeBytes, totalBytes, totalFreeBytes uint64
|
||||
err = windows.GetDiskFreeSpaceEx(
|
||||
windows.StringToUTF16Ptr(root),
|
||||
&freeBytes,
|
||||
&totalBytes,
|
||||
&totalFreeBytes,
|
||||
)
|
||||
if err != nil {
|
||||
return model.DiskUsage{}, err
|
||||
}
|
||||
return model.DiskUsage{
|
||||
TotalSpace: totalBytes,
|
||||
FreeSpace: freeBytes,
|
||||
}, nil
|
||||
}
|
||||
|
@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||
@ -127,4 +128,22 @@ func (d *SFTP) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *SFTP) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||
stat, err := d.client.StatVFS(d.RootFolderPath)
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "unimplemented") {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
total := stat.Blocks * stat.Bsize
|
||||
free := stat.Bfree * stat.Bsize
|
||||
return &model.StorageDetails{
|
||||
DiskUsage: model.DiskUsage{
|
||||
TotalSpace: total,
|
||||
FreeSpace: free,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
var _ driver.Driver = (*SFTP)(nil)
|
||||
|
@ -205,6 +205,22 @@ func (d *SMB) Put(ctx context.Context, dstDir model.Obj, stream model.FileStream
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *SMB) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||
if err := d.checkConn(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stat, err := d.fs.Statfs(d.RootFolderPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &model.StorageDetails{
|
||||
DiskUsage: model.DiskUsage{
|
||||
TotalSpace: stat.BlockSize() * stat.TotalBlockCount(),
|
||||
FreeSpace: stat.BlockSize() * stat.AvailableBlockCount(),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
//func (d *SMB) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
|
||||
// return nil, errs.NotSupport
|
||||
//}
|
||||
|
@ -93,6 +93,11 @@ func (d *Template) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.O
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
||||
func (d *Template) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||
// TODO return storage details (total space, free space, etc.)
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
||||
//func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
|
||||
// return nil, errs.NotSupport
|
||||
//}
|
||||
|
@ -1,10 +1,11 @@
|
||||
package archives
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
stdpath "path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
|
||||
@ -107,7 +108,7 @@ func (Archives) Decompress(ss []*stream.SeekableStream, outputPath string, args
|
||||
}
|
||||
if stat.IsDir() {
|
||||
isDir = true
|
||||
outputPath = stdpath.Join(outputPath, stat.Name())
|
||||
outputPath = filepath.Join(outputPath, stat.Name())
|
||||
err = os.Mkdir(outputPath, 0700)
|
||||
if err != nil {
|
||||
return filterPassword(err)
|
||||
@ -120,11 +121,14 @@ func (Archives) Decompress(ss []*stream.SeekableStream, outputPath string, args
|
||||
return err
|
||||
}
|
||||
relPath := strings.TrimPrefix(p, path+"/")
|
||||
dstPath := stdpath.Join(outputPath, relPath)
|
||||
dstPath := filepath.Join(outputPath, relPath)
|
||||
if !strings.HasPrefix(dstPath, outputPath+string(os.PathSeparator)) {
|
||||
return fmt.Errorf("illegal file path: %s", relPath)
|
||||
}
|
||||
if d.IsDir() {
|
||||
err = os.MkdirAll(dstPath, 0700)
|
||||
} else {
|
||||
dir := stdpath.Dir(dstPath)
|
||||
dir := filepath.Dir(dstPath)
|
||||
err = decompress(fsys, p, dir, func(_ float64) {})
|
||||
}
|
||||
return err
|
||||
|
@ -1,10 +1,11 @@
|
||||
package archives
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
fs2 "io/fs"
|
||||
"os"
|
||||
stdpath "path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||
@ -69,7 +70,11 @@ func decompress(fsys fs2.FS, filePath, targetPath string, up model.UpdateProgres
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f, err := os.OpenFile(stdpath.Join(targetPath, stat.Name()), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
|
||||
destPath := filepath.Join(targetPath, stat.Name())
|
||||
if !strings.HasPrefix(destPath, targetPath+string(os.PathSeparator)) {
|
||||
return fmt.Errorf("illegal file path: %s", stat.Name())
|
||||
}
|
||||
f, err := os.OpenFile(destPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1,9 +1,11 @@
|
||||
package iso9660
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
stdpath "path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||
@ -79,7 +81,11 @@ func (ISO9660) Decompress(ss []*stream.SeekableStream, outputPath string, args m
|
||||
}
|
||||
if obj.IsDir() {
|
||||
if args.InnerPath != "/" {
|
||||
outputPath = stdpath.Join(outputPath, obj.Name())
|
||||
rootpath := outputPath
|
||||
outputPath = filepath.Join(outputPath, obj.Name())
|
||||
if !strings.HasPrefix(outputPath, rootpath+string(os.PathSeparator)) {
|
||||
return fmt.Errorf("illegal file path: %s", obj.Name())
|
||||
}
|
||||
if err = os.MkdirAll(outputPath, 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1,8 +1,9 @@
|
||||
package iso9660
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
stdpath "path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||
@ -62,7 +63,11 @@ func toModelObj(file *iso9660.File) model.Obj {
|
||||
}
|
||||
|
||||
func decompress(f *iso9660.File, path string, up model.UpdateProgress) error {
|
||||
file, err := os.OpenFile(stdpath.Join(path, f.Name()), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
|
||||
destPath := filepath.Join(path, f.Name())
|
||||
if !strings.HasPrefix(destPath, path+string(os.PathSeparator)) {
|
||||
return fmt.Errorf("illegal file path: %s", f.Name())
|
||||
}
|
||||
file, err := os.OpenFile(destPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -84,7 +89,10 @@ func decompressAll(children []*iso9660.File, path string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nextPath := stdpath.Join(path, child.Name())
|
||||
nextPath := filepath.Join(path, child.Name())
|
||||
if !strings.HasPrefix(nextPath, path+string(os.PathSeparator)) {
|
||||
return fmt.Errorf("illegal file path: %s", child.Name())
|
||||
}
|
||||
if err = os.MkdirAll(nextPath, 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -3,7 +3,7 @@ package rardecode
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
stdpath "path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
|
||||
@ -93,7 +93,7 @@ func (RarDecoder) Decompress(ss []*stream.SeekableStream, outputPath string, arg
|
||||
}
|
||||
} else {
|
||||
innerPath := strings.TrimPrefix(args.InnerPath, "/")
|
||||
innerBase := stdpath.Base(innerPath)
|
||||
innerBase := filepath.Base(innerPath)
|
||||
createdBaseDir := false
|
||||
for {
|
||||
var header *rardecode.FileHeader
|
||||
@ -115,7 +115,7 @@ func (RarDecoder) Decompress(ss []*stream.SeekableStream, outputPath string, arg
|
||||
}
|
||||
break
|
||||
} else if strings.HasPrefix(name, innerPath+"/") {
|
||||
targetPath := stdpath.Join(outputPath, innerBase)
|
||||
targetPath := filepath.Join(outputPath, innerBase)
|
||||
if !createdBaseDir {
|
||||
err = os.Mkdir(targetPath, 0700)
|
||||
if err != nil {
|
||||
|
@ -5,7 +5,7 @@ import (
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
stdpath "path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
@ -124,7 +124,7 @@ type WrapFileInfo struct {
|
||||
}
|
||||
|
||||
func (f *WrapFileInfo) Name() string {
|
||||
return stdpath.Base(f.File.Name)
|
||||
return filepath.Base(f.File.Name)
|
||||
}
|
||||
|
||||
func (f *WrapFileInfo) Size() int64 {
|
||||
@ -183,12 +183,16 @@ func getReader(ss []*stream.SeekableStream, password string) (*rardecode.Reader,
|
||||
|
||||
func decompress(reader *rardecode.Reader, header *rardecode.FileHeader, filePath, outputPath string) error {
|
||||
targetPath := outputPath
|
||||
dir, base := stdpath.Split(filePath)
|
||||
dir, base := filepath.Split(filePath)
|
||||
if dir != "" {
|
||||
targetPath = stdpath.Join(targetPath, dir)
|
||||
err := os.MkdirAll(targetPath, 0700)
|
||||
if err != nil {
|
||||
return err
|
||||
targetPath = filepath.Join(targetPath, dir)
|
||||
if strings.HasPrefix(targetPath, outputPath+string(os.PathSeparator)) {
|
||||
err := os.MkdirAll(targetPath, 0700)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
targetPath = outputPath
|
||||
}
|
||||
}
|
||||
if base != "" {
|
||||
@ -201,7 +205,11 @@ func decompress(reader *rardecode.Reader, header *rardecode.FileHeader, filePath
|
||||
}
|
||||
|
||||
func _decompress(reader *rardecode.Reader, header *rardecode.FileHeader, targetPath string, up model.UpdateProgress) error {
|
||||
f, err := os.OpenFile(stdpath.Join(targetPath, stdpath.Base(header.Name)), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
|
||||
destPath := filepath.Join(targetPath, filepath.Base(header.Name))
|
||||
if !strings.HasPrefix(destPath, targetPath+string(os.PathSeparator)) {
|
||||
return fmt.Errorf("illegal file path: %s", filepath.Base(header.Name))
|
||||
}
|
||||
f, err := os.OpenFile(destPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1,10 +1,11 @@
|
||||
package tool
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
stdpath "path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
@ -40,13 +41,13 @@ func GenerateMetaTreeFromFolderTraversal(r ArchiveReader) (bool, []model.ObjTree
|
||||
isNewFolder := false
|
||||
if !file.FileInfo().IsDir() {
|
||||
// first add the file to the folder it belongs to
|
||||
dir = stdpath.Dir(name)
|
||||
dir = filepath.Dir(name)
|
||||
dirObj = dirMap[dir]
|
||||
if dirObj == nil {
|
||||
isNewFolder = dir != "."
|
||||
dirObj = &model.ObjectTree{}
|
||||
dirObj.IsFolder = true
|
||||
dirObj.Name = stdpath.Base(dir)
|
||||
dirObj.Name = filepath.Base(dir)
|
||||
dirObj.Modified = file.FileInfo().ModTime()
|
||||
dirMap[dir] = dirObj
|
||||
}
|
||||
@ -64,28 +65,28 @@ func GenerateMetaTreeFromFolderTraversal(r ArchiveReader) (bool, []model.ObjTree
|
||||
dirMap[dir] = dirObj
|
||||
}
|
||||
dirObj.IsFolder = true
|
||||
dirObj.Name = stdpath.Base(dir)
|
||||
dirObj.Name = filepath.Base(dir)
|
||||
dirObj.Modified = file.FileInfo().ModTime()
|
||||
}
|
||||
if isNewFolder {
|
||||
// add the folder to its parent folder
// some archives only record file paths, not folders,
// so create all missing parent folders in a loop
|
||||
parentDir := stdpath.Dir(dir)
|
||||
parentDir := filepath.Dir(dir)
|
||||
for {
|
||||
parentDirObj := dirMap[parentDir]
|
||||
if parentDirObj == nil {
|
||||
parentDirObj = &model.ObjectTree{}
|
||||
if parentDir != "." {
|
||||
parentDirObj.IsFolder = true
|
||||
parentDirObj.Name = stdpath.Base(parentDir)
|
||||
parentDirObj.Name = filepath.Base(parentDir)
|
||||
parentDirObj.Modified = file.FileInfo().ModTime()
|
||||
}
|
||||
dirMap[parentDir] = parentDirObj
|
||||
}
|
||||
parentDirObj.Children = append(parentDirObj.Children, dirObj)
|
||||
|
||||
parentDir = stdpath.Dir(parentDir)
|
||||
parentDir = filepath.Dir(parentDir)
|
||||
if dirMap[parentDir] != nil {
|
||||
break
|
||||
}
|
||||
@ -127,7 +128,7 @@ func DecompressFromFolderTraversal(r ArchiveReader, outputPath string, args mode
|
||||
}
|
||||
} else {
|
||||
innerPath := strings.TrimPrefix(args.InnerPath, "/")
|
||||
innerBase := stdpath.Base(innerPath)
|
||||
innerBase := filepath.Base(innerPath)
|
||||
createdBaseDir := false
|
||||
for _, file := range files {
|
||||
name := file.Name()
|
||||
@ -138,7 +139,7 @@ func DecompressFromFolderTraversal(r ArchiveReader, outputPath string, args mode
|
||||
}
|
||||
break
|
||||
} else if strings.HasPrefix(name, innerPath+"/") {
|
||||
targetPath := stdpath.Join(outputPath, innerBase)
|
||||
targetPath := filepath.Join(outputPath, innerBase)
|
||||
if !createdBaseDir {
|
||||
err = os.Mkdir(targetPath, 0700)
|
||||
if err != nil {
|
||||
@ -159,12 +160,16 @@ func DecompressFromFolderTraversal(r ArchiveReader, outputPath string, args mode
|
||||
|
||||
func decompress(file SubFile, filePath, outputPath, password string) error {
|
||||
targetPath := outputPath
|
||||
dir, base := stdpath.Split(filePath)
|
||||
dir, base := filepath.Split(filePath)
|
||||
if dir != "" {
|
||||
targetPath = stdpath.Join(targetPath, dir)
|
||||
err := os.MkdirAll(targetPath, 0700)
|
||||
if err != nil {
|
||||
return err
|
||||
targetPath = filepath.Join(targetPath, dir)
|
||||
if strings.HasPrefix(targetPath, outputPath+string(os.PathSeparator)) {
|
||||
err := os.MkdirAll(targetPath, 0700)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
targetPath = outputPath
|
||||
}
|
||||
}
|
||||
if base != "" {
|
||||
@ -185,7 +190,11 @@ func _decompress(file SubFile, targetPath, password string, up model.UpdateProgr
|
||||
return err
|
||||
}
|
||||
defer func() { _ = rc.Close() }()
|
||||
f, err := os.OpenFile(stdpath.Join(targetPath, file.FileInfo().Name()), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
|
||||
destPath := filepath.Join(targetPath, file.FileInfo().Name())
|
||||
if !strings.HasPrefix(destPath, targetPath+string(os.PathSeparator)) {
|
||||
return fmt.Errorf("illegal file path: %s", file.FileInfo().Name())
|
||||
}
|
||||
f, err := os.OpenFile(destPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
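The guard repeated across the archive tools above is the standard zip-slip check: join the entry name onto the output directory, then require the result to stay inside it. A minimal sketch of the pattern as a standalone helper (safeJoin is an illustrative name, not a function in this change):

package main

import (
    "fmt"
    "os"
    "path/filepath"
    "strings"
)

// safeJoin joins name onto base and rejects results that escape base
// (the "zip-slip" case, e.g. a name containing "../").
func safeJoin(base, name string) (string, error) {
    dst := filepath.Join(base, name)
    if !strings.HasPrefix(dst, base+string(os.PathSeparator)) {
        return "", fmt.Errorf("illegal file path: %s", name)
    }
    return dst, nil
}

func main() {
    fmt.Println(safeJoin("/tmp/out", "docs/readme.txt")) // /tmp/out/docs/readme.txt <nil>
    fmt.Println(safeJoin("/tmp/out", "../etc/passwd"))   // rejected as an illegal file path
}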
|
||||
|
@ -114,6 +114,7 @@ func InitialSettings() []model.SettingItem {
|
||||
{Key: "share_icon", Value: "🎁", Type: conf.TypeString, Group: model.STYLE},
|
||||
{Key: "home_container", Value: "max_980px", Type: conf.TypeSelect, Options: "max_980px,hope_container", Group: model.STYLE},
|
||||
{Key: "settings_layout", Value: "list", Type: conf.TypeSelect, Options: "list,responsive", Group: model.STYLE},
|
||||
{Key: conf.HideStorageDetails, Value: "false", Type: conf.TypeBool, Group: model.STYLE, Flag: model.PRIVATE},
|
||||
// preview settings
|
||||
{Key: conf.TextTypes, Value: "txt,htm,html,xml,java,properties,sql,js,md,json,conf,ini,vue,php,py,bat,gitignore,yml,go,sh,c,cpp,h,hpp,tsx,vtt,srt,ass,rs,lrc,strm", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE},
|
||||
{Key: conf.AudioTypes, Value: "mp3,flac,ogg,m4a,wav,opus,wma", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE},
|
||||
|
@ -17,9 +17,10 @@ const (
|
||||
AllowMounted = "allow_mounted"
|
||||
RobotsTxt = "robots_txt"
|
||||
|
||||
Logo = "logo" // multi-lines text, L1: light, EOL: dark
|
||||
Favicon = "favicon"
|
||||
MainColor = "main_color"
|
||||
Logo = "logo" // multi-lines text, L1: light, EOL: dark
|
||||
Favicon = "favicon"
|
||||
MainColor = "main_color"
|
||||
HideStorageDetails = "hide_storage_details"
|
||||
|
||||
// preview
|
||||
TextTypes = "text_types"
|
||||
|
@ -210,6 +210,11 @@ type ArchiveDecompressResult interface {
|
||||
ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error)
|
||||
}
|
||||
|
||||
type WithDetails interface {
|
||||
// GetDetails get storage details (total space, free space, etc.)
|
||||
GetDetails(ctx context.Context) (*model.StorageDetails, error)
|
||||
}
|
||||
|
||||
type Reference interface {
|
||||
InitReference(storage Driver) error
|
||||
}
|
||||
|
@ -19,8 +19,9 @@ import (
|
||||
// then pass the actual path to the op package
|
||||
|
||||
type ListArgs struct {
|
||||
Refresh bool
|
||||
NoLog bool
|
||||
Refresh bool
|
||||
NoLog bool
|
||||
WithStorageDetails bool
|
||||
}
|
||||
|
||||
func List(ctx context.Context, path string, args *ListArgs) ([]model.Obj, error) {
|
||||
@ -35,11 +36,12 @@ func List(ctx context.Context, path string, args *ListArgs) ([]model.Obj, error)
|
||||
}
|
||||
|
||||
type GetArgs struct {
|
||||
NoLog bool
|
||||
NoLog bool
|
||||
WithStorageDetails bool
|
||||
}
|
||||
|
||||
func Get(ctx context.Context, path string, args *GetArgs) (model.Obj, error) {
|
||||
res, err := get(ctx, path)
|
||||
res, err := get(ctx, path, args)
|
||||
if err != nil {
|
||||
if !args.NoLog {
|
||||
log.Warnf("failed get %s: %s", path, err)
|
||||
|
@ -11,11 +11,11 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func get(ctx context.Context, path string) (model.Obj, error) {
|
||||
func get(ctx context.Context, path string, args *GetArgs) (model.Obj, error) {
|
||||
path = utils.FixAndCleanPath(path)
|
||||
// maybe a virtual file
|
||||
if path != "/" {
|
||||
virtualFiles := op.GetStorageVirtualFilesByPath(stdpath.Dir(path))
|
||||
virtualFiles := op.GetStorageVirtualFilesWithDetailsByPath(ctx, stdpath.Dir(path), !args.WithStorageDetails)
|
||||
for _, f := range virtualFiles {
|
||||
if f.GetName() == stdpath.Base(path) {
|
||||
return f, nil
|
||||
|
@ -15,7 +15,7 @@ import (
|
||||
func list(ctx context.Context, path string, args *ListArgs) ([]model.Obj, error) {
|
||||
meta, _ := ctx.Value(conf.MetaKey).(*model.Meta)
|
||||
user, _ := ctx.Value(conf.UserKey).(*model.User)
|
||||
virtualFiles := op.GetStorageVirtualFilesByPath(path)
|
||||
virtualFiles := op.GetStorageVirtualFilesWithDetailsByPath(ctx, path, !args.WithStorageDetails)
|
||||
storage, actualPath, err := op.GetStorageAndActualPath(path)
|
||||
if err != nil && len(virtualFiles) == 0 {
|
||||
return nil, errors.WithMessage(err, "failed get storage")
|
||||
|
@ -80,6 +80,10 @@ type SetPath interface {
|
||||
SetPath(path string)
|
||||
}
|
||||
|
||||
type ObjWithProvider interface {
|
||||
GetProvider() string
|
||||
}
|
||||
|
||||
func SortFiles(objs []Obj, orderBy, orderDirection string) {
|
||||
if orderBy == "" {
|
||||
return
|
||||
@ -166,6 +170,16 @@ func GetUrl(obj Obj) (url string, ok bool) {
|
||||
return url, false
|
||||
}
|
||||
|
||||
func GetProvider(obj Obj) (string, bool) {
|
||||
if obj, ok := obj.(ObjWithProvider); ok {
|
||||
return obj.GetProvider(), true
|
||||
}
|
||||
if unwrap, ok := obj.(ObjUnwrap); ok {
|
||||
return GetProvider(unwrap.Unwrap())
|
||||
}
|
||||
return "unknown", false
|
||||
}
|
||||
|
||||
func GetRawObject(obj Obj) *Object {
|
||||
switch v := obj.(type) {
|
||||
case *ObjThumbURL:
|
||||
@ -174,6 +188,8 @@ func GetRawObject(obj Obj) *Object {
|
||||
return &v.Object
|
||||
case *ObjectURL:
|
||||
return &v.Object
|
||||
case *ObjectProvider:
|
||||
return &v.Object
|
||||
case *Object:
|
||||
return v
|
||||
}
|
||||
|
@ -99,3 +99,16 @@ type ObjThumbURL struct {
|
||||
Thumbnail
|
||||
Url
|
||||
}
|
||||
|
||||
type Provider struct {
|
||||
Provider string
|
||||
}
|
||||
|
||||
func (p Provider) GetProvider() string {
|
||||
return p.Provider
|
||||
}
|
||||
|
||||
type ObjectProvider struct {
|
||||
Object
|
||||
Provider
|
||||
}
|
||||
|
@ -55,3 +55,40 @@ func (p Proxy) Webdav302() bool {
|
||||
func (p Proxy) WebdavProxyURL() bool {
|
||||
return p.WebdavPolicy == "use_proxy_url"
|
||||
}
|
||||
|
||||
type DiskUsage struct {
|
||||
TotalSpace uint64 `json:"total_space"`
|
||||
FreeSpace uint64 `json:"free_space"`
|
||||
}
|
||||
|
||||
type StorageDetails struct {
|
||||
DiskUsage
|
||||
}
|
||||
|
||||
type StorageDetailsWithName struct {
|
||||
*StorageDetails
|
||||
DriverName string `json:"driver_name"`
|
||||
}
|
||||
|
||||
type ObjWithStorageDetails interface {
|
||||
GetStorageDetails() *StorageDetailsWithName
|
||||
}
|
||||
|
||||
type ObjStorageDetails struct {
|
||||
Obj
|
||||
StorageDetailsWithName
|
||||
}
|
||||
|
||||
func (o ObjStorageDetails) GetStorageDetails() *StorageDetailsWithName {
|
||||
return &o.StorageDetailsWithName
|
||||
}
|
||||
|
||||
func GetStorageDetails(obj Obj) (*StorageDetailsWithName, bool) {
|
||||
if obj, ok := obj.(ObjWithStorageDetails); ok {
|
||||
return obj.GetStorageDetails(), true
|
||||
}
|
||||
if unwrap, ok := obj.(ObjUnwrap); ok {
|
||||
return GetStorageDetails(unwrap.Unwrap())
|
||||
}
|
||||
return nil, false
|
||||
}
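A minimal sketch of how these wrapper types compose, written as if it lived inside the model package (the test name, placement, and concrete values are illustrative):

// e.g. a test file in internal/model
package model

import "testing"

func TestStorageDetailsWrap(t *testing.T) {
    obj := ObjStorageDetails{
        Obj: &Object{Name: "mount-point", IsFolder: true},
        StorageDetailsWithName: StorageDetailsWithName{
            StorageDetails: &StorageDetails{DiskUsage: DiskUsage{TotalSpace: 100 << 30, FreeSpace: 25 << 30}},
            DriverName:     "Local",
        },
    }
    // GetStorageDetails unwraps until it finds an ObjWithStorageDetails implementation
    if d, ok := GetStorageDetails(obj); !ok || d.DriverName != "Local" {
        t.Fatalf("unexpected details: %+v", d)
    }
}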
|
||||
|
@ -125,7 +125,7 @@ type ConcurrencyLimit struct {
|
||||
Limit int // must be greater than 0
|
||||
}
|
||||
|
||||
var ErrExceedMaxConcurrency = ErrorHttpStatusCode(http.StatusTooManyRequests)
|
||||
var ErrExceedMaxConcurrency = HttpStatusCodeError(http.StatusTooManyRequests)
|
||||
|
||||
func (l *ConcurrencyLimit) sub() error {
|
||||
l._m.Lock()
|
||||
@ -403,7 +403,7 @@ var errInfiniteRetry = errors.New("infinite retry")
|
||||
func (d *downloader) tryDownloadChunk(params *HttpRequestParams, ch *chunk) (int64, error) {
|
||||
resp, err := d.cfg.HttpClient(d.ctx, params)
|
||||
if err != nil {
|
||||
statusCode, ok := errors.Unwrap(err).(ErrorHttpStatusCode)
|
||||
statusCode, ok := errors.Unwrap(err).(HttpStatusCodeError)
|
||||
if !ok {
|
||||
return 0, err
|
||||
}
|
||||
|
@ -114,7 +114,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
        reader, err := RangeReadCloser.RangeRead(ctx, http_range.Range{Length: -1})
        if err != nil {
            code = http.StatusRequestedRangeNotSatisfiable
            if statusCode, ok := errors.Unwrap(err).(ErrorHttpStatusCode); ok {
            if statusCode, ok := errors.Unwrap(err).(HttpStatusCodeError); ok {
                code = int(statusCode)
            }
            http.Error(w, err.Error(), code)
@ -137,7 +137,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
            sendContent, err = RangeReadCloser.RangeRead(ctx, ra)
            if err != nil {
                code = http.StatusRequestedRangeNotSatisfiable
                if statusCode, ok := errors.Unwrap(err).(ErrorHttpStatusCode); ok {
                if statusCode, ok := errors.Unwrap(err).(HttpStatusCodeError); ok {
                    code = int(statusCode)
                }
                http.Error(w, err.Error(), code)
@ -199,7 +199,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
            log.Warnf("Maybe size incorrect or reader not giving correct/full data, or connection closed before finish. written bytes: %d ,sendSize:%d, ", written, sendSize)
        }
        code = http.StatusInternalServerError
        if statusCode, ok := errors.Unwrap(err).(ErrorHttpStatusCode); ok {
        if statusCode, ok := errors.Unwrap(err).(HttpStatusCodeError); ok {
            code = int(statusCode)
        }
        w.WriteHeader(code)
@ -253,14 +253,14 @@ func RequestHttp(ctx context.Context, httpMethod string, headerOverride http.Hea
        _ = res.Body.Close()
        msg := string(all)
        log.Debugln(msg)
        return nil, fmt.Errorf("http request [%s] failure,status: %w response:%s", URL, ErrorHttpStatusCode(res.StatusCode), msg)
        return nil, fmt.Errorf("http request [%s] failure,status: %w response:%s", URL, HttpStatusCodeError(res.StatusCode), msg)
    }
    return res, nil
}

type ErrorHttpStatusCode int
type HttpStatusCodeError int

func (e ErrorHttpStatusCode) Error() string {
func (e HttpStatusCodeError) Error() string {
    return fmt.Sprintf("%d|%s", e, http.StatusText(int(e)))
}
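The rename keeps the error usable for status recovery: because RequestHttp wraps the status with %w, callers can get the code back with errors.Unwrap and a type assertion, exactly as the ServeHTTP and downloader hunks do. A small stand-alone sketch of that round trip (the type and format string mirror the hunk; the failing request itself is simulated):

package main

import (
    "errors"
    "fmt"
    "net/http"
)

type HttpStatusCodeError int

func (e HttpStatusCodeError) Error() string {
    return fmt.Sprintf("%d|%s", int(e), http.StatusText(int(e)))
}

// request simulates a failed HTTP call wrapped the same way net.RequestHttp wraps it.
func request() error {
    return fmt.Errorf("http request [%s] failure,status: %w response:%s",
        "https://example.com", HttpStatusCodeError(http.StatusTooManyRequests), "slow down")
}

func main() {
    err := request()
    if code, ok := errors.Unwrap(err).(HttpStatusCodeError); ok {
        fmt.Println("status:", int(code)) // status: 429
    }
}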
@ -405,11 +405,8 @@ func DriverExtract(ctx context.Context, storage driver.Driver, path string, args
        return nil
    })
    link, err, _ := extractG.Do(key, fn)
    if err == nil && !link.AcquireReference() {
    for err == nil && !link.AcquireReference() {
        link, err, _ = extractG.Do(key, fn)
        if err == nil {
            link.AcquireReference()
        }
    }
    if err == errLinkMFileCache {
        if linkM != nil {
@ -184,6 +184,9 @@ func Get(ctx context.Context, storage driver.Driver, path string) (model.Obj, er
        if err == nil {
            return model.WrapObjName(obj), nil
        }
        if !errs.IsNotImplement(err) {
            return nil, errors.WithMessage(err, "failed to get obj")
        }
    }

    // is root folder
@ -327,11 +330,8 @@ func Link(ctx context.Context, storage driver.Driver, path string, args model.Li
        return nil
    })
    link, err, _ := linkG.Do(key, fn)
    if err == nil && !link.AcquireReference() {
    for err == nil && !link.AcquireReference() {
        link, err, _ = linkG.Do(key, fn)
        if err == nil {
            link.AcquireReference()
        }
    }

    if err == errLinkMFileCache {
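The same change appears in both the extract and the link paths: instead of acquiring the shared result once, the code loops until a freshly fetched value can actually be reference-counted. A stand-alone sketch of that retry-until-acquired idea with a hypothetical refcounted resource (the singleflight plumbing is omitted):

package main

import (
    "fmt"
    "sync/atomic"
)

type resource struct{ ref int32 }

// AcquireReference fails once the resource has already been closed (ref < 0).
func (r *resource) AcquireReference() bool {
    for {
        v := atomic.LoadInt32(&r.ref)
        if v < 0 {
            return false
        }
        if atomic.CompareAndSwapInt32(&r.ref, v, v+1) {
            return true
        }
    }
}

func main() {
    fetch := func() (*resource, error) { return &resource{}, nil }

    res, err := fetch()
    for err == nil && !res.AcquireReference() {
        // The cached value was closed between the shared fetch returning and
        // our acquire attempt; run the fetch again to get a fresh one.
        res, err = fetch()
    }
    fmt.Println(res != nil, err)
}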
@ -486,12 +486,18 @@ func Rename(ctx context.Context, storage driver.Driver, srcPath, dstName string,
                updateCacheObj(storage, srcDirPath, srcRawObj, model.WrapObjName(newObj))
            } else if !utils.IsBool(lazyCache...) {
                DeleteCache(storage, srcDirPath)
                if srcRawObj.IsDir() {
                    ClearCache(storage, srcPath)
                }
            }
        }
    case driver.Rename:
        err = s.Rename(ctx, srcObj, dstName)
        if err == nil && !utils.IsBool(lazyCache...) {
            DeleteCache(storage, srcDirPath)
            if srcRawObj.IsDir() {
                ClearCache(storage, srcPath)
            }
        }
    default:
        return errs.NotImplement
@ -624,6 +630,11 @@ func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file mod
        up = func(p float64) {}
    }

    // If the size is less than 0, obtain the full size via the cache; this can happen with streaming uploads
    if file.GetSize() < 0 {
        log.Warnf("file size < 0, try to get full size from cache")
        file.CacheFullAndWriter(nil, nil)
    }
    switch s := storage.(type) {
    case driver.PutResult:
        var newObj model.Obj
@ -15,7 +15,6 @@ import (
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/pkg/generic_sync"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
    mapset "github.com/deckarep/golang-set/v2"
    "github.com/pkg/errors"
    log "github.com/sirupsen/logrus"
)
@ -335,6 +334,40 @@ func getStoragesByPath(path string) []driver.Driver {
// for example, there are: /a/b,/a/c,/a/d/e,/a/b.balance1,/av
// GetStorageVirtualFilesByPath(/a) => b,c,d
func GetStorageVirtualFilesByPath(prefix string) []model.Obj {
    return getStorageVirtualFilesByPath(prefix, func(_ driver.Driver, obj model.Obj) model.Obj {
        return obj
    })
}

func GetStorageVirtualFilesWithDetailsByPath(ctx context.Context, prefix string, hideDetails ...bool) []model.Obj {
    if utils.IsBool(hideDetails...) {
        return GetStorageVirtualFilesByPath(prefix)
    }
    return getStorageVirtualFilesByPath(prefix, func(d driver.Driver, obj model.Obj) model.Obj {
        ret := &model.ObjStorageDetails{
            Obj: obj,
            StorageDetailsWithName: model.StorageDetailsWithName{
                StorageDetails: nil,
                DriverName:     d.Config().Name,
            },
        }
        storage, ok := d.(driver.WithDetails)
        if !ok {
            return ret
        }
        details, err := storage.GetDetails(ctx)
        if err != nil {
            if !errors.Is(err, errs.NotImplement) {
                log.Errorf("failed get %s storage details: %+v", d.GetStorage().MountPath, err)
            }
            return ret
        }
        ret.StorageDetails = details
        return ret
    })
}

func getStorageVirtualFilesByPath(prefix string, rootCallback func(driver.Driver, model.Obj) model.Obj) []model.Obj {
    files := make([]model.Obj, 0)
    storages := storagesMap.Values()
    sort.Slice(storages, func(i, j int) bool {
@ -345,21 +378,30 @@ func GetStorageVirtualFilesByPath(prefix string) []model.Obj {
    })

    prefix = utils.FixAndCleanPath(prefix)
    set := mapset.NewSet[string]()
    set := make(map[string]int)
    for _, v := range storages {
        mountPath := utils.GetActualMountPath(v.GetStorage().MountPath)
        // Exclude prefix itself and non prefix
        if len(prefix) >= len(mountPath) || !utils.IsSubPath(prefix, mountPath) {
            continue
        }
        name := strings.SplitN(strings.TrimPrefix(mountPath[len(prefix):], "/"), "/", 2)[0]
        if set.Add(name) {
            files = append(files, &model.Object{
                Name:     name,
        names := strings.SplitN(strings.TrimPrefix(mountPath[len(prefix):], "/"), "/", 2)
        idx, ok := set[names[0]]
        if !ok {
            set[names[0]] = len(files)
            obj := &model.Object{
                Name:     names[0],
                Size:     0,
                Modified: v.GetStorage().Modified,
                IsFolder: true,
            })
            }
            if len(names) == 1 {
                files = append(files, rootCallback(v, obj))
            } else {
                files = append(files, obj)
            }
        } else if len(names) == 1 {
            files[idx] = rootCallback(v, files[idx])
        }
    }
    return files
@ -137,6 +137,60 @@ func (f *FileStream) CacheFullAndWriter(up *model.UpdateProgress, writer io.Writ
    if writer != nil {
        reader = io.TeeReader(reader, writer)
    }

    if f.GetSize() < 0 {
        if f.peekBuff == nil {
            f.peekBuff = &buffer.Reader{}
        }
        // Check whether any data is available
        buf := []byte{0}
        n, err := io.ReadFull(reader, buf)
        if n > 0 {
            f.peekBuff.Append(buf[:n])
        }
        if err == io.ErrUnexpectedEOF {
            f.size = f.peekBuff.Size()
            f.Reader = f.peekBuff
            return f.peekBuff, nil
        } else if err != nil {
            return nil, err
        }
        if conf.MaxBufferLimit-n > conf.MmapThreshold && conf.MmapThreshold > 0 {
            m, err := mmap.Alloc(conf.MaxBufferLimit - n)
            if err == nil {
                f.Add(utils.CloseFunc(func() error {
                    return mmap.Free(m)
                }))
                n, err = io.ReadFull(reader, m)
                if n > 0 {
                    f.peekBuff.Append(m[:n])
                }
                if err == io.ErrUnexpectedEOF {
                    f.size = f.peekBuff.Size()
                    f.Reader = f.peekBuff
                    return f.peekBuff, nil
                } else if err != nil {
                    return nil, err
                }
            }
        }

        tmpF, err := utils.CreateTempFile(reader, 0)
        if err != nil {
            return nil, err
        }
        f.Add(utils.CloseFunc(func() error {
            return errors.Join(tmpF.Close(), os.RemoveAll(tmpF.Name()))
        }))
        peekF, err := buffer.NewPeekFile(f.peekBuff, tmpF)
        if err != nil {
            return nil, err
        }
        f.size = peekF.Size()
        f.Reader = peekF
        return peekF, nil
    }

    f.Reader = reader
    return f.cache(f.GetSize())
}
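The new branch above handles streams of unknown size by peeking into memory first and spilling the rest to a temporary file. A simplified, self-contained sketch of that peek-then-spill idea (the function name and limits are illustrative, not the project's API):

package main

import (
    "bytes"
    "fmt"
    "io"
    "os"
    "strings"
)

// peekThenSpill buffers at most maxInMemory bytes of an unknown-length stream
// in memory and writes any remainder to a temporary file, reporting the total size.
func peekThenSpill(r io.Reader, maxInMemory int64) (peek []byte, tmpPath string, total int64, err error) {
    buf := &bytes.Buffer{}
    n, err := io.CopyN(buf, r, maxInMemory)
    total += n
    if err == io.EOF {
        return buf.Bytes(), "", total, nil // the whole stream fit in memory
    } else if err != nil {
        return nil, "", 0, err
    }
    tmp, err := os.CreateTemp("", "spill-*")
    if err != nil {
        return nil, "", 0, err
    }
    defer tmp.Close()
    m, err := io.Copy(tmp, r) // spill the remainder to disk
    total += m
    if err != nil {
        os.Remove(tmp.Name())
        return nil, "", 0, err
    }
    return buf.Bytes(), tmp.Name(), total, nil
}

func main() {
    peek, path, total, err := peekThenSpill(strings.NewReader("hello, openlist"), 5)
    fmt.Println(string(peek), path, total, err) // "hello", a temp file path, 15, <nil>
}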
@ -162,7 +216,7 @@ func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
    }

    size := httpRange.Start + httpRange.Length
    if f.peekBuff != nil && size <= int64(f.peekBuff.Len()) {
    if f.peekBuff != nil && size <= int64(f.peekBuff.Size()) {
        return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil
    }

@ -194,7 +248,7 @@ func (f *FileStream) cache(maxCacheSize int64) (model.File, error) {
        f.peekBuff = &buffer.Reader{}
        f.oriReader = f.Reader
    }
    bufSize := maxCacheSize - int64(f.peekBuff.Len())
    bufSize := maxCacheSize - int64(f.peekBuff.Size())
    var buf []byte
    if conf.MmapThreshold > 0 && bufSize >= int64(conf.MmapThreshold) {
        m, err := mmap.Alloc(int(bufSize))
@ -213,7 +267,7 @@ func (f *FileStream) cache(maxCacheSize int64) (model.File, error) {
        return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", bufSize, n, err)
    }
    f.peekBuff.Append(buf)
    if int64(f.peekBuff.Len()) >= f.GetSize() {
    if int64(f.peekBuff.Size()) >= f.GetSize() {
        f.Reader = f.peekBuff
        f.oriReader = nil
    } else {
@ -77,7 +77,7 @@ func GetRangeReaderFromLink(size int64, link *model.Link) (model.RangeReaderIF,

        response, err := net.RequestHttp(ctx, "GET", header, link.URL)
        if err != nil {
            if _, ok := errors.Unwrap(err).(net.ErrorHttpStatusCode); ok {
            if _, ok := errors.Unwrap(err).(net.HttpStatusCodeError); ok {
                return nil, err
            }
            return nil, fmt.Errorf("http request failure, err:%w", err)
@ -8,83 +8,86 @@ import (
// Stores []byte slices that are not reused
type Reader struct {
    bufs   [][]byte
    length int
    offset int
    size   int64
    offset int64
}

func (r *Reader) Len() int {
    return r.length
func (r *Reader) Size() int64 {
    return r.size
}

func (r *Reader) Append(buf []byte) {
    r.length += len(buf)
    r.size += int64(len(buf))
    r.bufs = append(r.bufs, buf)
}

func (r *Reader) Read(p []byte) (int, error) {
    n, err := r.ReadAt(p, int64(r.offset))
    n, err := r.ReadAt(p, r.offset)
    if n > 0 {
        r.offset += n
        r.offset += int64(n)
    }
    return n, err
}

func (r *Reader) ReadAt(p []byte, off int64) (int, error) {
    if off < 0 || off >= int64(r.length) {
    if off < 0 || off >= r.size {
        return 0, io.EOF
    }

    n, length := 0, int64(0)
    n := 0
    readFrom := false
    for _, buf := range r.bufs {
        newLength := length + int64(len(buf))
        if readFrom {
            w := copy(p[n:], buf)
            n += w
        } else if off < newLength {
            nn := copy(p[n:], buf)
            n += nn
            if n == len(p) {
                return n, nil
            }
        } else if newOff := off - int64(len(buf)); newOff >= 0 {
            off = newOff
        } else {
            nn := copy(p, buf[off:])
            if nn == len(p) {
                return nn, nil
            }
            n += nn
            readFrom = true
            w := copy(p[n:], buf[int(off-length):])
            n += w
        }
        if n == len(p) {
            return n, nil
        }
        length = newLength
    }

    return n, io.EOF
}

func (r *Reader) Seek(offset int64, whence int) (int64, error) {
    var abs int
    switch whence {
    case io.SeekStart:
        abs = int(offset)
    case io.SeekCurrent:
        abs = r.offset + int(offset)
        offset = r.offset + offset
    case io.SeekEnd:
        abs = r.length + int(offset)
        offset = r.size + offset
    default:
        return 0, errors.New("Seek: invalid whence")
    }

    if abs < 0 || abs > r.length {
    if offset < 0 || offset > r.size {
        return 0, errors.New("Seek: invalid offset")
    }

    r.offset = abs
    return int64(abs), nil
    r.offset = offset
    return offset, nil
}

func (r *Reader) Reset() {
    clear(r.bufs)
    r.bufs = nil
    r.length = 0
    r.size = 0
    r.offset = 0
}

func NewReader(buf ...[]byte) *Reader {
    b := &Reader{}
    b := &Reader{
        bufs: make([][]byte, 0, len(buf)),
    }
    for _, b1 := range buf {
        b.Append(b1)
    }
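Assuming the reworked pkg/buffer API shown above (Size, ReadAt and Seek over a list of appended slices), a short usage sketch; the expected values in the comments follow from the appended data:

package main

import (
    "fmt"
    "io"

    "github.com/OpenListTeam/OpenList/v4/pkg/buffer"
)

func main() {
    r := buffer.NewReader([]byte("github.com/"), []byte("OpenListTeam"))

    p := make([]byte, 8)
    n, err := r.ReadAt(p, 7) // the read spans both appended slices
    fmt.Println(string(p[:n]), err) // expected: "com/Open" <nil>

    if _, err := r.Seek(0, io.SeekEnd); err == nil {
        fmt.Println("size:", r.Size()) // expected: size: 23
    }
}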
@ -13,8 +13,7 @@ func TestReader_ReadAt(t *testing.T) {
    }
    bs := &Reader{}
    bs.Append([]byte("github.com"))
    bs.Append([]byte("/"))
    bs.Append([]byte("OpenList"))
    bs.Append([]byte("/OpenList"))
    bs.Append([]byte("Team/"))
    bs.Append([]byte("OpenList"))
    tests := []struct {
@ -71,7 +70,7 @@ func TestReader_ReadAt(t *testing.T) {
                off: 24,
            },
            want: func(a args, n int, err error) error {
                if n != bs.Len()-int(a.off) {
                if n != int(bs.Size()-a.off) {
                    return errors.New("read length not match")
                }
                if string(a.p[:n]) != "OpenList" {
88
pkg/buffer/file.go
Normal file
@ -0,0 +1,88 @@
package buffer

import (
    "errors"
    "io"
    "os"
)

type PeekFile struct {
    peek   *Reader
    file   *os.File
    offset int64
    size   int64
}

func (p *PeekFile) Read(b []byte) (n int, err error) {
    n, err = p.ReadAt(b, p.offset)
    if n > 0 {
        p.offset += int64(n)
    }
    return n, err
}

func (p *PeekFile) ReadAt(b []byte, off int64) (n int, err error) {
    if off < p.peek.Size() {
        n, err = p.peek.ReadAt(b, off)
        if err == nil || n == len(b) {
            return n, nil
        }
        // EOF
    }
    var nn int
    nn, err = p.file.ReadAt(b[n:], off+int64(n)-p.peek.Size())
    return n + nn, err
}

func (p *PeekFile) Seek(offset int64, whence int) (int64, error) {
    switch whence {
    case io.SeekStart:
    case io.SeekCurrent:
        if offset == 0 {
            return p.offset, nil
        }
        offset = p.offset + offset
    case io.SeekEnd:
        offset = p.size + offset
    default:
        return 0, errors.New("Seek: invalid whence")
    }

    if offset < 0 || offset > p.size {
        return 0, errors.New("Seek: invalid offset")
    }
    if offset <= p.peek.Size() {
        _, err := p.peek.Seek(offset, io.SeekStart)
        if err != nil {
            return 0, err
        }
        _, err = p.file.Seek(0, io.SeekStart)
        if err != nil {
            return 0, err
        }
    } else {
        _, err := p.peek.Seek(p.peek.Size(), io.SeekStart)
        if err != nil {
            return 0, err
        }
        _, err = p.file.Seek(offset-p.peek.Size(), io.SeekStart)
        if err != nil {
            return 0, err
        }
    }

    p.offset = offset
    return offset, nil
}

func (p *PeekFile) Size() int64 {
    return p.size
}

func NewPeekFile(peek *Reader, file *os.File) (*PeekFile, error) {
    stat, err := file.Stat()
    if err == nil {
        return &PeekFile{peek: peek, file: file, size: stat.Size() + peek.Size()}, nil
    }
    return nil, err
}
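A usage sketch for the new PeekFile, which presents an in-memory peeked prefix plus an on-disk temp file as one seekable stream; the file contents and paths here are illustrative only:

package main

import (
    "fmt"
    "io"
    "os"

    "github.com/OpenListTeam/OpenList/v4/pkg/buffer"
)

func main() {
    tmp, err := os.CreateTemp("", "peek-*")
    if err != nil {
        panic(err)
    }
    defer os.Remove(tmp.Name())
    defer tmp.Close()
    if _, err := tmp.WriteString("Team/OpenList"); err != nil {
        panic(err)
    }

    peek := buffer.NewReader([]byte("OpenList")) // bytes already consumed from the stream
    pf, err := buffer.NewPeekFile(peek, tmp)
    if err != nil {
        panic(err)
    }
    fmt.Println(pf.Size()) // expected: 21 (peeked prefix + file size)

    all, err := io.ReadAll(pf)
    fmt.Println(string(all), err) // expected: OpenListTeam/OpenList <nil>
}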
@ -57,6 +57,11 @@ var (
    Supported []*HashType
)

func GetHashByName(name string) (ht *HashType, ok bool) {
    ht, ok = name2hash[name]
    return
}

// RegisterHash adds a new Hash to the list and returns its Type
func RegisterHash(name, alias string, width int, newFunc func() hash.Hash) *HashType {
    return RegisterHashWithParam(name, alias, width, func(a ...any) hash.Hash { return newFunc() })
@ -200,26 +200,37 @@ type SyncClosers struct {
var _ SyncClosersIF = (*SyncClosers)(nil)

func (c *SyncClosers) AcquireReference() bool {
    ref := atomic.AddInt32(&c.ref, 1)
    if ref > 0 {
        // log.Debugf("SyncClosers.AcquireReference %p,ref=%d\n", c, ref)
        return true
    for {
        ref := atomic.LoadInt32(&c.ref)
        if ref < 0 {
            return false
        }
        newRef := ref + 1
        if atomic.CompareAndSwapInt32(&c.ref, ref, newRef) {
            log.Debugf("AcquireReference %p: %d", c, newRef)
            return true
        }
    }
    atomic.StoreInt32(&c.ref, math.MinInt16)
    return false
}

func (c *SyncClosers) Close() error {
    ref := atomic.AddInt32(&c.ref, -1)
    if ref < -1 {
        atomic.StoreInt32(&c.ref, math.MinInt16)
        return nil
    for {
        ref := atomic.LoadInt32(&c.ref)
        if ref < 0 {
            return nil
        }
        newRef := ref - 1
        if newRef <= 0 {
            newRef = math.MinInt16
        }
        if atomic.CompareAndSwapInt32(&c.ref, ref, newRef) {
            log.Debugf("Close %p: %d", c, ref)
            if newRef > 0 {
                return nil
            }
            break
        }
    }
    // log.Debugf("SyncClosers.Close %p,ref=%d\n", c, ref+1)
    if ref > 0 {
        return nil
    }
    atomic.StoreInt32(&c.ref, math.MinInt16)

    var errs []error
    for _, closer := range c.closers {
@ -147,7 +147,7 @@ func proxy(c *gin.Context, link *model.Link, file model.Obj, proxyRange bool) {
        if Writer.IsWritten() {
            log.Errorf("%s %s local proxy error: %+v", c.Request.Method, c.Request.URL.Path, err)
        } else {
            if statusCode, ok := errors.Unwrap(err).(net.ErrorHttpStatusCode); ok {
            if statusCode, ok := errors.Unwrap(err).(net.HttpStatusCodeError); ok {
                common.ErrorPage(c, err, int(statusCode), true)
            } else {
                common.ErrorPage(c, err, 500, true)
@ -33,18 +33,19 @@ type DirReq struct {
}

type ObjResp struct {
    Id          string                     `json:"id"`
    Path        string                     `json:"path"`
    Name        string                     `json:"name"`
    Size        int64                      `json:"size"`
    IsDir       bool                       `json:"is_dir"`
    Modified    time.Time                  `json:"modified"`
    Created     time.Time                  `json:"created"`
    Sign        string                     `json:"sign"`
    Thumb       string                     `json:"thumb"`
    Type        int                        `json:"type"`
    HashInfoStr string                     `json:"hashinfo"`
    HashInfo    map[*utils.HashType]string `json:"hash_info"`
    Id           string                        `json:"id"`
    Path         string                        `json:"path"`
    Name         string                        `json:"name"`
    Size         int64                         `json:"size"`
    IsDir        bool                          `json:"is_dir"`
    Modified     time.Time                     `json:"modified"`
    Created      time.Time                     `json:"created"`
    Sign         string                        `json:"sign"`
    Thumb        string                        `json:"thumb"`
    Type         int                           `json:"type"`
    HashInfoStr  string                        `json:"hashinfo"`
    HashInfo     map[*utils.HashType]string    `json:"hash_info"`
    MountDetails *model.StorageDetailsWithName `json:"mount_details,omitempty"`
}

type FsListResp struct {
@ -98,7 +99,10 @@ func FsList(c *gin.Context, req *ListReq, user *model.User) {
        common.ErrorStrResp(c, "Refresh without permission", 403)
        return
    }
    objs, err := fs.List(c.Request.Context(), reqPath, &fs.ListArgs{Refresh: req.Refresh})
    objs, err := fs.List(c.Request.Context(), reqPath, &fs.ListArgs{
        Refresh:            req.Refresh,
        WithStorageDetails: !user.IsGuest() && !setting.GetBool(conf.HideStorageDetails),
    })
    if err != nil {
        common.ErrorResp(c, err, 500)
        return
@ -224,19 +228,21 @@ func toObjsResp(objs []model.Obj, parent string, encrypt bool) []ObjResp {
    var resp []ObjResp
    for _, obj := range objs {
        thumb, _ := model.GetThumb(obj)
        mountDetails, _ := model.GetStorageDetails(obj)
        resp = append(resp, ObjResp{
            Id:          obj.GetID(),
            Path:        obj.GetPath(),
            Name:        obj.GetName(),
            Size:        obj.GetSize(),
            IsDir:       obj.IsDir(),
            Modified:    obj.ModTime(),
            Created:     obj.CreateTime(),
            HashInfoStr: obj.GetHash().String(),
            HashInfo:    obj.GetHash().Export(),
            Sign:        common.Sign(obj, parent, encrypt),
            Thumb:       thumb,
            Type:        utils.GetObjType(obj.GetName(), obj.IsDir()),
            Id:           obj.GetID(),
            Path:         obj.GetPath(),
            Name:         obj.GetName(),
            Size:         obj.GetSize(),
            IsDir:        obj.IsDir(),
            Modified:     obj.ModTime(),
            Created:      obj.CreateTime(),
            HashInfoStr:  obj.GetHash().String(),
            HashInfo:     obj.GetHash().Export(),
            Sign:         common.Sign(obj, parent, encrypt),
            Thumb:        thumb,
            Type:         utils.GetObjType(obj.GetName(), obj.IsDir()),
            MountDetails: mountDetails,
        })
    }
    return resp
@ -293,7 +299,9 @@ func FsGet(c *gin.Context, req *FsGetReq, user *model.User) {
        common.ErrorStrResp(c, "password is incorrect or you have no permission", 403)
        return
    }
    obj, err := fs.Get(c.Request.Context(), reqPath, &fs.GetArgs{})
    obj, err := fs.Get(c.Request.Context(), reqPath, &fs.GetArgs{
        WithStorageDetails: !user.IsGuest() && !setting.GetBool(conf.HideStorageDetails),
    })
    if err != nil {
        common.ErrorResp(c, err, 500)
        return
@ -301,8 +309,8 @@ func FsGet(c *gin.Context, req *FsGetReq, user *model.User) {
    var rawURL string

    storage, err := fs.GetStorage(reqPath, &fs.GetStoragesArgs{})
    provider := "unknown"
    if err == nil {
    provider, ok := model.GetProvider(obj)
    if !ok && err == nil {
        provider = storage.Config().Name
    }
    if !obj.IsDir() {
@ -350,20 +358,22 @@ func FsGet(c *gin.Context, req *FsGetReq, user *model.User) {
    }
    parentMeta, _ := op.GetNearestMeta(parentPath)
    thumb, _ := model.GetThumb(obj)
    mountDetails, _ := model.GetStorageDetails(obj)
    common.SuccessResp(c, FsGetResp{
        ObjResp: ObjResp{
            Id:          obj.GetID(),
            Path:        obj.GetPath(),
            Name:        obj.GetName(),
            Size:        obj.GetSize(),
            IsDir:       obj.IsDir(),
            Modified:    obj.ModTime(),
            Created:     obj.CreateTime(),
            HashInfoStr: obj.GetHash().String(),
            HashInfo:    obj.GetHash().Export(),
            Sign:        common.Sign(obj, parentPath, isEncrypt(meta, reqPath)),
            Type:        utils.GetFileType(obj.GetName()),
            Thumb:       thumb,
            Id:           obj.GetID(),
            Path:         obj.GetPath(),
            Name:         obj.GetName(),
            Size:         obj.GetSize(),
            IsDir:        obj.IsDir(),
            Modified:     obj.ModTime(),
            Created:      obj.CreateTime(),
            HashInfoStr:  obj.GetHash().String(),
            HashInfo:     obj.GetHash().Export(),
            Sign:         common.Sign(obj, parentPath, isEncrypt(meta, reqPath)),
            Type:         utils.GetFileType(obj.GetName()),
            Thumb:        thumb,
            MountDetails: mountDetails,
        },
        RawURL: rawURL,
        Readme: getReadme(meta, reqPath),
@ -56,14 +56,17 @@ func FsStream(c *gin.Context) {
        }
    }
    dir, name := stdpath.Split(path)
    sizeStr := c.GetHeader("Content-Length")
    if sizeStr == "" {
        sizeStr = "0"
    }
    size, err := strconv.ParseInt(sizeStr, 10, 64)
    if err != nil {
        common.ErrorResp(c, err, 400)
        return
    // If neither the Content-Length nor the X-File-Size header is present, size = -1, indicating a streaming upload of unknown size
    size := c.Request.ContentLength
    if size < 0 {
        sizeStr := c.GetHeader("X-File-Size")
        if sizeStr != "" {
            size, err = strconv.ParseInt(sizeStr, 10, 64)
            if err != nil {
                common.ErrorResp(c, err, 400)
                return
            }
        }
    }
    h := make(map[*utils.HashType]string)
    if md5 := c.GetHeader("X-File-Md5"); md5 != "" {
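From the client side, the change above means an upload with an unknown Content-Length can still declare its size through the X-File-Size header. A sketch of such a request; the endpoint URL and Authorization value are placeholders, and only the X-File-Size header comes from the diff:

package main

import (
    "fmt"
    "io"
    "net/http"
    "strconv"
    "strings"
)

func streamUpload(url, token string, body io.Reader, size int64) (*http.Response, error) {
    req, err := http.NewRequest(http.MethodPut, url, body)
    if err != nil {
        return nil, err
    }
    req.Header.Set("Authorization", token)
    if size >= 0 {
        // Advertise the size explicitly; without this (and without a usable
        // Content-Length) the server falls back to size = -1 streaming mode.
        req.ContentLength = size
        req.Header.Set("X-File-Size", strconv.FormatInt(size, 10))
    }
    return http.DefaultClient.Do(req)
}

func main() {
    data := "hello"
    resp, err := streamUpload("http://localhost:5244/api/fs/put", "TOKEN", // placeholder endpoint and token
        strings.NewReader(data), int64(len(data)))
    if err != nil {
        fmt.Println(err)
        return
    }
    defer resp.Body.Close()
    fmt.Println(resp.Status)
}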
@ -3,9 +3,11 @@ package handles
import (
    "context"
    "strconv"
    "sync"

    "github.com/OpenListTeam/OpenList/v4/internal/conf"
    "github.com/OpenListTeam/OpenList/v4/internal/db"
    "github.com/OpenListTeam/OpenList/v4/internal/driver"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/internal/op"
    "github.com/OpenListTeam/OpenList/v4/server/common"
@ -13,6 +15,42 @@ import (
    log "github.com/sirupsen/logrus"
)

type StorageResp struct {
    model.Storage
    MountDetails *model.StorageDetails `json:"mount_details,omitempty"`
}

func makeStorageResp(c *gin.Context, storages []model.Storage) []*StorageResp {
    ret := make([]*StorageResp, len(storages))
    var wg sync.WaitGroup
    for i, s := range storages {
        ret[i] = &StorageResp{
            Storage:      s,
            MountDetails: nil,
        }
        d, err := op.GetStorageByMountPath(s.MountPath)
        if err != nil {
            continue
        }
        wd, ok := d.(driver.WithDetails)
        if !ok {
            continue
        }
        wg.Add(1)
        go func() {
            defer wg.Done()
            details, err := wd.GetDetails(c)
            if err != nil {
                log.Errorf("failed get %s details: %+v", s.MountPath, err)
                return
            }
            ret[i].MountDetails = details
        }()
    }
    wg.Wait()
    return ret
}

func ListStorages(c *gin.Context) {
    var req model.PageReq
    if err := c.ShouldBind(&req); err != nil {
@ -27,7 +65,7 @@ func ListStorages(c *gin.Context) {
        return
    }
    common.SuccessResp(c, common.PageResp{
        Content: storages,
        Content: makeStorageResp(c, storages),
        Total:   total,
    })
}
@ -14,6 +14,7 @@ import (
    "net/url"
    "os"
    "path"
    "strconv"
    "strings"
    "time"

@ -271,7 +272,7 @@ func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (sta
    }
    err = common.Proxy(w, r, link, fi)
    if err != nil {
        if statusCode, ok := errors.Unwrap(err).(net.ErrorHttpStatusCode); ok {
        if statusCode, ok := errors.Unwrap(err).(net.HttpStatusCodeError); ok {
            return int(statusCode), err
        }
        return http.StatusInternalServerError, fmt.Errorf("webdav proxy error: %+v", err)
@ -341,9 +342,19 @@ func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int,
    if err != nil {
        return http.StatusForbidden, err
    }
    size := r.ContentLength
    if size < 0 {
        sizeStr := r.Header.Get("X-File-Size")
        if sizeStr != "" {
            size, err = strconv.ParseInt(sizeStr, 10, 64)
            if err != nil {
                return http.StatusBadRequest, err
            }
        }
    }
    obj := model.Object{
        Name:     path.Base(reqPath),
        Size:     r.ContentLength,
        Size:     size,
        Modified: h.getModTime(r),
        Ctime:    h.getCreateTime(r),
    }