mirror of
https://github.com/OpenListTeam/OpenList.git
synced 2025-09-20 20:56:20 +08:00
Compare commits
8 Commits
Author | SHA1 | Date | |
---|---|---|---|
3936e736e6 | |||
68433d4f5b | |||
cc16cb35bf | |||
d3bc6321f4 | |||
cbbb5ad231 | |||
c1d03c5bcc | |||
61a8ed515f | |||
bbb7c06504 |
2
.github/workflows/beta_release.yml
vendored
2
.github/workflows/beta_release.yml
vendored
@ -87,7 +87,7 @@ jobs:
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: "1.24.5"
|
||||
go-version: "1.25.0"
|
||||
|
||||
- name: Setup web
|
||||
run: bash build.sh dev web
|
||||
|
2
.github/workflows/build.yml
vendored
2
.github/workflows/build.yml
vendored
@ -33,7 +33,7 @@ jobs:
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: "1.24.5"
|
||||
go-version: "1.25.0"
|
||||
|
||||
- name: Setup web
|
||||
run: bash build.sh dev web
|
||||
|
2
.github/workflows/release.yml
vendored
2
.github/workflows/release.yml
vendored
@ -46,7 +46,7 @@ jobs:
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: '1.24'
|
||||
go-version: '1.25.0'
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
4
.github/workflows/release_docker.yml
vendored
4
.github/workflows/release_docker.yml
vendored
@ -47,7 +47,7 @@ jobs:
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: 'stable'
|
||||
go-version: '1.25.0'
|
||||
|
||||
- name: Cache Musl
|
||||
id: cache-musl
|
||||
@ -87,7 +87,7 @@ jobs:
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: 'stable'
|
||||
go-version: '1.25.0'
|
||||
|
||||
- name: Cache Musl
|
||||
id: cache-musl
|
||||
|
2
.github/workflows/test_docker.yml
vendored
2
.github/workflows/test_docker.yml
vendored
@ -36,7 +36,7 @@ jobs:
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: 'stable'
|
||||
go-version: '1.25.0'
|
||||
|
||||
- name: Cache Musl
|
||||
id: cache-musl
|
||||
|
6
build.sh
6
build.sh
@ -236,7 +236,7 @@ BuildRelease() {
|
||||
BuildLoongGLIBC() {
|
||||
local target_abi="$2"
|
||||
local output_file="$1"
|
||||
local oldWorldGoVersion="1.24.3"
|
||||
local oldWorldGoVersion="1.25.0"
|
||||
|
||||
if [ "$target_abi" = "abi1.0" ]; then
|
||||
echo building for linux-loong64-abi1.0
|
||||
@ -254,13 +254,13 @@ BuildLoongGLIBC() {
|
||||
|
||||
# Download and setup patched Go compiler for old-world
|
||||
if ! curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
|
||||
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/go${oldWorldGoVersion}.linux-amd64.tar.gz" \
|
||||
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250821/go${oldWorldGoVersion}.linux-amd64.tar.gz" \
|
||||
-o go-loong64-abi1.0.tar.gz; then
|
||||
echo "Error: Failed to download patched Go compiler for old-world ABI1.0"
|
||||
if [ -n "$GITHUB_TOKEN" ]; then
|
||||
echo "Error output from curl:"
|
||||
curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
|
||||
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/go${oldWorldGoVersion}.linux-amd64.tar.gz" \
|
||||
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250821/go${oldWorldGoVersion}.linux-amd64.tar.gz" \
|
||||
-o go-loong64-abi1.0.tar.gz || true
|
||||
fi
|
||||
return 1
|
||||
|
@ -337,6 +337,27 @@ func (d *Open115) OfflineList(ctx context.Context) (*sdk.OfflineTaskListResp, er
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (d *Open115) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||
userInfo, err := d.client.UserInfo(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
total, err := userInfo.RtSpaceInfo.AllTotal.Size.Int64()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
free, err := userInfo.RtSpaceInfo.AllRemain.Size.Int64()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &model.StorageDetails{
|
||||
DiskUsage: model.DiskUsage{
|
||||
TotalSpace: uint64(total),
|
||||
FreeSpace: uint64(free),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// func (d *Open115) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
|
||||
// // TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
|
||||
// return nil, errs.NotImplement
|
||||
|
@ -28,7 +28,7 @@ func (f File) CreateTime() time.Time {
|
||||
}
|
||||
|
||||
func (f File) GetHash() utils.HashInfo {
|
||||
return utils.HashInfo{}
|
||||
return utils.NewHashInfo(utils.MD5, f.Etag)
|
||||
}
|
||||
|
||||
func (f File) GetPath() string {
|
||||
|
@ -214,5 +214,20 @@ func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
|
||||
return nil, fmt.Errorf("upload complete timeout")
|
||||
}
|
||||
|
||||
func (d *Open123) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||
userInfo, err := d.getUserInfo()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
total := userInfo.Data.SpacePermanent + userInfo.Data.SpaceTemp
|
||||
free := total - userInfo.Data.SpaceUsed
|
||||
return &model.StorageDetails{
|
||||
DiskUsage: model.DiskUsage{
|
||||
TotalSpace: total,
|
||||
FreeSpace: free,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
var _ driver.Driver = (*Open123)(nil)
|
||||
var _ driver.PutResult = (*Open123)(nil)
|
||||
|
@ -133,9 +133,9 @@ type UserInfoResp struct {
|
||||
// HeadImage string `json:"headImage"`
|
||||
// Passport string `json:"passport"`
|
||||
// Mail string `json:"mail"`
|
||||
// SpaceUsed int64 `json:"spaceUsed"`
|
||||
// SpacePermanent int64 `json:"spacePermanent"`
|
||||
// SpaceTemp int64 `json:"spaceTemp"`
|
||||
SpaceUsed uint64 `json:"spaceUsed"`
|
||||
SpacePermanent uint64 `json:"spacePermanent"`
|
||||
SpaceTemp uint64 `json:"spaceTemp"`
|
||||
// SpaceTempExpr int64 `json:"spaceTempExpr"`
|
||||
// Vip bool `json:"vip"`
|
||||
// DirectTraffic int64 `json:"directTraffic"`
|
||||
|
@ -24,7 +24,7 @@ type File struct {
|
||||
}
|
||||
|
||||
func (f File) GetHash() utils.HashInfo {
|
||||
return utils.HashInfo{}
|
||||
return utils.NewHashInfo(utils.MD5, f.Etag)
|
||||
}
|
||||
|
||||
func (f File) GetPath() string {
|
||||
|
@ -785,8 +785,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
|
||||
|
||||
// step.4 上传切片
|
||||
uploadUrl := uploadUrls[0]
|
||||
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false,
|
||||
driver.NewLimitedUploadStream(ctx, rateLimitedRd), isFamily)
|
||||
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, rateLimitedRd, isFamily)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -79,21 +79,45 @@ func (d *Alias) Get(ctx context.Context, path string) (model.Obj, error) {
|
||||
if !ok {
|
||||
return nil, errs.ObjectNotFound
|
||||
}
|
||||
var ret *model.Object
|
||||
provider := ""
|
||||
for _, dst := range dsts {
|
||||
obj, err := fs.Get(ctx, stdpath.Join(dst, sub), &fs.GetArgs{NoLog: true})
|
||||
rawPath := stdpath.Join(dst, sub)
|
||||
obj, err := fs.Get(ctx, rawPath, &fs.GetArgs{NoLog: true})
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
return &model.Object{
|
||||
Path: path,
|
||||
Name: obj.GetName(),
|
||||
Size: obj.GetSize(),
|
||||
Modified: obj.ModTime(),
|
||||
IsFolder: obj.IsDir(),
|
||||
HashInfo: obj.GetHash(),
|
||||
storage, err := fs.GetStorage(rawPath, &fs.GetStoragesArgs{})
|
||||
if ret == nil {
|
||||
ret = &model.Object{
|
||||
Path: path,
|
||||
Name: obj.GetName(),
|
||||
Size: obj.GetSize(),
|
||||
Modified: obj.ModTime(),
|
||||
IsFolder: obj.IsDir(),
|
||||
HashInfo: obj.GetHash(),
|
||||
}
|
||||
if !d.ProviderPassThrough || err != nil {
|
||||
break
|
||||
}
|
||||
provider = storage.Config().Name
|
||||
} else if err != nil || provider != storage.GetStorage().Driver {
|
||||
provider = ""
|
||||
break
|
||||
}
|
||||
}
|
||||
if ret == nil {
|
||||
return nil, errs.ObjectNotFound
|
||||
}
|
||||
if provider != "" {
|
||||
return &model.ObjectProvider{
|
||||
Object: *ret,
|
||||
Provider: model.Provider{
|
||||
Provider: provider,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
return nil, errs.ObjectNotFound
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (d *Alias) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
||||
@ -186,6 +210,35 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
|
||||
return nil, errs.ObjectNotFound
|
||||
}
|
||||
|
||||
func (d *Alias) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
|
||||
root, sub := d.getRootAndPath(args.Obj.GetPath())
|
||||
dsts, ok := d.pathMap[root]
|
||||
if !ok {
|
||||
return nil, errs.ObjectNotFound
|
||||
}
|
||||
for _, dst := range dsts {
|
||||
rawPath := stdpath.Join(dst, sub)
|
||||
storage, actualPath, err := op.GetStorageAndActualPath(rawPath)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
other, ok := storage.(driver.Other)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
obj, err := op.GetUnwrap(ctx, storage, actualPath)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
return other.Other(ctx, model.OtherArgs{
|
||||
Obj: obj,
|
||||
Method: args.Method,
|
||||
Data: args.Data,
|
||||
})
|
||||
}
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
||||
func (d *Alias) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
|
||||
if !d.Writable {
|
||||
return errs.PermissionDenied
|
||||
|
@ -15,6 +15,7 @@ type Addition struct {
|
||||
DownloadConcurrency int `json:"download_concurrency" default:"0" required:"false" type:"number" help:"Need to enable proxy"`
|
||||
DownloadPartSize int `json:"download_part_size" default:"0" type:"number" required:"false" help:"Need to enable proxy. Unit: KB"`
|
||||
Writable bool `json:"writable" type:"bool" default:"false"`
|
||||
ProviderPassThrough bool `json:"provider_pass_through" type:"bool" default:"false"`
|
||||
}
|
||||
|
||||
var config = driver.Config{
|
||||
|
@ -291,6 +291,21 @@ func (d *AliyundriveOpen) Other(ctx context.Context, args model.OtherArgs) (inte
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (d *AliyundriveOpen) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||
res, err := d.request(ctx, limiterOther, "/adrive/v1.0/user/getSpaceInfo", http.MethodPost, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
total := utils.Json.Get(res, "personal_space_info", "total_size").ToUint64()
|
||||
used := utils.Json.Get(res, "personal_space_info", "used_size").ToUint64()
|
||||
return &model.StorageDetails{
|
||||
DiskUsage: model.DiskUsage{
|
||||
TotalSpace: total,
|
||||
FreeSpace: total - used,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
var _ driver.Driver = (*AliyundriveOpen)(nil)
|
||||
var _ driver.MkdirResult = (*AliyundriveOpen)(nil)
|
||||
var _ driver.MoveResult = (*AliyundriveOpen)(nil)
|
||||
|
@ -20,6 +20,7 @@ import (
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/baidu_netdisk"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/baidu_photo"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/chaoxing"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/chunk"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/cloudreve"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/cloudreve_v4"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/cnb_releases"
|
||||
|
@ -364,4 +364,12 @@ func (d *BaiduNetdisk) uploadSlice(ctx context.Context, params map[string]string
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *BaiduNetdisk) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||
du, err := d.quota()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &model.StorageDetails{DiskUsage: *du}, nil
|
||||
}
|
||||
|
||||
var _ driver.Driver = (*BaiduNetdisk)(nil)
|
||||
|
@ -189,3 +189,12 @@ type PrecreateResp struct {
|
||||
// return_type=2
|
||||
File File `json:"info"`
|
||||
}
|
||||
|
||||
type QuotaResp struct {
|
||||
Errno int `json:"errno"`
|
||||
RequestId int64 `json:"request_id"`
|
||||
Total uint64 `json:"total"`
|
||||
Used uint64 `json:"used"`
|
||||
//Free uint64 `json:"free"`
|
||||
//Expire bool `json:"expire"`
|
||||
}
|
||||
|
@ -381,6 +381,18 @@ func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 {
|
||||
return maxSliceSize
|
||||
}
|
||||
|
||||
func (d *BaiduNetdisk) quota() (*model.DiskUsage, error) {
|
||||
var resp QuotaResp
|
||||
_, err := d.request("https://pan.baidu.com/api/quota", http.MethodGet, nil, &resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &model.DiskUsage{
|
||||
TotalSpace: resp.Total,
|
||||
FreeSpace: resp.Total - resp.Used,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// func encodeURIComponent(str string) string {
|
||||
// r := url.QueryEscape(str)
|
||||
// r = strings.ReplaceAll(r, "+", "%20")
|
||||
|
488
drivers/chunk/driver.go
Normal file
488
drivers/chunk/driver.go
Normal file
@ -0,0 +1,488 @@
|
||||
package chunk
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
stdpath "path"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/fs"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/sign"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/stream"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
"github.com/OpenListTeam/OpenList/v4/server/common"
|
||||
)
|
||||
|
||||
type Chunk struct {
|
||||
model.Storage
|
||||
Addition
|
||||
}
|
||||
|
||||
func (d *Chunk) Config() driver.Config {
|
||||
return config
|
||||
}
|
||||
|
||||
func (d *Chunk) GetAddition() driver.Additional {
|
||||
return &d.Addition
|
||||
}
|
||||
|
||||
func (d *Chunk) Init(ctx context.Context) error {
|
||||
if d.PartSize <= 0 {
|
||||
return errors.New("part size must be positive")
|
||||
}
|
||||
d.RemotePath = utils.FixAndCleanPath(d.RemotePath)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Chunk) Drop(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Chunk) Get(ctx context.Context, path string) (model.Obj, error) {
|
||||
if utils.PathEqual(path, "/") {
|
||||
return &model.Object{
|
||||
Name: "Root",
|
||||
IsFolder: true,
|
||||
Path: "/",
|
||||
}, nil
|
||||
}
|
||||
remoteStorage, remoteActualPath, err := op.GetStorageAndActualPath(d.RemotePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
remoteActualPath = stdpath.Join(remoteActualPath, path)
|
||||
if remoteObj, err := op.Get(ctx, remoteStorage, remoteActualPath); err == nil {
|
||||
return &model.Object{
|
||||
Path: path,
|
||||
Name: remoteObj.GetName(),
|
||||
Size: remoteObj.GetSize(),
|
||||
Modified: remoteObj.ModTime(),
|
||||
IsFolder: remoteObj.IsDir(),
|
||||
HashInfo: remoteObj.GetHash(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
remoteActualDir, name := stdpath.Split(remoteActualPath)
|
||||
chunkName := "[openlist_chunk]" + name
|
||||
chunkObjs, err := op.List(ctx, remoteStorage, stdpath.Join(remoteActualDir, chunkName), model.ListArgs{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var totalSize int64 = 0
|
||||
// 0号块必须存在
|
||||
chunkSizes := []int64{-1}
|
||||
h := make(map[*utils.HashType]string)
|
||||
var first model.Obj
|
||||
for _, o := range chunkObjs {
|
||||
if o.IsDir() {
|
||||
continue
|
||||
}
|
||||
if after, ok := strings.CutPrefix(o.GetName(), "hash_"); ok {
|
||||
hn, value, ok := strings.Cut(strings.TrimSuffix(after, d.CustomExt), "_")
|
||||
if ok {
|
||||
ht, ok := utils.GetHashByName(hn)
|
||||
if ok {
|
||||
h[ht] = value
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
idx, err := strconv.Atoi(strings.TrimSuffix(o.GetName(), d.CustomExt))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
totalSize += o.GetSize()
|
||||
if len(chunkSizes) > idx {
|
||||
if idx == 0 {
|
||||
first = o
|
||||
}
|
||||
chunkSizes[idx] = o.GetSize()
|
||||
} else if len(chunkSizes) == idx {
|
||||
chunkSizes = append(chunkSizes, o.GetSize())
|
||||
} else {
|
||||
newChunkSizes := make([]int64, idx+1)
|
||||
copy(newChunkSizes, chunkSizes)
|
||||
chunkSizes = newChunkSizes
|
||||
chunkSizes[idx] = o.GetSize()
|
||||
}
|
||||
}
|
||||
// 检查0号块不等于-1 以支持空文件
|
||||
// 如果块数量大于1 最后一块不可能为0
|
||||
// 只检查中间块是否有0
|
||||
for i, l := 0, len(chunkSizes)-2; ; i++ {
|
||||
if i == 0 {
|
||||
if chunkSizes[i] == -1 {
|
||||
return nil, fmt.Errorf("chunk part[%d] are missing", i)
|
||||
}
|
||||
} else if chunkSizes[i] == 0 {
|
||||
return nil, fmt.Errorf("chunk part[%d] are missing", i)
|
||||
}
|
||||
if i >= l {
|
||||
break
|
||||
}
|
||||
}
|
||||
reqDir, _ := stdpath.Split(path)
|
||||
objRes := chunkObject{
|
||||
Object: model.Object{
|
||||
Path: stdpath.Join(reqDir, chunkName),
|
||||
Name: name,
|
||||
Size: totalSize,
|
||||
Modified: first.ModTime(),
|
||||
Ctime: first.CreateTime(),
|
||||
},
|
||||
chunkSizes: chunkSizes,
|
||||
}
|
||||
if len(h) > 0 {
|
||||
objRes.HashInfo = utils.NewHashInfoByMap(h)
|
||||
}
|
||||
return &objRes, nil
|
||||
}
|
||||
|
||||
func (d *Chunk) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
||||
remoteStorage, remoteActualPath, err := op.GetStorageAndActualPath(d.RemotePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
remoteActualDir := stdpath.Join(remoteActualPath, dir.GetPath())
|
||||
remoteObjs, err := op.List(ctx, remoteStorage, remoteActualDir, model.ListArgs{
|
||||
ReqPath: args.ReqPath,
|
||||
Refresh: args.Refresh,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result := make([]model.Obj, 0, len(remoteObjs))
|
||||
for _, obj := range remoteObjs {
|
||||
rawName := obj.GetName()
|
||||
if obj.IsDir() {
|
||||
if name, ok := strings.CutPrefix(rawName, "[openlist_chunk]"); ok {
|
||||
chunkObjs, err := op.List(ctx, remoteStorage, stdpath.Join(remoteActualDir, rawName), model.ListArgs{
|
||||
ReqPath: stdpath.Join(args.ReqPath, rawName),
|
||||
Refresh: args.Refresh,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
totalSize := int64(0)
|
||||
h := make(map[*utils.HashType]string)
|
||||
first := obj
|
||||
for _, o := range chunkObjs {
|
||||
if o.IsDir() {
|
||||
continue
|
||||
}
|
||||
if after, ok := strings.CutPrefix(strings.TrimSuffix(o.GetName(), d.CustomExt), "hash_"); ok {
|
||||
hn, value, ok := strings.Cut(after, "_")
|
||||
if ok {
|
||||
ht, ok := utils.GetHashByName(hn)
|
||||
if ok {
|
||||
h[ht] = value
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
idx, err := strconv.Atoi(strings.TrimSuffix(o.GetName(), d.CustomExt))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if idx == 0 {
|
||||
first = o
|
||||
}
|
||||
totalSize += o.GetSize()
|
||||
}
|
||||
objRes := model.Object{
|
||||
Name: name,
|
||||
Size: totalSize,
|
||||
Modified: first.ModTime(),
|
||||
Ctime: first.CreateTime(),
|
||||
}
|
||||
if len(h) > 0 {
|
||||
objRes.HashInfo = utils.NewHashInfoByMap(h)
|
||||
}
|
||||
if !d.Thumbnail {
|
||||
result = append(result, &objRes)
|
||||
} else {
|
||||
thumbPath := stdpath.Join(args.ReqPath, ".thumbnails", name+".webp")
|
||||
thumb := fmt.Sprintf("%s/d%s?sign=%s",
|
||||
common.GetApiUrl(ctx),
|
||||
utils.EncodePath(thumbPath, true),
|
||||
sign.Sign(thumbPath))
|
||||
result = append(result, &model.ObjThumb{
|
||||
Object: objRes,
|
||||
Thumbnail: model.Thumbnail{
|
||||
Thumbnail: thumb,
|
||||
},
|
||||
})
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if !d.ShowHidden && strings.HasPrefix(rawName, ".") {
|
||||
continue
|
||||
}
|
||||
thumb, ok := model.GetThumb(obj)
|
||||
objRes := model.Object{
|
||||
Name: rawName,
|
||||
Size: obj.GetSize(),
|
||||
Modified: obj.ModTime(),
|
||||
IsFolder: obj.IsDir(),
|
||||
HashInfo: obj.GetHash(),
|
||||
}
|
||||
if !ok {
|
||||
result = append(result, &objRes)
|
||||
} else {
|
||||
result = append(result, &model.ObjThumb{
|
||||
Object: objRes,
|
||||
Thumbnail: model.Thumbnail{
|
||||
Thumbnail: thumb,
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (d *Chunk) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
||||
remoteStorage, remoteActualPath, err := op.GetStorageAndActualPath(d.RemotePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
chunkFile, ok := file.(*chunkObject)
|
||||
remoteActualPath = stdpath.Join(remoteActualPath, file.GetPath())
|
||||
if !ok {
|
||||
l, _, err := op.Link(ctx, remoteStorage, remoteActualPath, args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resultLink := *l
|
||||
resultLink.SyncClosers = utils.NewSyncClosers(l)
|
||||
return &resultLink, nil
|
||||
}
|
||||
fileSize := chunkFile.GetSize()
|
||||
mergedRrf := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
|
||||
start := httpRange.Start
|
||||
length := httpRange.Length
|
||||
if length < 0 || start+length > fileSize {
|
||||
length = fileSize - start
|
||||
}
|
||||
if length == 0 {
|
||||
return io.NopCloser(strings.NewReader("")), nil
|
||||
}
|
||||
rs := make([]io.Reader, 0)
|
||||
cs := make(utils.Closers, 0)
|
||||
var (
|
||||
rc io.ReadCloser
|
||||
readFrom bool
|
||||
)
|
||||
for idx, chunkSize := range chunkFile.chunkSizes {
|
||||
if readFrom {
|
||||
l, o, err := op.Link(ctx, remoteStorage, stdpath.Join(remoteActualPath, d.getPartName(idx)), args)
|
||||
if err != nil {
|
||||
_ = cs.Close()
|
||||
return nil, err
|
||||
}
|
||||
cs = append(cs, l)
|
||||
chunkSize2 := l.ContentLength
|
||||
if chunkSize2 <= 0 {
|
||||
chunkSize2 = o.GetSize()
|
||||
}
|
||||
if chunkSize2 != chunkSize {
|
||||
_ = cs.Close()
|
||||
return nil, fmt.Errorf("chunk part[%d] size not match", idx)
|
||||
}
|
||||
rrf, err := stream.GetRangeReaderFromLink(chunkSize2, l)
|
||||
if err != nil {
|
||||
_ = cs.Close()
|
||||
return nil, err
|
||||
}
|
||||
newLength := length - chunkSize2
|
||||
if newLength >= 0 {
|
||||
length = newLength
|
||||
rc, err = rrf.RangeRead(ctx, http_range.Range{Length: -1})
|
||||
} else {
|
||||
rc, err = rrf.RangeRead(ctx, http_range.Range{Length: length})
|
||||
}
|
||||
if err != nil {
|
||||
_ = cs.Close()
|
||||
return nil, err
|
||||
}
|
||||
rs = append(rs, rc)
|
||||
cs = append(cs, rc)
|
||||
if newLength <= 0 {
|
||||
return utils.ReadCloser{
|
||||
Reader: io.MultiReader(rs...),
|
||||
Closer: &cs,
|
||||
}, nil
|
||||
}
|
||||
} else if newStart := start - chunkSize; newStart >= 0 {
|
||||
start = newStart
|
||||
} else {
|
||||
l, o, err := op.Link(ctx, remoteStorage, stdpath.Join(remoteActualPath, d.getPartName(idx)), args)
|
||||
if err != nil {
|
||||
_ = cs.Close()
|
||||
return nil, err
|
||||
}
|
||||
cs = append(cs, l)
|
||||
chunkSize2 := l.ContentLength
|
||||
if chunkSize2 <= 0 {
|
||||
chunkSize2 = o.GetSize()
|
||||
}
|
||||
if chunkSize2 != chunkSize {
|
||||
_ = cs.Close()
|
||||
return nil, fmt.Errorf("chunk part[%d] size not match", idx)
|
||||
}
|
||||
rrf, err := stream.GetRangeReaderFromLink(chunkSize2, l)
|
||||
if err != nil {
|
||||
_ = cs.Close()
|
||||
return nil, err
|
||||
}
|
||||
rc, err = rrf.RangeRead(ctx, http_range.Range{Start: start, Length: -1})
|
||||
if err != nil {
|
||||
_ = cs.Close()
|
||||
return nil, err
|
||||
}
|
||||
length -= chunkSize2 - start
|
||||
cs = append(cs, rc)
|
||||
if length <= 0 {
|
||||
return utils.ReadCloser{
|
||||
Reader: rc,
|
||||
Closer: &cs,
|
||||
}, nil
|
||||
}
|
||||
rs = append(rs, rc)
|
||||
readFrom = true
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("invalid range: start=%d,length=%d,fileSize=%d", httpRange.Start, httpRange.Length, fileSize)
|
||||
}
|
||||
return &model.Link{
|
||||
RangeReader: stream.RangeReaderFunc(mergedRrf),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (d *Chunk) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
|
||||
path := stdpath.Join(d.RemotePath, parentDir.GetPath(), dirName)
|
||||
return fs.MakeDir(ctx, path)
|
||||
}
|
||||
|
||||
func (d *Chunk) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
|
||||
src := stdpath.Join(d.RemotePath, srcObj.GetPath())
|
||||
dst := stdpath.Join(d.RemotePath, dstDir.GetPath())
|
||||
_, err := fs.Move(ctx, src, dst)
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *Chunk) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
|
||||
if _, ok := srcObj.(*chunkObject); ok {
|
||||
newName = "[openlist_chunk]" + newName
|
||||
}
|
||||
return fs.Rename(ctx, stdpath.Join(d.RemotePath, srcObj.GetPath()), newName)
|
||||
}
|
||||
|
||||
func (d *Chunk) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
|
||||
dst := stdpath.Join(d.RemotePath, dstDir.GetPath())
|
||||
src := stdpath.Join(d.RemotePath, srcObj.GetPath())
|
||||
_, err := fs.Copy(ctx, src, dst)
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *Chunk) Remove(ctx context.Context, obj model.Obj) error {
|
||||
return fs.Remove(ctx, stdpath.Join(d.RemotePath, obj.GetPath()))
|
||||
}
|
||||
|
||||
func (d *Chunk) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
|
||||
remoteStorage, remoteActualPath, err := op.GetStorageAndActualPath(d.RemotePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if d.Thumbnail && dstDir.GetName() == ".thumbnails" {
|
||||
return op.Put(ctx, remoteStorage, stdpath.Join(remoteActualPath, dstDir.GetPath()), file, up)
|
||||
}
|
||||
upReader := &driver.ReaderUpdatingProgress{
|
||||
Reader: file,
|
||||
UpdateProgress: up,
|
||||
}
|
||||
dst := stdpath.Join(remoteActualPath, dstDir.GetPath(), "[openlist_chunk]"+file.GetName())
|
||||
if d.StoreHash {
|
||||
for ht, value := range file.GetHash().All() {
|
||||
_ = op.Put(ctx, remoteStorage, dst, &stream.FileStream{
|
||||
Obj: &model.Object{
|
||||
Name: fmt.Sprintf("hash_%s_%s%s", ht.Name, value, d.CustomExt),
|
||||
Size: 1,
|
||||
Modified: file.ModTime(),
|
||||
},
|
||||
Mimetype: "application/octet-stream",
|
||||
Reader: bytes.NewReader([]byte{0}), // 兼容不支持空文件的驱动
|
||||
}, nil, true)
|
||||
}
|
||||
}
|
||||
fullPartCount := int(file.GetSize() / d.PartSize)
|
||||
tailSize := file.GetSize() % d.PartSize
|
||||
if tailSize == 0 && fullPartCount > 0 {
|
||||
fullPartCount--
|
||||
tailSize = d.PartSize
|
||||
}
|
||||
partIndex := 0
|
||||
for partIndex < fullPartCount {
|
||||
err = op.Put(ctx, remoteStorage, dst, &stream.FileStream{
|
||||
Obj: &model.Object{
|
||||
Name: d.getPartName(partIndex),
|
||||
Size: d.PartSize,
|
||||
Modified: file.ModTime(),
|
||||
},
|
||||
Mimetype: file.GetMimetype(),
|
||||
Reader: io.LimitReader(upReader, d.PartSize),
|
||||
}, nil, true)
|
||||
if err != nil {
|
||||
_ = op.Remove(ctx, remoteStorage, dst)
|
||||
return err
|
||||
}
|
||||
partIndex++
|
||||
}
|
||||
err = op.Put(ctx, remoteStorage, dst, &stream.FileStream{
|
||||
Obj: &model.Object{
|
||||
Name: d.getPartName(fullPartCount),
|
||||
Size: tailSize,
|
||||
Modified: file.ModTime(),
|
||||
},
|
||||
Mimetype: file.GetMimetype(),
|
||||
Reader: upReader,
|
||||
}, nil)
|
||||
if err != nil {
|
||||
_ = op.Remove(ctx, remoteStorage, dst)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *Chunk) getPartName(part int) string {
|
||||
return fmt.Sprintf("%d%s", part, d.CustomExt)
|
||||
}
|
||||
|
||||
func (d *Chunk) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||
remoteStorage, err := fs.GetStorage(d.RemotePath, &fs.GetStoragesArgs{})
|
||||
if err != nil {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
wd, ok := remoteStorage.(driver.WithDetails)
|
||||
if !ok {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
remoteDetails, err := wd.GetDetails(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &model.StorageDetails{
|
||||
DiskUsage: remoteDetails.DiskUsage,
|
||||
}, nil
|
||||
}
|
||||
|
||||
var _ driver.Driver = (*Chunk)(nil)
|
31
drivers/chunk/meta.go
Normal file
31
drivers/chunk/meta.go
Normal file
@ -0,0 +1,31 @@
|
||||
package chunk
|
||||
|
||||
import (
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||
)
|
||||
|
||||
type Addition struct {
|
||||
RemotePath string `json:"remote_path" required:"true"`
|
||||
PartSize int64 `json:"part_size" required:"true" type:"number" help:"bytes"`
|
||||
CustomExt string `json:"custom_ext" type:"string"`
|
||||
StoreHash bool `json:"store_hash" type:"bool" default:"true"`
|
||||
|
||||
Thumbnail bool `json:"thumbnail" required:"true" default:"false" help:"enable thumbnail which pre-generated under .thumbnails folder"`
|
||||
ShowHidden bool `json:"show_hidden" default:"true" required:"false" help:"show hidden directories and files"`
|
||||
}
|
||||
|
||||
var config = driver.Config{
|
||||
Name: "Chunk",
|
||||
LocalSort: true,
|
||||
OnlyProxy: true,
|
||||
NoCache: true,
|
||||
DefaultRoot: "/",
|
||||
NoLinkURL: true,
|
||||
}
|
||||
|
||||
func init() {
|
||||
op.RegisterDriver(func() driver.Driver {
|
||||
return &Chunk{}
|
||||
})
|
||||
}
|
8
drivers/chunk/obj.go
Normal file
8
drivers/chunk/obj.go
Normal file
@ -0,0 +1,8 @@
|
||||
package chunk
|
||||
|
||||
import "github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
|
||||
type chunkObject struct {
|
||||
model.Object
|
||||
chunkSizes []int64
|
||||
}
|
@ -339,6 +339,21 @@ func (d *CloudreveV4) ArchiveDecompress(ctx context.Context, srcObj, dstDir mode
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||
// TODO return storage details (total space, free space, etc.)
|
||||
var r CapacityResp
|
||||
err := d.request(http.MethodGet, "/user/capacity", nil, &r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &model.StorageDetails{
|
||||
DiskUsage: model.DiskUsage{
|
||||
TotalSpace: r.Total,
|
||||
FreeSpace: r.Total - r.Used,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
//func (d *CloudreveV4) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
|
||||
// return nil, errs.NotSupport
|
||||
//}
|
||||
|
@ -204,3 +204,9 @@ type FolderSummaryResp struct {
|
||||
CalculatedAt time.Time `json:"calculated_at"`
|
||||
} `json:"folder_summary"`
|
||||
}
|
||||
|
||||
type CapacityResp struct {
|
||||
Total uint64 `json:"total"`
|
||||
Used uint64 `json:"used"`
|
||||
// StoragePackTotal uint64 `json:"storage_pack_total"`
|
||||
}
|
||||
|
@ -411,6 +411,20 @@ func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, streamer model.FileSt
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Crypt) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||
wd, ok := d.remoteStorage.(driver.WithDetails)
|
||||
if !ok {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
remoteDetails, err := wd.GetDetails(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &model.StorageDetails{
|
||||
DiskUsage: remoteDetails.DiskUsage,
|
||||
}, nil
|
||||
}
|
||||
|
||||
//func (d *Safe) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
|
||||
// return nil, errs.NotSupport
|
||||
//}
|
||||
|
@ -51,7 +51,7 @@ func (d *Local) Config() driver.Config {
|
||||
|
||||
func (d *Local) Init(ctx context.Context) error {
|
||||
if d.MkdirPerm == "" {
|
||||
d.mkdirPerm = 0777
|
||||
d.mkdirPerm = 0o777
|
||||
} else {
|
||||
v, err := strconv.ParseUint(d.MkdirPerm, 8, 32)
|
||||
if err != nil {
|
||||
@ -150,6 +150,7 @@ func (d *Local) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
|
||||
}
|
||||
return files, nil
|
||||
}
|
||||
|
||||
func (d *Local) FileInfoToObj(ctx context.Context, f fs.FileInfo, reqPath string, fullPath string) model.Obj {
|
||||
thumb := ""
|
||||
if d.Thumbnail {
|
||||
@ -198,7 +199,7 @@ func (d *Local) Get(ctx context.Context, path string) (model.Obj, error) {
|
||||
path = filepath.Join(d.GetRootPath(), path)
|
||||
f, err := os.Stat(path)
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "cannot find the file") {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, errs.ObjectNotFound
|
||||
}
|
||||
return nil, err
|
||||
@ -375,7 +376,7 @@ func (d *Local) Remove(ctx context.Context, obj model.Obj) error {
|
||||
}
|
||||
} else {
|
||||
if !utils.Exists(d.RecycleBinPath) {
|
||||
err = os.MkdirAll(d.RecycleBinPath, 0755)
|
||||
err = os.MkdirAll(d.RecycleBinPath, 0o755)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -434,4 +435,14 @@ func (d *Local) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Local) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||
du, err := getDiskUsage(d.RootFolderPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &model.StorageDetails{
|
||||
DiskUsage: du,
|
||||
}, nil
|
||||
}
|
||||
|
||||
var _ driver.Driver = (*Local)(nil)
|
||||
|
@ -5,8 +5,25 @@ package local
|
||||
import (
|
||||
"io/fs"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
)
|
||||
|
||||
// isHidden reports whether f is a hidden entry. On Unix-like systems a
// file is hidden exactly when its name starts with a dot; the containing
// directory path is irrelevant and therefore ignored.
func isHidden(f fs.FileInfo, _ string) bool {
	name := f.Name()
	return len(name) > 0 && name[0] == '.'
}
|
||||
|
||||
func getDiskUsage(path string) (model.DiskUsage, error) {
|
||||
var stat syscall.Statfs_t
|
||||
err := syscall.Statfs(path, &stat)
|
||||
if err != nil {
|
||||
return model.DiskUsage{}, err
|
||||
}
|
||||
total := stat.Blocks * uint64(stat.Bsize)
|
||||
free := stat.Bfree * uint64(stat.Bsize)
|
||||
return model.DiskUsage{
|
||||
TotalSpace: total,
|
||||
FreeSpace: free,
|
||||
}, nil
|
||||
}
|
||||
|
@ -1,22 +1,51 @@
|
||||
//go:build windows
|
||||
|
||||
package local
|
||||
|
||||
import (
|
||||
"io/fs"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// isHidden reports whether the entry f, located in directory fullPath,
// carries the Windows FILE_ATTRIBUTE_HIDDEN attribute. Any failure while
// converting the path or querying attributes is swallowed and reported
// as "not hidden".
func isHidden(f fs.FileInfo, fullPath string) bool {
	filePath := filepath.Join(fullPath, f.Name())
	namePtr, err := syscall.UTF16PtrFromString(filePath)
	if err != nil {
		return false
	}
	attrs, err := syscall.GetFileAttributes(namePtr)
	if err != nil {
		return false
	}
	return attrs&syscall.FILE_ATTRIBUTE_HIDDEN != 0
}
|
||||
//go:build windows
|
||||
|
||||
package local
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io/fs"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
func isHidden(f fs.FileInfo, fullPath string) bool {
|
||||
filePath := filepath.Join(fullPath, f.Name())
|
||||
namePtr, err := syscall.UTF16PtrFromString(filePath)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
attrs, err := syscall.GetFileAttributes(namePtr)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return attrs&syscall.FILE_ATTRIBUTE_HIDDEN != 0
|
||||
}
|
||||
|
||||
func getDiskUsage(path string) (model.DiskUsage, error) {
|
||||
abs, err := filepath.Abs(path)
|
||||
if err != nil {
|
||||
return model.DiskUsage{}, err
|
||||
}
|
||||
root := filepath.VolumeName(abs)
|
||||
if len(root) != 2 || root[1] != ':' {
|
||||
return model.DiskUsage{}, errors.New("cannot get disk label")
|
||||
}
|
||||
var freeBytes, totalBytes, totalFreeBytes uint64
|
||||
err = windows.GetDiskFreeSpaceEx(
|
||||
windows.StringToUTF16Ptr(root),
|
||||
&freeBytes,
|
||||
&totalBytes,
|
||||
&totalFreeBytes,
|
||||
)
|
||||
if err != nil {
|
||||
return model.DiskUsage{}, err
|
||||
}
|
||||
return model.DiskUsage{
|
||||
TotalSpace: totalBytes,
|
||||
FreeSpace: freeBytes,
|
||||
}, nil
|
||||
}
|
||||
|
@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||
@ -127,4 +128,22 @@ func (d *SFTP) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *SFTP) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||
stat, err := d.client.StatVFS(d.RootFolderPath)
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "unimplemented") {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
total := stat.Blocks * stat.Bsize
|
||||
free := stat.Bfree * stat.Bsize
|
||||
return &model.StorageDetails{
|
||||
DiskUsage: model.DiskUsage{
|
||||
TotalSpace: total,
|
||||
FreeSpace: free,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
var _ driver.Driver = (*SFTP)(nil)
|
||||
|
@ -205,6 +205,22 @@ func (d *SMB) Put(ctx context.Context, dstDir model.Obj, stream model.FileStream
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *SMB) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||
if err := d.checkConn(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stat, err := d.fs.Statfs(d.RootFolderPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &model.StorageDetails{
|
||||
DiskUsage: model.DiskUsage{
|
||||
TotalSpace: stat.BlockSize() * stat.TotalBlockCount(),
|
||||
FreeSpace: stat.BlockSize() * stat.AvailableBlockCount(),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
//func (d *SMB) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
|
||||
// return nil, errs.NotSupport
|
||||
//}
|
||||
|
@ -93,6 +93,11 @@ func (d *Template) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.O
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
||||
// GetDetails reports storage capacity information for this driver.
// The template deliberately leaves this unimplemented; concrete drivers
// should fill in total/free space here or omit the method entirely.
func (d *Template) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
	// TODO return storage details (total space, free space, etc.)
	return nil, errs.NotImplement
}
|
||||
|
||||
//func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
|
||||
// return nil, errs.NotSupport
|
||||
//}
|
||||
|
@ -1,10 +1,11 @@
|
||||
package archives
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
stdpath "path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
|
||||
@ -107,7 +108,7 @@ func (Archives) Decompress(ss []*stream.SeekableStream, outputPath string, args
|
||||
}
|
||||
if stat.IsDir() {
|
||||
isDir = true
|
||||
outputPath = stdpath.Join(outputPath, stat.Name())
|
||||
outputPath = filepath.Join(outputPath, stat.Name())
|
||||
err = os.Mkdir(outputPath, 0700)
|
||||
if err != nil {
|
||||
return filterPassword(err)
|
||||
@ -120,11 +121,14 @@ func (Archives) Decompress(ss []*stream.SeekableStream, outputPath string, args
|
||||
return err
|
||||
}
|
||||
relPath := strings.TrimPrefix(p, path+"/")
|
||||
dstPath := stdpath.Join(outputPath, relPath)
|
||||
dstPath := filepath.Join(outputPath, relPath)
|
||||
if !strings.HasPrefix(dstPath, outputPath+string(os.PathSeparator)) {
|
||||
return fmt.Errorf("illegal file path: %s", relPath)
|
||||
}
|
||||
if d.IsDir() {
|
||||
err = os.MkdirAll(dstPath, 0700)
|
||||
} else {
|
||||
dir := stdpath.Dir(dstPath)
|
||||
dir := filepath.Dir(dstPath)
|
||||
err = decompress(fsys, p, dir, func(_ float64) {})
|
||||
}
|
||||
return err
|
||||
|
@ -1,10 +1,11 @@
|
||||
package archives
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
fs2 "io/fs"
|
||||
"os"
|
||||
stdpath "path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||
@ -69,7 +70,11 @@ func decompress(fsys fs2.FS, filePath, targetPath string, up model.UpdateProgres
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f, err := os.OpenFile(stdpath.Join(targetPath, stat.Name()), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
|
||||
destPath := filepath.Join(targetPath, stat.Name())
|
||||
if !strings.HasPrefix(destPath, targetPath+string(os.PathSeparator)) {
|
||||
return fmt.Errorf("illegal file path: %s", stat.Name())
|
||||
}
|
||||
f, err := os.OpenFile(destPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1,9 +1,11 @@
|
||||
package iso9660
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
stdpath "path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||
@ -79,7 +81,11 @@ func (ISO9660) Decompress(ss []*stream.SeekableStream, outputPath string, args m
|
||||
}
|
||||
if obj.IsDir() {
|
||||
if args.InnerPath != "/" {
|
||||
outputPath = stdpath.Join(outputPath, obj.Name())
|
||||
rootpath := outputPath
|
||||
outputPath = filepath.Join(outputPath, obj.Name())
|
||||
if !strings.HasPrefix(outputPath, rootpath+string(os.PathSeparator)) {
|
||||
return fmt.Errorf("illegal file path: %s", obj.Name())
|
||||
}
|
||||
if err = os.MkdirAll(outputPath, 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1,8 +1,9 @@
|
||||
package iso9660
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
stdpath "path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||
@ -62,7 +63,11 @@ func toModelObj(file *iso9660.File) model.Obj {
|
||||
}
|
||||
|
||||
func decompress(f *iso9660.File, path string, up model.UpdateProgress) error {
|
||||
file, err := os.OpenFile(stdpath.Join(path, f.Name()), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
|
||||
destPath := filepath.Join(path, f.Name())
|
||||
if !strings.HasPrefix(destPath, path+string(os.PathSeparator)) {
|
||||
return fmt.Errorf("illegal file path: %s", f.Name())
|
||||
}
|
||||
file, err := os.OpenFile(destPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -84,7 +89,10 @@ func decompressAll(children []*iso9660.File, path string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nextPath := stdpath.Join(path, child.Name())
|
||||
nextPath := filepath.Join(path, child.Name())
|
||||
if !strings.HasPrefix(nextPath, path+string(os.PathSeparator)) {
|
||||
return fmt.Errorf("illegal file path: %s", child.Name())
|
||||
}
|
||||
if err = os.MkdirAll(nextPath, 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -3,7 +3,7 @@ package rardecode
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
stdpath "path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
|
||||
@ -93,7 +93,7 @@ func (RarDecoder) Decompress(ss []*stream.SeekableStream, outputPath string, arg
|
||||
}
|
||||
} else {
|
||||
innerPath := strings.TrimPrefix(args.InnerPath, "/")
|
||||
innerBase := stdpath.Base(innerPath)
|
||||
innerBase := filepath.Base(innerPath)
|
||||
createdBaseDir := false
|
||||
for {
|
||||
var header *rardecode.FileHeader
|
||||
@ -115,7 +115,7 @@ func (RarDecoder) Decompress(ss []*stream.SeekableStream, outputPath string, arg
|
||||
}
|
||||
break
|
||||
} else if strings.HasPrefix(name, innerPath+"/") {
|
||||
targetPath := stdpath.Join(outputPath, innerBase)
|
||||
targetPath := filepath.Join(outputPath, innerBase)
|
||||
if !createdBaseDir {
|
||||
err = os.Mkdir(targetPath, 0700)
|
||||
if err != nil {
|
||||
|
@ -5,7 +5,7 @@ import (
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
stdpath "path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
@ -124,7 +124,7 @@ type WrapFileInfo struct {
|
||||
}
|
||||
|
||||
func (f *WrapFileInfo) Name() string {
|
||||
return stdpath.Base(f.File.Name)
|
||||
return filepath.Base(f.File.Name)
|
||||
}
|
||||
|
||||
func (f *WrapFileInfo) Size() int64 {
|
||||
@ -183,12 +183,16 @@ func getReader(ss []*stream.SeekableStream, password string) (*rardecode.Reader,
|
||||
|
||||
func decompress(reader *rardecode.Reader, header *rardecode.FileHeader, filePath, outputPath string) error {
|
||||
targetPath := outputPath
|
||||
dir, base := stdpath.Split(filePath)
|
||||
dir, base := filepath.Split(filePath)
|
||||
if dir != "" {
|
||||
targetPath = stdpath.Join(targetPath, dir)
|
||||
err := os.MkdirAll(targetPath, 0700)
|
||||
if err != nil {
|
||||
return err
|
||||
targetPath = filepath.Join(targetPath, dir)
|
||||
if strings.HasPrefix(targetPath, outputPath+string(os.PathSeparator)) {
|
||||
err := os.MkdirAll(targetPath, 0700)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
targetPath = outputPath
|
||||
}
|
||||
}
|
||||
if base != "" {
|
||||
@ -201,7 +205,11 @@ func decompress(reader *rardecode.Reader, header *rardecode.FileHeader, filePath
|
||||
}
|
||||
|
||||
func _decompress(reader *rardecode.Reader, header *rardecode.FileHeader, targetPath string, up model.UpdateProgress) error {
|
||||
f, err := os.OpenFile(stdpath.Join(targetPath, stdpath.Base(header.Name)), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
|
||||
destPath := filepath.Join(targetPath, filepath.Base(header.Name))
|
||||
if !strings.HasPrefix(destPath, targetPath+string(os.PathSeparator)) {
|
||||
return fmt.Errorf("illegal file path: %s", filepath.Base(header.Name))
|
||||
}
|
||||
f, err := os.OpenFile(destPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1,10 +1,11 @@
|
||||
package tool
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
stdpath "path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
@ -40,13 +41,13 @@ func GenerateMetaTreeFromFolderTraversal(r ArchiveReader) (bool, []model.ObjTree
|
||||
isNewFolder := false
|
||||
if !file.FileInfo().IsDir() {
|
||||
// 先将 文件 添加到 所在的文件夹
|
||||
dir = stdpath.Dir(name)
|
||||
dir = filepath.Dir(name)
|
||||
dirObj = dirMap[dir]
|
||||
if dirObj == nil {
|
||||
isNewFolder = dir != "."
|
||||
dirObj = &model.ObjectTree{}
|
||||
dirObj.IsFolder = true
|
||||
dirObj.Name = stdpath.Base(dir)
|
||||
dirObj.Name = filepath.Base(dir)
|
||||
dirObj.Modified = file.FileInfo().ModTime()
|
||||
dirMap[dir] = dirObj
|
||||
}
|
||||
@ -64,28 +65,28 @@ func GenerateMetaTreeFromFolderTraversal(r ArchiveReader) (bool, []model.ObjTree
|
||||
dirMap[dir] = dirObj
|
||||
}
|
||||
dirObj.IsFolder = true
|
||||
dirObj.Name = stdpath.Base(dir)
|
||||
dirObj.Name = filepath.Base(dir)
|
||||
dirObj.Modified = file.FileInfo().ModTime()
|
||||
}
|
||||
if isNewFolder {
|
||||
// 将 文件夹 添加到 父文件夹
|
||||
// 考虑压缩包仅记录文件的路径,不记录文件夹
|
||||
// 循环创建所有父文件夹
|
||||
parentDir := stdpath.Dir(dir)
|
||||
parentDir := filepath.Dir(dir)
|
||||
for {
|
||||
parentDirObj := dirMap[parentDir]
|
||||
if parentDirObj == nil {
|
||||
parentDirObj = &model.ObjectTree{}
|
||||
if parentDir != "." {
|
||||
parentDirObj.IsFolder = true
|
||||
parentDirObj.Name = stdpath.Base(parentDir)
|
||||
parentDirObj.Name = filepath.Base(parentDir)
|
||||
parentDirObj.Modified = file.FileInfo().ModTime()
|
||||
}
|
||||
dirMap[parentDir] = parentDirObj
|
||||
}
|
||||
parentDirObj.Children = append(parentDirObj.Children, dirObj)
|
||||
|
||||
parentDir = stdpath.Dir(parentDir)
|
||||
parentDir = filepath.Dir(parentDir)
|
||||
if dirMap[parentDir] != nil {
|
||||
break
|
||||
}
|
||||
@ -127,7 +128,7 @@ func DecompressFromFolderTraversal(r ArchiveReader, outputPath string, args mode
|
||||
}
|
||||
} else {
|
||||
innerPath := strings.TrimPrefix(args.InnerPath, "/")
|
||||
innerBase := stdpath.Base(innerPath)
|
||||
innerBase := filepath.Base(innerPath)
|
||||
createdBaseDir := false
|
||||
for _, file := range files {
|
||||
name := file.Name()
|
||||
@ -138,7 +139,7 @@ func DecompressFromFolderTraversal(r ArchiveReader, outputPath string, args mode
|
||||
}
|
||||
break
|
||||
} else if strings.HasPrefix(name, innerPath+"/") {
|
||||
targetPath := stdpath.Join(outputPath, innerBase)
|
||||
targetPath := filepath.Join(outputPath, innerBase)
|
||||
if !createdBaseDir {
|
||||
err = os.Mkdir(targetPath, 0700)
|
||||
if err != nil {
|
||||
@ -159,12 +160,16 @@ func DecompressFromFolderTraversal(r ArchiveReader, outputPath string, args mode
|
||||
|
||||
func decompress(file SubFile, filePath, outputPath, password string) error {
|
||||
targetPath := outputPath
|
||||
dir, base := stdpath.Split(filePath)
|
||||
dir, base := filepath.Split(filePath)
|
||||
if dir != "" {
|
||||
targetPath = stdpath.Join(targetPath, dir)
|
||||
err := os.MkdirAll(targetPath, 0700)
|
||||
if err != nil {
|
||||
return err
|
||||
targetPath = filepath.Join(targetPath, dir)
|
||||
if strings.HasPrefix(targetPath, outputPath+string(os.PathSeparator)) {
|
||||
err := os.MkdirAll(targetPath, 0700)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
targetPath = outputPath
|
||||
}
|
||||
}
|
||||
if base != "" {
|
||||
@ -185,7 +190,11 @@ func _decompress(file SubFile, targetPath, password string, up model.UpdateProgr
|
||||
return err
|
||||
}
|
||||
defer func() { _ = rc.Close() }()
|
||||
f, err := os.OpenFile(stdpath.Join(targetPath, file.FileInfo().Name()), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
|
||||
destPath := filepath.Join(targetPath, file.FileInfo().Name())
|
||||
if !strings.HasPrefix(destPath, targetPath+string(os.PathSeparator)) {
|
||||
return fmt.Errorf("illegal file path: %s", file.FileInfo().Name())
|
||||
}
|
||||
f, err := os.OpenFile(destPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -114,6 +114,7 @@ func InitialSettings() []model.SettingItem {
|
||||
{Key: "share_icon", Value: "🎁", Type: conf.TypeString, Group: model.STYLE},
|
||||
{Key: "home_container", Value: "max_980px", Type: conf.TypeSelect, Options: "max_980px,hope_container", Group: model.STYLE},
|
||||
{Key: "settings_layout", Value: "list", Type: conf.TypeSelect, Options: "list,responsive", Group: model.STYLE},
|
||||
{Key: conf.HideStorageDetails, Value: "false", Type: conf.TypeBool, Group: model.STYLE, Flag: model.PRIVATE},
|
||||
// preview settings
|
||||
{Key: conf.TextTypes, Value: "txt,htm,html,xml,java,properties,sql,js,md,json,conf,ini,vue,php,py,bat,gitignore,yml,go,sh,c,cpp,h,hpp,tsx,vtt,srt,ass,rs,lrc,strm", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE},
|
||||
{Key: conf.AudioTypes, Value: "mp3,flac,ogg,m4a,wav,opus,wma", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE},
|
||||
|
@ -17,9 +17,10 @@ const (
|
||||
AllowMounted = "allow_mounted"
|
||||
RobotsTxt = "robots_txt"
|
||||
|
||||
Logo = "logo" // multi-lines text, L1: light, EOL: dark
|
||||
Favicon = "favicon"
|
||||
MainColor = "main_color"
|
||||
Logo = "logo" // multi-lines text, L1: light, EOL: dark
|
||||
Favicon = "favicon"
|
||||
MainColor = "main_color"
|
||||
HideStorageDetails = "hide_storage_details"
|
||||
|
||||
// preview
|
||||
TextTypes = "text_types"
|
||||
|
@ -210,6 +210,11 @@ type ArchiveDecompressResult interface {
|
||||
ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error)
|
||||
}
|
||||
|
||||
// WithDetails is an optional driver capability: drivers that can report
// storage-level details (capacity, usage) implement it. Callers probe
// for it with a type assertion.
type WithDetails interface {
	// GetDetails get storage details (total space, free space, etc.)
	GetDetails(ctx context.Context) (*model.StorageDetails, error)
}
|
||||
|
||||
type Reference interface {
|
||||
InitReference(storage Driver) error
|
||||
}
|
||||
|
@ -19,8 +19,9 @@ import (
|
||||
// then pass the actual path to the op package
|
||||
|
||||
type ListArgs struct {
|
||||
Refresh bool
|
||||
NoLog bool
|
||||
Refresh bool
|
||||
NoLog bool
|
||||
WithStorageDetails bool
|
||||
}
|
||||
|
||||
func List(ctx context.Context, path string, args *ListArgs) ([]model.Obj, error) {
|
||||
@ -35,11 +36,12 @@ func List(ctx context.Context, path string, args *ListArgs) ([]model.Obj, error)
|
||||
}
|
||||
|
||||
type GetArgs struct {
|
||||
NoLog bool
|
||||
NoLog bool
|
||||
WithStorageDetails bool
|
||||
}
|
||||
|
||||
func Get(ctx context.Context, path string, args *GetArgs) (model.Obj, error) {
|
||||
res, err := get(ctx, path)
|
||||
res, err := get(ctx, path, args)
|
||||
if err != nil {
|
||||
if !args.NoLog {
|
||||
log.Warnf("failed get %s: %s", path, err)
|
||||
|
@ -11,11 +11,11 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func get(ctx context.Context, path string) (model.Obj, error) {
|
||||
func get(ctx context.Context, path string, args *GetArgs) (model.Obj, error) {
|
||||
path = utils.FixAndCleanPath(path)
|
||||
// maybe a virtual file
|
||||
if path != "/" {
|
||||
virtualFiles := op.GetStorageVirtualFilesByPath(stdpath.Dir(path))
|
||||
virtualFiles := op.GetStorageVirtualFilesWithDetailsByPath(ctx, stdpath.Dir(path), !args.WithStorageDetails)
|
||||
for _, f := range virtualFiles {
|
||||
if f.GetName() == stdpath.Base(path) {
|
||||
return f, nil
|
||||
|
@ -15,7 +15,7 @@ import (
|
||||
func list(ctx context.Context, path string, args *ListArgs) ([]model.Obj, error) {
|
||||
meta, _ := ctx.Value(conf.MetaKey).(*model.Meta)
|
||||
user, _ := ctx.Value(conf.UserKey).(*model.User)
|
||||
virtualFiles := op.GetStorageVirtualFilesByPath(path)
|
||||
virtualFiles := op.GetStorageVirtualFilesWithDetailsByPath(ctx, path, !args.WithStorageDetails)
|
||||
storage, actualPath, err := op.GetStorageAndActualPath(path)
|
||||
if err != nil && len(virtualFiles) == 0 {
|
||||
return nil, errors.WithMessage(err, "failed get storage")
|
||||
|
@ -80,6 +80,10 @@ type SetPath interface {
|
||||
SetPath(path string)
|
||||
}
|
||||
|
||||
// ObjWithProvider is implemented by objects that know the name of the
// driver (provider) they originate from.
type ObjWithProvider interface {
	GetProvider() string
}
|
||||
|
||||
func SortFiles(objs []Obj, orderBy, orderDirection string) {
|
||||
if orderBy == "" {
|
||||
return
|
||||
@ -166,6 +170,16 @@ func GetUrl(obj Obj) (url string, ok bool) {
|
||||
return url, false
|
||||
}
|
||||
|
||||
func GetProvider(obj Obj) (string, bool) {
|
||||
if obj, ok := obj.(ObjWithProvider); ok {
|
||||
return obj.GetProvider(), true
|
||||
}
|
||||
if unwrap, ok := obj.(ObjUnwrap); ok {
|
||||
return GetProvider(unwrap.Unwrap())
|
||||
}
|
||||
return "unknown", false
|
||||
}
|
||||
|
||||
func GetRawObject(obj Obj) *Object {
|
||||
switch v := obj.(type) {
|
||||
case *ObjThumbURL:
|
||||
@ -174,6 +188,8 @@ func GetRawObject(obj Obj) *Object {
|
||||
return &v.Object
|
||||
case *ObjectURL:
|
||||
return &v.Object
|
||||
case *ObjectProvider:
|
||||
return &v.Object
|
||||
case *Object:
|
||||
return v
|
||||
}
|
||||
|
@ -99,3 +99,16 @@ type ObjThumbURL struct {
|
||||
Thumbnail
|
||||
Url
|
||||
}
|
||||
|
||||
// Provider carries the name of the driver an object originates from.
// Embed it to make a type satisfy ObjWithProvider.
type Provider struct {
	Provider string
}

// GetProvider implements ObjWithProvider.
func (p Provider) GetProvider() string {
	return p.Provider
}

// ObjectProvider is an Object annotated with its provider name.
type ObjectProvider struct {
	Object
	Provider
}
|
||||
|
@ -55,3 +55,40 @@ func (p Proxy) Webdav302() bool {
|
||||
func (p Proxy) WebdavProxyURL() bool {
|
||||
return p.WebdavPolicy == "use_proxy_url"
|
||||
}
|
||||
|
||||
// DiskUsage describes storage capacity in bytes.
type DiskUsage struct {
	TotalSpace uint64 `json:"total_space"`
	FreeSpace  uint64 `json:"free_space"`
}

// StorageDetails aggregates storage-level information a driver can
// report; currently just disk usage.
type StorageDetails struct {
	DiskUsage
}

// StorageDetailsWithName pairs (possibly absent) storage details with
// the reporting driver's name. StorageDetails is a pointer so that a
// missing report serializes distinctly from an all-zero one.
type StorageDetailsWithName struct {
	*StorageDetails
	DriverName string `json:"driver_name"`
}

// ObjWithStorageDetails is implemented by objects that carry storage
// details for the mount they represent.
type ObjWithStorageDetails interface {
	GetStorageDetails() *StorageDetailsWithName
}

// ObjStorageDetails decorates an Obj with storage details.
type ObjStorageDetails struct {
	Obj
	StorageDetailsWithName
}
|
||||
|
||||
// GetStorageDetails implements ObjWithStorageDetails by exposing the
// embedded StorageDetailsWithName.
func (o ObjStorageDetails) GetStorageDetails() *StorageDetailsWithName {
	return &o.StorageDetailsWithName
}
|
||||
|
||||
func GetStorageDetails(obj Obj) (*StorageDetailsWithName, bool) {
|
||||
if obj, ok := obj.(ObjWithStorageDetails); ok {
|
||||
return obj.GetStorageDetails(), true
|
||||
}
|
||||
if unwrap, ok := obj.(ObjUnwrap); ok {
|
||||
return GetStorageDetails(unwrap.Unwrap())
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
@ -125,7 +125,7 @@ type ConcurrencyLimit struct {
|
||||
Limit int // 需要大于0
|
||||
}
|
||||
|
||||
var ErrExceedMaxConcurrency = ErrorHttpStatusCode(http.StatusTooManyRequests)
|
||||
var ErrExceedMaxConcurrency = HttpStatusCodeError(http.StatusTooManyRequests)
|
||||
|
||||
func (l *ConcurrencyLimit) sub() error {
|
||||
l._m.Lock()
|
||||
@ -403,7 +403,7 @@ var errInfiniteRetry = errors.New("infinite retry")
|
||||
func (d *downloader) tryDownloadChunk(params *HttpRequestParams, ch *chunk) (int64, error) {
|
||||
resp, err := d.cfg.HttpClient(d.ctx, params)
|
||||
if err != nil {
|
||||
statusCode, ok := errors.Unwrap(err).(ErrorHttpStatusCode)
|
||||
statusCode, ok := errors.Unwrap(err).(HttpStatusCodeError)
|
||||
if !ok {
|
||||
return 0, err
|
||||
}
|
||||
|
@ -114,7 +114,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
|
||||
reader, err := RangeReadCloser.RangeRead(ctx, http_range.Range{Length: -1})
|
||||
if err != nil {
|
||||
code = http.StatusRequestedRangeNotSatisfiable
|
||||
if statusCode, ok := errors.Unwrap(err).(ErrorHttpStatusCode); ok {
|
||||
if statusCode, ok := errors.Unwrap(err).(HttpStatusCodeError); ok {
|
||||
code = int(statusCode)
|
||||
}
|
||||
http.Error(w, err.Error(), code)
|
||||
@ -137,7 +137,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
|
||||
sendContent, err = RangeReadCloser.RangeRead(ctx, ra)
|
||||
if err != nil {
|
||||
code = http.StatusRequestedRangeNotSatisfiable
|
||||
if statusCode, ok := errors.Unwrap(err).(ErrorHttpStatusCode); ok {
|
||||
if statusCode, ok := errors.Unwrap(err).(HttpStatusCodeError); ok {
|
||||
code = int(statusCode)
|
||||
}
|
||||
http.Error(w, err.Error(), code)
|
||||
@ -199,7 +199,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
|
||||
log.Warnf("Maybe size incorrect or reader not giving correct/full data, or connection closed before finish. written bytes: %d ,sendSize:%d, ", written, sendSize)
|
||||
}
|
||||
code = http.StatusInternalServerError
|
||||
if statusCode, ok := errors.Unwrap(err).(ErrorHttpStatusCode); ok {
|
||||
if statusCode, ok := errors.Unwrap(err).(HttpStatusCodeError); ok {
|
||||
code = int(statusCode)
|
||||
}
|
||||
w.WriteHeader(code)
|
||||
@ -253,14 +253,14 @@ func RequestHttp(ctx context.Context, httpMethod string, headerOverride http.Hea
|
||||
_ = res.Body.Close()
|
||||
msg := string(all)
|
||||
log.Debugln(msg)
|
||||
return nil, fmt.Errorf("http request [%s] failure,status: %w response:%s", URL, ErrorHttpStatusCode(res.StatusCode), msg)
|
||||
return nil, fmt.Errorf("http request [%s] failure,status: %w response:%s", URL, HttpStatusCodeError(res.StatusCode), msg)
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
type ErrorHttpStatusCode int
|
||||
type HttpStatusCodeError int
|
||||
|
||||
func (e ErrorHttpStatusCode) Error() string {
|
||||
func (e HttpStatusCodeError) Error() string {
|
||||
return fmt.Sprintf("%d|%s", e, http.StatusText(int(e)))
|
||||
}
|
||||
|
||||
|
@ -405,11 +405,8 @@ func DriverExtract(ctx context.Context, storage driver.Driver, path string, args
|
||||
return nil
|
||||
})
|
||||
link, err, _ := extractG.Do(key, fn)
|
||||
if err == nil && !link.AcquireReference() {
|
||||
for err == nil && !link.AcquireReference() {
|
||||
link, err, _ = extractG.Do(key, fn)
|
||||
if err == nil {
|
||||
link.AcquireReference()
|
||||
}
|
||||
}
|
||||
if err == errLinkMFileCache {
|
||||
if linkM != nil {
|
||||
|
@ -184,6 +184,9 @@ func Get(ctx context.Context, storage driver.Driver, path string) (model.Obj, er
|
||||
if err == nil {
|
||||
return model.WrapObjName(obj), nil
|
||||
}
|
||||
if !errs.IsNotImplement(err) {
|
||||
return nil, errors.WithMessage(err, "failed to get obj")
|
||||
}
|
||||
}
|
||||
|
||||
// is root folder
|
||||
@ -327,11 +330,8 @@ func Link(ctx context.Context, storage driver.Driver, path string, args model.Li
|
||||
return nil
|
||||
})
|
||||
link, err, _ := linkG.Do(key, fn)
|
||||
if err == nil && !link.AcquireReference() {
|
||||
for err == nil && !link.AcquireReference() {
|
||||
link, err, _ = linkG.Do(key, fn)
|
||||
if err == nil {
|
||||
link.AcquireReference()
|
||||
}
|
||||
}
|
||||
|
||||
if err == errLinkMFileCache {
|
||||
@ -630,6 +630,11 @@ func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file mod
|
||||
up = func(p float64) {}
|
||||
}
|
||||
|
||||
// 如果小于0,则通过缓存获取完整大小,可能发生于流式上传
|
||||
if file.GetSize() < 0 {
|
||||
log.Warnf("file size < 0, try to get full size from cache")
|
||||
file.CacheFullAndWriter(nil, nil)
|
||||
}
|
||||
switch s := storage.(type) {
|
||||
case driver.PutResult:
|
||||
var newObj model.Obj
|
||||
|
@ -15,7 +15,6 @@ import (
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/generic_sync"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
mapset "github.com/deckarep/golang-set/v2"
|
||||
"github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
@ -335,6 +334,40 @@ func getStoragesByPath(path string) []driver.Driver {
|
||||
// for example, there are: /a/b,/a/c,/a/d/e,/a/b.balance1,/av
|
||||
// GetStorageVirtualFilesByPath(/a) => b,c,d
|
||||
func GetStorageVirtualFilesByPath(prefix string) []model.Obj {
|
||||
return getStorageVirtualFilesByPath(prefix, func(_ driver.Driver, obj model.Obj) model.Obj {
|
||||
return obj
|
||||
})
|
||||
}
|
||||
|
||||
func GetStorageVirtualFilesWithDetailsByPath(ctx context.Context, prefix string, hideDetails ...bool) []model.Obj {
|
||||
if utils.IsBool(hideDetails...) {
|
||||
return GetStorageVirtualFilesByPath(prefix)
|
||||
}
|
||||
return getStorageVirtualFilesByPath(prefix, func(d driver.Driver, obj model.Obj) model.Obj {
|
||||
ret := &model.ObjStorageDetails{
|
||||
Obj: obj,
|
||||
StorageDetailsWithName: model.StorageDetailsWithName{
|
||||
StorageDetails: nil,
|
||||
DriverName: d.Config().Name,
|
||||
},
|
||||
}
|
||||
storage, ok := d.(driver.WithDetails)
|
||||
if !ok {
|
||||
return ret
|
||||
}
|
||||
details, err := storage.GetDetails(ctx)
|
||||
if err != nil {
|
||||
if !errors.Is(err, errs.NotImplement) {
|
||||
log.Errorf("failed get %s storage details: %+v", d.GetStorage().MountPath, err)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
ret.StorageDetails = details
|
||||
return ret
|
||||
})
|
||||
}
|
||||
|
||||
func getStorageVirtualFilesByPath(prefix string, rootCallback func(driver.Driver, model.Obj) model.Obj) []model.Obj {
|
||||
files := make([]model.Obj, 0)
|
||||
storages := storagesMap.Values()
|
||||
sort.Slice(storages, func(i, j int) bool {
|
||||
@ -345,21 +378,30 @@ func GetStorageVirtualFilesByPath(prefix string) []model.Obj {
|
||||
})
|
||||
|
||||
prefix = utils.FixAndCleanPath(prefix)
|
||||
set := mapset.NewSet[string]()
|
||||
set := make(map[string]int)
|
||||
for _, v := range storages {
|
||||
mountPath := utils.GetActualMountPath(v.GetStorage().MountPath)
|
||||
// Exclude prefix itself and non prefix
|
||||
if len(prefix) >= len(mountPath) || !utils.IsSubPath(prefix, mountPath) {
|
||||
continue
|
||||
}
|
||||
name := strings.SplitN(strings.TrimPrefix(mountPath[len(prefix):], "/"), "/", 2)[0]
|
||||
if set.Add(name) {
|
||||
files = append(files, &model.Object{
|
||||
Name: name,
|
||||
names := strings.SplitN(strings.TrimPrefix(mountPath[len(prefix):], "/"), "/", 2)
|
||||
idx, ok := set[names[0]]
|
||||
if !ok {
|
||||
set[names[0]] = len(files)
|
||||
obj := &model.Object{
|
||||
Name: names[0],
|
||||
Size: 0,
|
||||
Modified: v.GetStorage().Modified,
|
||||
IsFolder: true,
|
||||
})
|
||||
}
|
||||
if len(names) == 1 {
|
||||
files = append(files, rootCallback(v, obj))
|
||||
} else {
|
||||
files = append(files, obj)
|
||||
}
|
||||
} else if len(names) == 1 {
|
||||
files[idx] = rootCallback(v, files[idx])
|
||||
}
|
||||
}
|
||||
return files
|
||||
|
@ -137,6 +137,60 @@ func (f *FileStream) CacheFullAndWriter(up *model.UpdateProgress, writer io.Writ
|
||||
if writer != nil {
|
||||
reader = io.TeeReader(reader, writer)
|
||||
}
|
||||
|
||||
if f.GetSize() < 0 {
|
||||
if f.peekBuff == nil {
|
||||
f.peekBuff = &buffer.Reader{}
|
||||
}
|
||||
// 检查是否有数据
|
||||
buf := []byte{0}
|
||||
n, err := io.ReadFull(reader, buf)
|
||||
if n > 0 {
|
||||
f.peekBuff.Append(buf[:n])
|
||||
}
|
||||
if err == io.ErrUnexpectedEOF {
|
||||
f.size = f.peekBuff.Size()
|
||||
f.Reader = f.peekBuff
|
||||
return f.peekBuff, nil
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if conf.MaxBufferLimit-n > conf.MmapThreshold && conf.MmapThreshold > 0 {
|
||||
m, err := mmap.Alloc(conf.MaxBufferLimit - n)
|
||||
if err == nil {
|
||||
f.Add(utils.CloseFunc(func() error {
|
||||
return mmap.Free(m)
|
||||
}))
|
||||
n, err = io.ReadFull(reader, m)
|
||||
if n > 0 {
|
||||
f.peekBuff.Append(m[:n])
|
||||
}
|
||||
if err == io.ErrUnexpectedEOF {
|
||||
f.size = f.peekBuff.Size()
|
||||
f.Reader = f.peekBuff
|
||||
return f.peekBuff, nil
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
tmpF, err := utils.CreateTempFile(reader, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.Add(utils.CloseFunc(func() error {
|
||||
return errors.Join(tmpF.Close(), os.RemoveAll(tmpF.Name()))
|
||||
}))
|
||||
peekF, err := buffer.NewPeekFile(f.peekBuff, tmpF)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.size = peekF.Size()
|
||||
f.Reader = peekF
|
||||
return peekF, nil
|
||||
}
|
||||
|
||||
f.Reader = reader
|
||||
return f.cache(f.GetSize())
|
||||
}
|
||||
@ -162,7 +216,7 @@ func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
|
||||
}
|
||||
|
||||
size := httpRange.Start + httpRange.Length
|
||||
if f.peekBuff != nil && size <= int64(f.peekBuff.Len()) {
|
||||
if f.peekBuff != nil && size <= int64(f.peekBuff.Size()) {
|
||||
return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil
|
||||
}
|
||||
|
||||
@ -194,7 +248,7 @@ func (f *FileStream) cache(maxCacheSize int64) (model.File, error) {
|
||||
f.peekBuff = &buffer.Reader{}
|
||||
f.oriReader = f.Reader
|
||||
}
|
||||
bufSize := maxCacheSize - int64(f.peekBuff.Len())
|
||||
bufSize := maxCacheSize - int64(f.peekBuff.Size())
|
||||
var buf []byte
|
||||
if conf.MmapThreshold > 0 && bufSize >= int64(conf.MmapThreshold) {
|
||||
m, err := mmap.Alloc(int(bufSize))
|
||||
@ -213,7 +267,7 @@ func (f *FileStream) cache(maxCacheSize int64) (model.File, error) {
|
||||
return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", bufSize, n, err)
|
||||
}
|
||||
f.peekBuff.Append(buf)
|
||||
if int64(f.peekBuff.Len()) >= f.GetSize() {
|
||||
if int64(f.peekBuff.Size()) >= f.GetSize() {
|
||||
f.Reader = f.peekBuff
|
||||
f.oriReader = nil
|
||||
} else {
|
||||
|
@ -77,7 +77,7 @@ func GetRangeReaderFromLink(size int64, link *model.Link) (model.RangeReaderIF,
|
||||
|
||||
response, err := net.RequestHttp(ctx, "GET", header, link.URL)
|
||||
if err != nil {
|
||||
if _, ok := errors.Unwrap(err).(net.ErrorHttpStatusCode); ok {
|
||||
if _, ok := errors.Unwrap(err).(net.HttpStatusCodeError); ok {
|
||||
return nil, err
|
||||
}
|
||||
return nil, fmt.Errorf("http request failure, err:%w", err)
|
||||
|
@ -8,83 +8,86 @@ import (
|
||||
// 用于存储不复用的[]byte
|
||||
type Reader struct {
|
||||
bufs [][]byte
|
||||
length int
|
||||
offset int
|
||||
size int64
|
||||
offset int64
|
||||
}
|
||||
|
||||
func (r *Reader) Len() int {
|
||||
return r.length
|
||||
func (r *Reader) Size() int64 {
|
||||
return r.size
|
||||
}
|
||||
|
||||
func (r *Reader) Append(buf []byte) {
|
||||
r.length += len(buf)
|
||||
r.size += int64(len(buf))
|
||||
r.bufs = append(r.bufs, buf)
|
||||
}
|
||||
|
||||
func (r *Reader) Read(p []byte) (int, error) {
|
||||
n, err := r.ReadAt(p, int64(r.offset))
|
||||
n, err := r.ReadAt(p, r.offset)
|
||||
if n > 0 {
|
||||
r.offset += n
|
||||
r.offset += int64(n)
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (r *Reader) ReadAt(p []byte, off int64) (int, error) {
|
||||
if off < 0 || off >= int64(r.length) {
|
||||
if off < 0 || off >= r.size {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
n, length := 0, int64(0)
|
||||
n := 0
|
||||
readFrom := false
|
||||
for _, buf := range r.bufs {
|
||||
newLength := length + int64(len(buf))
|
||||
if readFrom {
|
||||
w := copy(p[n:], buf)
|
||||
n += w
|
||||
} else if off < newLength {
|
||||
nn := copy(p[n:], buf)
|
||||
n += nn
|
||||
if n == len(p) {
|
||||
return n, nil
|
||||
}
|
||||
} else if newOff := off - int64(len(buf)); newOff >= 0 {
|
||||
off = newOff
|
||||
} else {
|
||||
nn := copy(p, buf[off:])
|
||||
if nn == len(p) {
|
||||
return nn, nil
|
||||
}
|
||||
n += nn
|
||||
readFrom = true
|
||||
w := copy(p[n:], buf[int(off-length):])
|
||||
n += w
|
||||
}
|
||||
if n == len(p) {
|
||||
return n, nil
|
||||
}
|
||||
length = newLength
|
||||
}
|
||||
|
||||
return n, io.EOF
|
||||
}
|
||||
|
||||
func (r *Reader) Seek(offset int64, whence int) (int64, error) {
|
||||
var abs int
|
||||
switch whence {
|
||||
case io.SeekStart:
|
||||
abs = int(offset)
|
||||
case io.SeekCurrent:
|
||||
abs = r.offset + int(offset)
|
||||
offset = r.offset + offset
|
||||
case io.SeekEnd:
|
||||
abs = r.length + int(offset)
|
||||
offset = r.size + offset
|
||||
default:
|
||||
return 0, errors.New("Seek: invalid whence")
|
||||
}
|
||||
|
||||
if abs < 0 || abs > r.length {
|
||||
if offset < 0 || offset > r.size {
|
||||
return 0, errors.New("Seek: invalid offset")
|
||||
}
|
||||
|
||||
r.offset = abs
|
||||
return int64(abs), nil
|
||||
r.offset = offset
|
||||
return offset, nil
|
||||
}
|
||||
|
||||
func (r *Reader) Reset() {
|
||||
clear(r.bufs)
|
||||
r.bufs = nil
|
||||
r.length = 0
|
||||
r.size = 0
|
||||
r.offset = 0
|
||||
}
|
||||
|
||||
func NewReader(buf ...[]byte) *Reader {
|
||||
b := &Reader{}
|
||||
b := &Reader{
|
||||
bufs: make([][]byte, 0, len(buf)),
|
||||
}
|
||||
for _, b1 := range buf {
|
||||
b.Append(b1)
|
||||
}
|
||||
|
@ -13,8 +13,7 @@ func TestReader_ReadAt(t *testing.T) {
|
||||
}
|
||||
bs := &Reader{}
|
||||
bs.Append([]byte("github.com"))
|
||||
bs.Append([]byte("/"))
|
||||
bs.Append([]byte("OpenList"))
|
||||
bs.Append([]byte("/OpenList"))
|
||||
bs.Append([]byte("Team/"))
|
||||
bs.Append([]byte("OpenList"))
|
||||
tests := []struct {
|
||||
@ -71,7 +70,7 @@ func TestReader_ReadAt(t *testing.T) {
|
||||
off: 24,
|
||||
},
|
||||
want: func(a args, n int, err error) error {
|
||||
if n != bs.Len()-int(a.off) {
|
||||
if n != int(bs.Size()-a.off) {
|
||||
return errors.New("read length not match")
|
||||
}
|
||||
if string(a.p[:n]) != "OpenList" {
|
||||
|
88
pkg/buffer/file.go
Normal file
88
pkg/buffer/file.go
Normal file
@ -0,0 +1,88 @@
|
||||
package buffer
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
type PeekFile struct {
|
||||
peek *Reader
|
||||
file *os.File
|
||||
offset int64
|
||||
size int64
|
||||
}
|
||||
|
||||
func (p *PeekFile) Read(b []byte) (n int, err error) {
|
||||
n, err = p.ReadAt(b, p.offset)
|
||||
if n > 0 {
|
||||
p.offset += int64(n)
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (p *PeekFile) ReadAt(b []byte, off int64) (n int, err error) {
|
||||
if off < p.peek.Size() {
|
||||
n, err = p.peek.ReadAt(b, off)
|
||||
if err == nil || n == len(b) {
|
||||
return n, nil
|
||||
}
|
||||
// EOF
|
||||
}
|
||||
var nn int
|
||||
nn, err = p.file.ReadAt(b[n:], off+int64(n)-p.peek.Size())
|
||||
return n + nn, err
|
||||
}
|
||||
|
||||
func (p *PeekFile) Seek(offset int64, whence int) (int64, error) {
|
||||
switch whence {
|
||||
case io.SeekStart:
|
||||
case io.SeekCurrent:
|
||||
if offset == 0 {
|
||||
return p.offset, nil
|
||||
}
|
||||
offset = p.offset + offset
|
||||
case io.SeekEnd:
|
||||
offset = p.size + offset
|
||||
default:
|
||||
return 0, errors.New("Seek: invalid whence")
|
||||
}
|
||||
|
||||
if offset < 0 || offset > p.size {
|
||||
return 0, errors.New("Seek: invalid offset")
|
||||
}
|
||||
if offset <= p.peek.Size() {
|
||||
_, err := p.peek.Seek(offset, io.SeekStart)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
_, err = p.file.Seek(0, io.SeekStart)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
} else {
|
||||
_, err := p.peek.Seek(p.peek.Size(), io.SeekStart)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
_, err = p.file.Seek(offset-p.peek.Size(), io.SeekStart)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
p.offset = offset
|
||||
return offset, nil
|
||||
}
|
||||
|
||||
func (p *PeekFile) Size() int64 {
|
||||
return p.size
|
||||
}
|
||||
|
||||
func NewPeekFile(peek *Reader, file *os.File) (*PeekFile, error) {
|
||||
stat, err := file.Stat()
|
||||
if err == nil {
|
||||
return &PeekFile{peek: peek, file: file, size: stat.Size() + peek.Size()}, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
@ -57,6 +57,11 @@ var (
|
||||
Supported []*HashType
|
||||
)
|
||||
|
||||
func GetHashByName(name string) (ht *HashType, ok bool) {
|
||||
ht, ok = name2hash[name]
|
||||
return
|
||||
}
|
||||
|
||||
// RegisterHash adds a new Hash to the list and returns its Type
|
||||
func RegisterHash(name, alias string, width int, newFunc func() hash.Hash) *HashType {
|
||||
return RegisterHashWithParam(name, alias, width, func(a ...any) hash.Hash { return newFunc() })
|
||||
|
@ -200,26 +200,37 @@ type SyncClosers struct {
|
||||
var _ SyncClosersIF = (*SyncClosers)(nil)
|
||||
|
||||
func (c *SyncClosers) AcquireReference() bool {
|
||||
ref := atomic.AddInt32(&c.ref, 1)
|
||||
if ref > 0 {
|
||||
// log.Debugf("SyncClosers.AcquireReference %p,ref=%d\n", c, ref)
|
||||
return true
|
||||
for {
|
||||
ref := atomic.LoadInt32(&c.ref)
|
||||
if ref < 0 {
|
||||
return false
|
||||
}
|
||||
newRef := ref + 1
|
||||
if atomic.CompareAndSwapInt32(&c.ref, ref, newRef) {
|
||||
log.Debugf("AcquireReference %p: %d", c, newRef)
|
||||
return true
|
||||
}
|
||||
}
|
||||
atomic.StoreInt32(&c.ref, math.MinInt16)
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *SyncClosers) Close() error {
|
||||
ref := atomic.AddInt32(&c.ref, -1)
|
||||
if ref < -1 {
|
||||
atomic.StoreInt32(&c.ref, math.MinInt16)
|
||||
return nil
|
||||
for {
|
||||
ref := atomic.LoadInt32(&c.ref)
|
||||
if ref < 0 {
|
||||
return nil
|
||||
}
|
||||
newRef := ref - 1
|
||||
if newRef <= 0 {
|
||||
newRef = math.MinInt16
|
||||
}
|
||||
if atomic.CompareAndSwapInt32(&c.ref, ref, newRef) {
|
||||
log.Debugf("Close %p: %d", c, ref)
|
||||
if newRef > 0 {
|
||||
return nil
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
// log.Debugf("SyncClosers.Close %p,ref=%d\n", c, ref+1)
|
||||
if ref > 0 {
|
||||
return nil
|
||||
}
|
||||
atomic.StoreInt32(&c.ref, math.MinInt16)
|
||||
|
||||
var errs []error
|
||||
for _, closer := range c.closers {
|
||||
|
@ -147,7 +147,7 @@ func proxy(c *gin.Context, link *model.Link, file model.Obj, proxyRange bool) {
|
||||
if Writer.IsWritten() {
|
||||
log.Errorf("%s %s local proxy error: %+v", c.Request.Method, c.Request.URL.Path, err)
|
||||
} else {
|
||||
if statusCode, ok := errors.Unwrap(err).(net.ErrorHttpStatusCode); ok {
|
||||
if statusCode, ok := errors.Unwrap(err).(net.HttpStatusCodeError); ok {
|
||||
common.ErrorPage(c, err, int(statusCode), true)
|
||||
} else {
|
||||
common.ErrorPage(c, err, 500, true)
|
||||
|
@ -33,18 +33,19 @@ type DirReq struct {
|
||||
}
|
||||
|
||||
type ObjResp struct {
|
||||
Id string `json:"id"`
|
||||
Path string `json:"path"`
|
||||
Name string `json:"name"`
|
||||
Size int64 `json:"size"`
|
||||
IsDir bool `json:"is_dir"`
|
||||
Modified time.Time `json:"modified"`
|
||||
Created time.Time `json:"created"`
|
||||
Sign string `json:"sign"`
|
||||
Thumb string `json:"thumb"`
|
||||
Type int `json:"type"`
|
||||
HashInfoStr string `json:"hashinfo"`
|
||||
HashInfo map[*utils.HashType]string `json:"hash_info"`
|
||||
Id string `json:"id"`
|
||||
Path string `json:"path"`
|
||||
Name string `json:"name"`
|
||||
Size int64 `json:"size"`
|
||||
IsDir bool `json:"is_dir"`
|
||||
Modified time.Time `json:"modified"`
|
||||
Created time.Time `json:"created"`
|
||||
Sign string `json:"sign"`
|
||||
Thumb string `json:"thumb"`
|
||||
Type int `json:"type"`
|
||||
HashInfoStr string `json:"hashinfo"`
|
||||
HashInfo map[*utils.HashType]string `json:"hash_info"`
|
||||
MountDetails *model.StorageDetailsWithName `json:"mount_details,omitempty"`
|
||||
}
|
||||
|
||||
type FsListResp struct {
|
||||
@ -98,7 +99,10 @@ func FsList(c *gin.Context, req *ListReq, user *model.User) {
|
||||
common.ErrorStrResp(c, "Refresh without permission", 403)
|
||||
return
|
||||
}
|
||||
objs, err := fs.List(c.Request.Context(), reqPath, &fs.ListArgs{Refresh: req.Refresh})
|
||||
objs, err := fs.List(c.Request.Context(), reqPath, &fs.ListArgs{
|
||||
Refresh: req.Refresh,
|
||||
WithStorageDetails: !user.IsGuest() && !setting.GetBool(conf.HideStorageDetails),
|
||||
})
|
||||
if err != nil {
|
||||
common.ErrorResp(c, err, 500)
|
||||
return
|
||||
@ -224,19 +228,21 @@ func toObjsResp(objs []model.Obj, parent string, encrypt bool) []ObjResp {
|
||||
var resp []ObjResp
|
||||
for _, obj := range objs {
|
||||
thumb, _ := model.GetThumb(obj)
|
||||
mountDetails, _ := model.GetStorageDetails(obj)
|
||||
resp = append(resp, ObjResp{
|
||||
Id: obj.GetID(),
|
||||
Path: obj.GetPath(),
|
||||
Name: obj.GetName(),
|
||||
Size: obj.GetSize(),
|
||||
IsDir: obj.IsDir(),
|
||||
Modified: obj.ModTime(),
|
||||
Created: obj.CreateTime(),
|
||||
HashInfoStr: obj.GetHash().String(),
|
||||
HashInfo: obj.GetHash().Export(),
|
||||
Sign: common.Sign(obj, parent, encrypt),
|
||||
Thumb: thumb,
|
||||
Type: utils.GetObjType(obj.GetName(), obj.IsDir()),
|
||||
Id: obj.GetID(),
|
||||
Path: obj.GetPath(),
|
||||
Name: obj.GetName(),
|
||||
Size: obj.GetSize(),
|
||||
IsDir: obj.IsDir(),
|
||||
Modified: obj.ModTime(),
|
||||
Created: obj.CreateTime(),
|
||||
HashInfoStr: obj.GetHash().String(),
|
||||
HashInfo: obj.GetHash().Export(),
|
||||
Sign: common.Sign(obj, parent, encrypt),
|
||||
Thumb: thumb,
|
||||
Type: utils.GetObjType(obj.GetName(), obj.IsDir()),
|
||||
MountDetails: mountDetails,
|
||||
})
|
||||
}
|
||||
return resp
|
||||
@ -293,7 +299,9 @@ func FsGet(c *gin.Context, req *FsGetReq, user *model.User) {
|
||||
common.ErrorStrResp(c, "password is incorrect or you have no permission", 403)
|
||||
return
|
||||
}
|
||||
obj, err := fs.Get(c.Request.Context(), reqPath, &fs.GetArgs{})
|
||||
obj, err := fs.Get(c.Request.Context(), reqPath, &fs.GetArgs{
|
||||
WithStorageDetails: !user.IsGuest() && !setting.GetBool(conf.HideStorageDetails),
|
||||
})
|
||||
if err != nil {
|
||||
common.ErrorResp(c, err, 500)
|
||||
return
|
||||
@ -301,8 +309,8 @@ func FsGet(c *gin.Context, req *FsGetReq, user *model.User) {
|
||||
var rawURL string
|
||||
|
||||
storage, err := fs.GetStorage(reqPath, &fs.GetStoragesArgs{})
|
||||
provider := "unknown"
|
||||
if err == nil {
|
||||
provider, ok := model.GetProvider(obj)
|
||||
if !ok && err == nil {
|
||||
provider = storage.Config().Name
|
||||
}
|
||||
if !obj.IsDir() {
|
||||
@ -350,20 +358,22 @@ func FsGet(c *gin.Context, req *FsGetReq, user *model.User) {
|
||||
}
|
||||
parentMeta, _ := op.GetNearestMeta(parentPath)
|
||||
thumb, _ := model.GetThumb(obj)
|
||||
mountDetails, _ := model.GetStorageDetails(obj)
|
||||
common.SuccessResp(c, FsGetResp{
|
||||
ObjResp: ObjResp{
|
||||
Id: obj.GetID(),
|
||||
Path: obj.GetPath(),
|
||||
Name: obj.GetName(),
|
||||
Size: obj.GetSize(),
|
||||
IsDir: obj.IsDir(),
|
||||
Modified: obj.ModTime(),
|
||||
Created: obj.CreateTime(),
|
||||
HashInfoStr: obj.GetHash().String(),
|
||||
HashInfo: obj.GetHash().Export(),
|
||||
Sign: common.Sign(obj, parentPath, isEncrypt(meta, reqPath)),
|
||||
Type: utils.GetFileType(obj.GetName()),
|
||||
Thumb: thumb,
|
||||
Id: obj.GetID(),
|
||||
Path: obj.GetPath(),
|
||||
Name: obj.GetName(),
|
||||
Size: obj.GetSize(),
|
||||
IsDir: obj.IsDir(),
|
||||
Modified: obj.ModTime(),
|
||||
Created: obj.CreateTime(),
|
||||
HashInfoStr: obj.GetHash().String(),
|
||||
HashInfo: obj.GetHash().Export(),
|
||||
Sign: common.Sign(obj, parentPath, isEncrypt(meta, reqPath)),
|
||||
Type: utils.GetFileType(obj.GetName()),
|
||||
Thumb: thumb,
|
||||
MountDetails: mountDetails,
|
||||
},
|
||||
RawURL: rawURL,
|
||||
Readme: getReadme(meta, reqPath),
|
||||
|
@ -56,14 +56,17 @@ func FsStream(c *gin.Context) {
|
||||
}
|
||||
}
|
||||
dir, name := stdpath.Split(path)
|
||||
sizeStr := c.GetHeader("Content-Length")
|
||||
if sizeStr == "" {
|
||||
sizeStr = "0"
|
||||
}
|
||||
size, err := strconv.ParseInt(sizeStr, 10, 64)
|
||||
if err != nil {
|
||||
common.ErrorResp(c, err, 400)
|
||||
return
|
||||
// 如果请求头 Content-Length 和 X-File-Size 都没有,则 size=-1,表示未知大小的流式上传
|
||||
size := c.Request.ContentLength
|
||||
if size < 0 {
|
||||
sizeStr := c.GetHeader("X-File-Size")
|
||||
if sizeStr != "" {
|
||||
size, err = strconv.ParseInt(sizeStr, 10, 64)
|
||||
if err != nil {
|
||||
common.ErrorResp(c, err, 400)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
h := make(map[*utils.HashType]string)
|
||||
if md5 := c.GetHeader("X-File-Md5"); md5 != "" {
|
||||
|
@ -3,9 +3,11 @@ package handles
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/conf"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/db"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||
"github.com/OpenListTeam/OpenList/v4/server/common"
|
||||
@ -13,6 +15,42 @@ import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type StorageResp struct {
|
||||
model.Storage
|
||||
MountDetails *model.StorageDetails `json:"mount_details,omitempty"`
|
||||
}
|
||||
|
||||
func makeStorageResp(c *gin.Context, storages []model.Storage) []*StorageResp {
|
||||
ret := make([]*StorageResp, len(storages))
|
||||
var wg sync.WaitGroup
|
||||
for i, s := range storages {
|
||||
ret[i] = &StorageResp{
|
||||
Storage: s,
|
||||
MountDetails: nil,
|
||||
}
|
||||
d, err := op.GetStorageByMountPath(s.MountPath)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
wd, ok := d.(driver.WithDetails)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
details, err := wd.GetDetails(c)
|
||||
if err != nil {
|
||||
log.Errorf("failed get %s details: %+v", s.MountPath, err)
|
||||
return
|
||||
}
|
||||
ret[i].MountDetails = details
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
return ret
|
||||
}
|
||||
|
||||
func ListStorages(c *gin.Context) {
|
||||
var req model.PageReq
|
||||
if err := c.ShouldBind(&req); err != nil {
|
||||
@ -27,7 +65,7 @@ func ListStorages(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
common.SuccessResp(c, common.PageResp{
|
||||
Content: storages,
|
||||
Content: makeStorageResp(c, storages),
|
||||
Total: total,
|
||||
})
|
||||
}
|
||||
|
@ -14,6 +14,7 @@ import (
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@ -271,7 +272,7 @@ func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (sta
|
||||
}
|
||||
err = common.Proxy(w, r, link, fi)
|
||||
if err != nil {
|
||||
if statusCode, ok := errors.Unwrap(err).(net.ErrorHttpStatusCode); ok {
|
||||
if statusCode, ok := errors.Unwrap(err).(net.HttpStatusCodeError); ok {
|
||||
return int(statusCode), err
|
||||
}
|
||||
return http.StatusInternalServerError, fmt.Errorf("webdav proxy error: %+v", err)
|
||||
@ -341,9 +342,19 @@ func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int,
|
||||
if err != nil {
|
||||
return http.StatusForbidden, err
|
||||
}
|
||||
size := r.ContentLength
|
||||
if size < 0 {
|
||||
sizeStr := r.Header.Get("X-File-Size")
|
||||
if sizeStr != "" {
|
||||
size, err = strconv.ParseInt(sizeStr, 10, 64)
|
||||
if err != nil {
|
||||
return http.StatusBadRequest, err
|
||||
}
|
||||
}
|
||||
}
|
||||
obj := model.Object{
|
||||
Name: path.Base(reqPath),
|
||||
Size: r.ContentLength,
|
||||
Size: size,
|
||||
Modified: h.getModTime(r),
|
||||
Ctime: h.getCreateTime(r),
|
||||
}
|
||||
|
Reference in New Issue
Block a user