Compare commits

...

8 Commits

Author SHA1 Message Date
37e00774b3 fix(115open): fix online api
Signed-off-by: SenkjM <senkjm@outlook.com>
2025-07-26 16:55:01 +08:00
4e9c30f49d feat(fs): full support webdav cross-driver copy and move (#823)
* fix(fs): restore webdav cross-driver copy and move

* fix bug

* webdav支持复制、移动 文件夹

* 优化

* 。
2025-07-26 00:27:46 +08:00
0ee31a3f36 fix(crypt): wrong ContentLength 2025-07-25 19:55:22 +08:00
23bddf991e feat(drivers): enable local sorting for cloudreve, ilanzou (#840)
* feat(cloudreve): enable local sorting

* feat(ilanzou): enable local sorting
2025-07-25 18:01:19 +08:00
da8d6607cf fix(static): support logo replacement (#834 Close #754) 2025-07-25 17:12:51 +08:00
6134574dac fix(fs): rename bug (#832)
* fix(fs): rename bug

* chore

* fix bug

* .

---------

Co-authored-by: j2rong4cn <j2rong@qq.com>
2025-07-25 13:42:39 +08:00
b273232f87 refactor(log): redir utils.Log to logrus after init (#833) 2025-07-25 13:38:45 +08:00
358e4d851e refactor(log): filter (#816) 2025-07-25 11:33:27 +08:00
20 changed files with 213 additions and 304 deletions

View File

@ -48,7 +48,15 @@ the address is defined in config file`,
gin.SetMode(gin.ReleaseMode) gin.SetMode(gin.ReleaseMode)
} }
r := gin.New() r := gin.New()
r.Use(middlewares.HTTPFilteredLogger(), gin.RecoveryWithWriter(log.StandardLogger().Out))
// gin log
if conf.Conf.Log.Filter.Enable {
r.Use(middlewares.FilteredLogger())
} else {
r.Use(gin.LoggerWithWriter(log.StandardLogger().Out))
}
r.Use(gin.RecoveryWithWriter(log.StandardLogger().Out))
server.Init(r) server.Init(r)
var httpHandler http.Handler = r var httpHandler http.Handler = r
if conf.Conf.Scheme.EnableH2c { if conf.Conf.Scheme.EnableH2c {
@ -103,7 +111,7 @@ the address is defined in config file`,
} }
if conf.Conf.S3.Port != -1 && conf.Conf.S3.Enable { if conf.Conf.S3.Port != -1 && conf.Conf.S3.Enable {
s3r := gin.New() s3r := gin.New()
s3r.Use(middlewares.S3FilteredLogger(), gin.RecoveryWithWriter(log.StandardLogger().Out)) s3r.Use(gin.LoggerWithWriter(log.StandardLogger().Out), gin.RecoveryWithWriter(log.StandardLogger().Out))
server.InitS3(s3r) server.InitS3(s3r)
s3Base := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.S3.Port) s3Base := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.S3.Port)
utils.Log.Infof("start S3 server @ %s", s3Base) utils.Log.Infof("start S3 server @ %s", s3Base)

View File

@ -12,6 +12,8 @@ type Addition struct {
OrderBy string `json:"order_by" type:"select" options:"file_name,file_size,user_utime,file_type"` OrderBy string `json:"order_by" type:"select" options:"file_name,file_size,user_utime,file_type"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc"` OrderDirection string `json:"order_direction" type:"select" options:"asc,desc"`
LimitRate float64 `json:"limit_rate" type:"float" default:"1" help:"limit all api request rate ([limit]r/1s)"` LimitRate float64 `json:"limit_rate" type:"float" default:"1" help:"limit all api request rate ([limit]r/1s)"`
UseOnlineAPI bool `json:"use_online_api" default:"true"`
APIAddress string `json:"api_url_address" default:"https://api.oplist.org/115cloud/renewapi"`
AccessToken string `json:"access_token" required:"true"` AccessToken string `json:"access_token" required:"true"`
RefreshToken string `json:"refresh_token" required:"true"` RefreshToken string `json:"refresh_token" required:"true"`
} }

View File

@ -20,6 +20,7 @@ type Addition struct {
var config = driver.Config{ var config = driver.Config{
Name: "Cloudreve", Name: "Cloudreve",
DefaultRoot: "/", DefaultRoot: "/",
LocalSort: true,
} }
func init() { func init() {

View File

@ -317,8 +317,7 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
} }
return readSeeker, nil return readSeeker, nil
}), }),
SyncClosers: utils.NewSyncClosers(remoteLink), SyncClosers: utils.NewSyncClosers(remoteLink),
ContentLength: remoteSize,
}, nil }, nil
} }

View File

@ -31,6 +31,7 @@ func init() {
config: driver.Config{ config: driver.Config{
Name: "ILanZou", Name: "ILanZou",
DefaultRoot: "0", DefaultRoot: "0",
LocalSort: true,
}, },
conf: Conf{ conf: Conf{
base: "https://api.ilanzou.com", base: "https://api.ilanzou.com",
@ -48,6 +49,7 @@ func init() {
config: driver.Config{ config: driver.Config{
Name: "FeijiPan", Name: "FeijiPan",
DefaultRoot: "0", DefaultRoot: "0",
LocalSort: true,
}, },
conf: Conf{ conf: Conf{
base: "https://api.feijipan.com", base: "https://api.feijipan.com",

View File

@ -82,6 +82,9 @@ func InitConfig() {
if !conf.Conf.Force { if !conf.Conf.Force {
confFromEnv() confFromEnv()
} }
if len(conf.Conf.Log.Filter.Filters) == 0 {
conf.Conf.Log.Filter.Enable = false
}
// convert abs path // convert abs path
convertAbsPath := func(path *string) { convertAbsPath := func(path *string) {
if !filepath.IsAbs(*path) { if !filepath.IsAbs(*path) {

View File

@ -53,4 +53,5 @@ func Log() {
} }
log.SetOutput(logrus.StandardLogger().Out) log.SetOutput(logrus.StandardLogger().Out)
utils.Log.Infof("init logrus...") utils.Log.Infof("init logrus...")
utils.Log = logrus.StandardLogger()
} }

View File

@ -38,38 +38,24 @@ type Scheme struct {
} }
type LogConfig struct { type LogConfig struct {
Enable bool `json:"enable" env:"LOG_ENABLE"` Enable bool `json:"enable" env:"ENABLE"`
Name string `json:"name" env:"LOG_NAME"` Name string `json:"name" env:"NAME"`
MaxSize int `json:"max_size" env:"MAX_SIZE"` MaxSize int `json:"max_size" env:"MAX_SIZE"`
MaxBackups int `json:"max_backups" env:"MAX_BACKUPS"` MaxBackups int `json:"max_backups" env:"MAX_BACKUPS"`
MaxAge int `json:"max_age" env:"MAX_AGE"` MaxAge int `json:"max_age" env:"MAX_AGE"`
Compress bool `json:"compress" env:"COMPRESS"` Compress bool `json:"compress" env:"COMPRESS"`
Filter LogFilterConfig `json:"filter"` // Log filtering configuration (config file only, no env support) Filter LogFilterConfig `json:"filter" envPrefix:"FILTER_"`
} }
// LogFilterConfig holds configuration for log filtering
// Note: This configuration is only supported via config file, not environment variables
type LogFilterConfig struct { type LogFilterConfig struct {
// EnableFiltering controls whether log filtering is enabled Enable bool `json:"enable" env:"ENABLE"`
EnableFiltering bool `json:"enable_filtering"` Filters []Filter `json:"filters"`
}
// FilterHealthChecks controls whether to filter health check requests
FilterHealthChecks bool `json:"filter_health_checks"` type Filter struct {
CIDR string `json:"cidr"`
// FilterWebDAV controls whether to filter WebDAV requests (only for HTTP server) Path string `json:"path"`
FilterWebDAV bool `json:"filter_webdav"` Method string `json:"method"`
// FilterHEADRequests controls whether to filter HEAD requests
FilterHEADRequests bool `json:"filter_head_requests"`
// CustomSkipPaths allows adding custom paths to skip
CustomSkipPaths []string `json:"custom_skip_paths"`
// CustomSkipMethods allows adding custom methods to skip
CustomSkipMethods []string `json:"custom_skip_methods"`
// CustomSkipPrefixes allows adding custom path prefixes to skip
CustomSkipPrefixes []string `json:"custom_skip_prefixes"`
} }
type TaskConfig struct { type TaskConfig struct {
@ -131,7 +117,7 @@ type Config struct {
TempDir string `json:"temp_dir" env:"TEMP_DIR"` TempDir string `json:"temp_dir" env:"TEMP_DIR"`
BleveDir string `json:"bleve_dir" env:"BLEVE_DIR"` BleveDir string `json:"bleve_dir" env:"BLEVE_DIR"`
DistDir string `json:"dist_dir"` DistDir string `json:"dist_dir"`
Log LogConfig `json:"log"` Log LogConfig `json:"log" envPrefix:"LOG_"`
DelayedStart int `json:"delayed_start" env:"DELAYED_START"` DelayedStart int `json:"delayed_start" env:"DELAYED_START"`
MaxConnections int `json:"max_connections" env:"MAX_CONNECTIONS"` MaxConnections int `json:"max_connections" env:"MAX_CONNECTIONS"`
MaxConcurrency int `json:"max_concurrency" env:"MAX_CONCURRENCY"` MaxConcurrency int `json:"max_concurrency" env:"MAX_CONCURRENCY"`
@ -179,13 +165,12 @@ func DefaultConfig(dataDir string) *Config {
MaxBackups: 30, MaxBackups: 30,
MaxAge: 28, MaxAge: 28,
Filter: LogFilterConfig{ Filter: LogFilterConfig{
EnableFiltering: true, Enable: false,
FilterHealthChecks: true, Filters: []Filter{
FilterWebDAV: true, {Path: "/ping"},
FilterHEADRequests: true, {Method: "HEAD"},
CustomSkipPaths: []string{}, {Path: "/dav/", Method: "PROPFIND"},
CustomSkipMethods: []string{}, },
CustomSkipPrefixes: []string{},
}, },
}, },
MaxConnections: 0, MaxConnections: 0,

View File

@ -17,7 +17,7 @@ const (
AllowMounted = "allow_mounted" AllowMounted = "allow_mounted"
RobotsTxt = "robots_txt" RobotsTxt = "robots_txt"
Logo = "logo" Logo = "logo" // multi-lines text, L1: light, EOL: dark
Favicon = "favicon" Favicon = "favicon"
MainColor = "main_color" MainColor = "main_color"

View File

@ -55,13 +55,6 @@ func (t *ArchiveDownloadTask) Run() error {
} }
func (t *ArchiveDownloadTask) RunWithoutPushUploadTask() (*ArchiveContentUploadTask, error) { func (t *ArchiveDownloadTask) RunWithoutPushUploadTask() (*ArchiveContentUploadTask, error) {
var err error
if t.SrcStorage == nil {
t.SrcStorage, err = op.GetStorageByMountPath(t.SrcStorageMp)
if err != nil {
return nil, err
}
}
srcObj, tool, ss, err := op.GetArchiveToolAndStream(t.Ctx(), t.SrcStorage, t.SrcActualPath, model.LinkArgs{}) srcObj, tool, ss, err := op.GetArchiveToolAndStream(t.Ctx(), t.SrcStorage, t.SrcActualPath, model.LinkArgs{})
if err != nil { if err != nil {
return nil, err return nil, err
@ -111,7 +104,7 @@ func (t *ArchiveDownloadTask) RunWithoutPushUploadTask() (*ArchiveContentUploadT
baseName := strings.TrimSuffix(srcObj.GetName(), stdpath.Ext(srcObj.GetName())) baseName := strings.TrimSuffix(srcObj.GetName(), stdpath.Ext(srcObj.GetName()))
uploadTask := &ArchiveContentUploadTask{ uploadTask := &ArchiveContentUploadTask{
TaskExtension: task.TaskExtension{ TaskExtension: task.TaskExtension{
Creator: t.GetCreator(), Creator: t.Creator,
ApiUrl: t.ApiUrl, ApiUrl: t.ApiUrl,
}, },
ObjName: baseName, ObjName: baseName,
@ -179,13 +172,6 @@ func (t *ArchiveContentUploadTask) SetRetry(retry int, maxRetry int) {
} }
func (t *ArchiveContentUploadTask) RunWithNextTaskCallback(f func(nextTask *ArchiveContentUploadTask) error) error { func (t *ArchiveContentUploadTask) RunWithNextTaskCallback(f func(nextTask *ArchiveContentUploadTask) error) error {
var err error
if t.dstStorage == nil {
t.dstStorage, err = op.GetStorageByMountPath(t.DstStorageMp)
if err != nil {
return err
}
}
info, err := os.Stat(t.FilePath) info, err := os.Stat(t.FilePath)
if err != nil { if err != nil {
return err return err
@ -224,7 +210,7 @@ func (t *ArchiveContentUploadTask) RunWithNextTaskCallback(f func(nextTask *Arch
} }
err = f(&ArchiveContentUploadTask{ err = f(&ArchiveContentUploadTask{
TaskExtension: task.TaskExtension{ TaskExtension: task.TaskExtension{
Creator: t.GetCreator(), Creator: t.Creator,
ApiUrl: t.ApiUrl, ApiUrl: t.ApiUrl,
}, },
ObjName: entry.Name(), ObjName: entry.Name(),
@ -243,11 +229,11 @@ func (t *ArchiveContentUploadTask) RunWithNextTaskCallback(f func(nextTask *Arch
return es return es
} }
} else { } else {
t.SetTotalBytes(info.Size())
file, err := os.Open(t.FilePath) file, err := os.Open(t.FilePath)
if err != nil { if err != nil {
return err return err
} }
t.SetTotalBytes(info.Size())
fs := &stream.FileStream{ fs := &stream.FileStream{
Obj: &model.Object{ Obj: &model.Object{
Name: t.ObjName, Name: t.ObjName,
@ -379,13 +365,8 @@ func archiveDecompress(ctx context.Context, srcObjPath, dstDirPath string, args
return nil, err return nil, err
} }
} }
taskCreator, _ := ctx.Value(conf.UserKey).(*model.User)
tsk := &ArchiveDownloadTask{ tsk := &ArchiveDownloadTask{
TaskData: TaskData{ TaskData: TaskData{
TaskExtension: task.TaskExtension{
Creator: taskCreator,
ApiUrl: common.GetApiUrl(ctx),
},
SrcStorage: srcStorage, SrcStorage: srcStorage,
DstStorage: dstStorage, DstStorage: dstStorage,
SrcActualPath: srcObjActualPath, SrcActualPath: srcObjActualPath,
@ -396,6 +377,7 @@ func archiveDecompress(ctx context.Context, srcObjPath, dstDirPath string, args
ArchiveDecompressArgs: args, ArchiveDecompressArgs: args,
} }
if ctx.Value(conf.NoTaskKey) != nil { if ctx.Value(conf.NoTaskKey) != nil {
tsk.Base.SetCtx(ctx)
uploadTask, err := tsk.RunWithoutPushUploadTask() uploadTask, err := tsk.RunWithoutPushUploadTask()
if err != nil { if err != nil {
return nil, errors.WithMessagef(err, "failed download [%s]", srcObjPath) return nil, errors.WithMessagef(err, "failed download [%s]", srcObjPath)
@ -403,12 +385,16 @@ func archiveDecompress(ctx context.Context, srcObjPath, dstDirPath string, args
defer uploadTask.deleteSrcFile() defer uploadTask.deleteSrcFile()
var callback func(t *ArchiveContentUploadTask) error var callback func(t *ArchiveContentUploadTask) error
callback = func(t *ArchiveContentUploadTask) error { callback = func(t *ArchiveContentUploadTask) error {
t.Base.SetCtx(ctx)
e := t.RunWithNextTaskCallback(callback) e := t.RunWithNextTaskCallback(callback)
t.deleteSrcFile() t.deleteSrcFile()
return e return e
} }
uploadTask.Base.SetCtx(ctx)
return nil, uploadTask.RunWithNextTaskCallback(callback) return nil, uploadTask.RunWithNextTaskCallback(callback)
} else { } else {
tsk.Creator, _ = ctx.Value(conf.UserKey).(*model.User)
tsk.ApiUrl = common.GetApiUrl(ctx)
ArchiveDownloadTaskManager.Add(tsk) ArchiveDownloadTaskManager.Add(tsk)
return tsk, nil return tsk, nil
} }

View File

@ -7,7 +7,6 @@ import (
"time" "time"
"github.com/OpenListTeam/OpenList/v4/internal/conf" "github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs" "github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op" "github.com/OpenListTeam/OpenList/v4/internal/op"
@ -52,17 +51,16 @@ func (t *FileTransferTask) Run() error {
t.ClearEndTime() t.ClearEndTime()
t.SetStartTime(time.Now()) t.SetStartTime(time.Now())
defer func() { t.SetEndTime(time.Now()) }() defer func() { t.SetEndTime(time.Now()) }()
var err error return t.RunWithNextTaskCallback(func(nextTask *FileTransferTask) error {
if t.SrcStorage == nil { nextTask.groupID = t.groupID
t.SrcStorage, err = op.GetStorageByMountPath(t.SrcStorageMp) task_group.TransferCoordinator.AddTask(t.groupID, nil)
} if t.TaskType == copy {
if t.DstStorage == nil { CopyTaskManager.Add(nextTask)
t.DstStorage, err = op.GetStorageByMountPath(t.DstStorageMp) } else {
} MoveTaskManager.Add(nextTask)
if err != nil { }
return errors.WithMessage(err, "failed get storage") return nil
} })
return putBetween2Storages(t, t.SrcStorage, t.DstStorage, t.SrcActualPath, t.DstActualPath)
} }
func (t *FileTransferTask) OnSucceeded() { func (t *FileTransferTask) OnSucceeded() {
@ -109,51 +107,11 @@ func transfer(ctx context.Context, taskType taskType, srcObjPath, dstDirPath str
return nil, err return nil, err
} }
} }
} else if ctx.Value(conf.NoTaskKey) != nil {
return nil, fmt.Errorf("can't %s files between two storages, please use the front-end ", taskType)
} }
// if ctx.Value(conf.NoTaskKey) != nil { // webdav
// srcObj, err := op.Get(ctx, srcStorage, srcObjActualPath)
// if err != nil {
// return nil, errors.WithMessagef(err, "failed get src [%s] file", srcObjPath)
// }
// if !srcObj.IsDir() {
// // copy file directly
// link, _, err := op.Link(ctx, srcStorage, srcObjActualPath, model.LinkArgs{})
// if err != nil {
// return nil, errors.WithMessagef(err, "failed get [%s] link", srcObjPath)
// }
// // any link provided is seekable
// ss, err := stream.NewSeekableStream(&stream.FileStream{
// Obj: srcObj,
// Ctx: ctx,
// }, link)
// if err != nil {
// _ = link.Close()
// return nil, errors.WithMessagef(err, "failed get [%s] stream", srcObjPath)
// }
// if taskType == move {
// defer func() {
// task_group.TransferCoordinator.Done(dstDirPath, err == nil)
// }()
// task_group.TransferCoordinator.AddTask(dstDirPath, task_group.SrcPathToRemove(srcObjPath))
// }
// err = op.Put(ctx, dstStorage, dstDirActualPath, ss, nil, taskType == move)
// return nil, err
// } else {
// return nil, fmt.Errorf("can't %s dir two storages, please use the front-end ", taskType)
// }
// }
// not in the same storage // not in the same storage
taskCreator, _ := ctx.Value(conf.UserKey).(*model.User)
t := &FileTransferTask{ t := &FileTransferTask{
TaskData: TaskData{ TaskData: TaskData{
TaskExtension: task.TaskExtension{
Creator: taskCreator,
ApiUrl: common.GetApiUrl(ctx),
},
SrcStorage: srcStorage, SrcStorage: srcStorage,
DstStorage: dstStorage, DstStorage: dstStorage,
SrcActualPath: srcObjActualPath, SrcActualPath: srcObjActualPath,
@ -162,8 +120,34 @@ func transfer(ctx context.Context, taskType taskType, srcObjPath, dstDirPath str
DstStorageMp: dstStorage.GetStorage().MountPath, DstStorageMp: dstStorage.GetStorage().MountPath,
}, },
TaskType: taskType, TaskType: taskType,
groupID: dstDirPath,
} }
if ctx.Value(conf.NoTaskKey) != nil {
var callback func(nextTask *FileTransferTask) error
hasSuccess := false
callback = func(nextTask *FileTransferTask) error {
nextTask.Base.SetCtx(ctx)
err := nextTask.RunWithNextTaskCallback(callback)
if err == nil {
hasSuccess = true
}
return err
}
t.Base.SetCtx(ctx)
err = t.RunWithNextTaskCallback(callback)
if hasSuccess || err == nil {
if taskType == move {
task_group.RefreshAndRemove(dstDirPath, task_group.SrcPathToRemove(srcObjPath))
} else {
op.DeleteCache(t.DstStorage, dstDirActualPath)
}
}
return nil, err
}
t.Creator, _ = ctx.Value(conf.UserKey).(*model.User)
t.ApiUrl = common.GetApiUrl(ctx)
t.groupID = dstDirPath
if taskType == copy { if taskType == copy {
task_group.TransferCoordinator.AddTask(dstDirPath, nil) task_group.TransferCoordinator.AddTask(dstDirPath, nil)
CopyTaskManager.Add(t) CopyTaskManager.Add(t)
@ -174,76 +158,69 @@ func transfer(ctx context.Context, taskType taskType, srcObjPath, dstDirPath str
return t, nil return t, nil
} }
func putBetween2Storages(t *FileTransferTask, srcStorage, dstStorage driver.Driver, srcActualPath, dstDirActualPath string) error { func (t *FileTransferTask) RunWithNextTaskCallback(f func(nextTask *FileTransferTask) error) error {
t.Status = "getting src object" t.Status = "getting src object"
srcObj, err := op.Get(t.Ctx(), srcStorage, srcActualPath) srcObj, err := op.Get(t.Ctx(), t.SrcStorage, t.SrcActualPath)
if err != nil { if err != nil {
return errors.WithMessagef(err, "failed get src [%s] file", srcActualPath) return errors.WithMessagef(err, "failed get src [%s] file", t.SrcActualPath)
} }
if srcObj.IsDir() { if srcObj.IsDir() {
t.Status = "src object is dir, listing objs" t.Status = "src object is dir, listing objs"
objs, err := op.List(t.Ctx(), srcStorage, srcActualPath, model.ListArgs{}) objs, err := op.List(t.Ctx(), t.SrcStorage, t.SrcActualPath, model.ListArgs{})
if err != nil { if err != nil {
return errors.WithMessagef(err, "failed list src [%s] objs", srcActualPath) return errors.WithMessagef(err, "failed list src [%s] objs", t.SrcActualPath)
} }
dstActualPath := stdpath.Join(dstDirActualPath, srcObj.GetName()) dstActualPath := stdpath.Join(t.DstActualPath, srcObj.GetName())
if t.TaskType == copy { if t.TaskType == copy {
task_group.TransferCoordinator.AppendPayload(t.groupID, task_group.DstPathToRefresh(dstActualPath)) if t.Ctx().Value(conf.NoTaskKey) != nil {
defer op.DeleteCache(t.DstStorage, dstActualPath)
} else {
task_group.TransferCoordinator.AppendPayload(t.groupID, task_group.DstPathToRefresh(dstActualPath))
}
} }
for _, obj := range objs { for _, obj := range objs {
if utils.IsCanceled(t.Ctx()) { if utils.IsCanceled(t.Ctx()) {
return nil return nil
} }
task := &FileTransferTask{ err = f(&FileTransferTask{
TaskType: t.TaskType, TaskType: t.TaskType,
TaskData: TaskData{ TaskData: TaskData{
TaskExtension: task.TaskExtension{ TaskExtension: task.TaskExtension{
Creator: t.GetCreator(), Creator: t.Creator,
ApiUrl: t.ApiUrl, ApiUrl: t.ApiUrl,
}, },
SrcStorage: srcStorage, SrcStorage: t.SrcStorage,
DstStorage: dstStorage, DstStorage: t.DstStorage,
SrcActualPath: stdpath.Join(srcActualPath, obj.GetName()), SrcActualPath: stdpath.Join(t.SrcActualPath, obj.GetName()),
DstActualPath: dstActualPath, DstActualPath: dstActualPath,
SrcStorageMp: srcStorage.GetStorage().MountPath, SrcStorageMp: t.SrcStorageMp,
DstStorageMp: dstStorage.GetStorage().MountPath, DstStorageMp: t.DstStorageMp,
}, },
groupID: t.groupID, })
} if err != nil {
task_group.TransferCoordinator.AddTask(t.groupID, nil) return err
if t.TaskType == copy {
CopyTaskManager.Add(task)
} else {
MoveTaskManager.Add(task)
} }
} }
t.Status = fmt.Sprintf("src object is dir, added all %s tasks of objs", t.TaskType) t.Status = fmt.Sprintf("src object is dir, added all %s tasks of objs", t.TaskType)
return nil return nil
} }
return putFileBetween2Storages(t, srcStorage, dstStorage, srcActualPath, dstDirActualPath)
}
func putFileBetween2Storages(tsk *FileTransferTask, srcStorage, dstStorage driver.Driver, srcActualPath, dstDirActualPath string) error { link, _, err := op.Link(t.Ctx(), t.SrcStorage, t.SrcActualPath, model.LinkArgs{})
srcFile, err := op.Get(tsk.Ctx(), srcStorage, srcActualPath)
if err != nil { if err != nil {
return errors.WithMessagef(err, "failed get src [%s] file", srcActualPath) return errors.WithMessagef(err, "failed get [%s] link", t.SrcActualPath)
}
tsk.SetTotalBytes(srcFile.GetSize())
link, _, err := op.Link(tsk.Ctx(), srcStorage, srcActualPath, model.LinkArgs{})
if err != nil {
return errors.WithMessagef(err, "failed get [%s] link", srcActualPath)
} }
// any link provided is seekable // any link provided is seekable
ss, err := stream.NewSeekableStream(&stream.FileStream{ ss, err := stream.NewSeekableStream(&stream.FileStream{
Obj: srcFile, Obj: srcObj,
Ctx: tsk.Ctx(), Ctx: t.Ctx(),
}, link) }, link)
if err != nil { if err != nil {
_ = link.Close() _ = link.Close()
return errors.WithMessagef(err, "failed get [%s] stream", srcActualPath) return errors.WithMessagef(err, "failed get [%s] stream", t.SrcActualPath)
} }
tsk.SetTotalBytes(ss.GetSize()) t.SetTotalBytes(ss.GetSize())
return op.Put(tsk.Ctx(), dstStorage, dstDirActualPath, ss, tsk.SetProgress, true) t.Status = "uploading"
return op.Put(t.Ctx(), t.DstStorage, t.DstActualPath, ss, t.SetProgress, true)
} }
var ( var (

View File

@ -6,7 +6,7 @@ import (
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
) )
type OnCompletionFunc func(groupID string, payloads []any) type OnCompletionFunc func(groupID string, payloads ...any)
type TaskGroupCoordinator struct { type TaskGroupCoordinator struct {
name string name string
mu sync.Mutex mu sync.Mutex
@ -71,7 +71,7 @@ func (tgc *TaskGroupCoordinator) Done(groupID string, success bool) {
if tgc.onCompletion != nil && state.hasSuccess { if tgc.onCompletion != nil && state.hasSuccess {
logrus.Debugf("OnCompletion:%s", groupID) logrus.Debugf("OnCompletion:%s", groupID)
tgc.mu.Unlock() tgc.mu.Unlock()
tgc.onCompletion(groupID, payloads) tgc.onCompletion(groupID, payloads...)
tgc.mu.Lock() tgc.mu.Lock()
} }
return return

View File

@ -17,7 +17,7 @@ type SrcPathToRemove string
// ActualPath // ActualPath
type DstPathToRefresh string type DstPathToRefresh string
func refreshAndRemove(dstPath string, payloads []any) { func RefreshAndRemove(dstPath string, payloads ...any) {
dstStorage, dstActualPath, err := op.GetStorageAndActualPath(dstPath) dstStorage, dstActualPath, err := op.GetStorageAndActualPath(dstPath)
if err != nil { if err != nil {
log.Error(errors.WithMessage(err, "failed get dst storage")) log.Error(errors.WithMessage(err, "failed get dst storage"))
@ -100,4 +100,4 @@ func verifyAndRemove(ctx context.Context, srcStorage, dstStorage driver.Driver,
return nil return nil
} }
var TransferCoordinator *TaskGroupCoordinator = NewTaskGroupCoordinator("RefreshAndRemove", refreshAndRemove) var TransferCoordinator *TaskGroupCoordinator = NewTaskGroupCoordinator("RefreshAndRemove", RefreshAndRemove)

View File

@ -75,20 +75,12 @@ func EncodePath(path string, all ...bool) string {
} }
func JoinBasePath(basePath, reqPath string) (string, error) { func JoinBasePath(basePath, reqPath string) (string, error) {
reqPath, err := CheckRelativePath(reqPath) isRelativePath := strings.Contains(reqPath, "..")
if err != nil { reqPath = FixAndCleanPath(reqPath)
return "", err if isRelativePath && !strings.Contains(reqPath, "..") {
}
return stdpath.Join(FixAndCleanPath(basePath), reqPath), nil
}
func CheckRelativePath(path string) (string, error) {
isRelativePath := strings.Contains(path, "..")
path = FixAndCleanPath(path)
if isRelativePath && !strings.Contains(path, "..") {
return "", errs.RelativePath return "", errs.RelativePath
} }
return path, nil return stdpath.Join(FixAndCleanPath(basePath), reqPath), nil
} }
func GetFullPath(mountPath, path string) string { func GetFullPath(mountPath, path string) string {

View File

@ -11,7 +11,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op" "github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/pkg/generic" "github.com/OpenListTeam/OpenList/v4/pkg/generic"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/OpenList/v4/server/common" "github.com/OpenListTeam/OpenList/v4/server/common"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/pkg/errors" "github.com/pkg/errors"
@ -174,7 +173,7 @@ func FsBatchRename(c *gin.Context) {
if renameObject.SrcName == "" || renameObject.NewName == "" { if renameObject.SrcName == "" || renameObject.NewName == "" {
continue continue
} }
renameObject.NewName, err = utils.CheckRelativePath(renameObject.NewName) err = checkRelativePath(renameObject.NewName)
if err != nil { if err != nil {
common.ErrorResp(c, err, 403) common.ErrorResp(c, err, 403)
return return
@ -235,7 +234,8 @@ func FsRegexRename(c *gin.Context) {
for _, file := range files { for _, file := range files {
if srcRegexp.MatchString(file.GetName()) { if srcRegexp.MatchString(file.GetName()) {
newFileName, err := utils.CheckRelativePath(srcRegexp.ReplaceAllString(file.GetName(), req.NewNameRegex)) newFileName := srcRegexp.ReplaceAllString(file.GetName(), req.NewNameRegex)
err := checkRelativePath(newFileName)
if err != nil { if err != nil {
common.ErrorResp(c, err, 403) common.ErrorResp(c, err, 403)
return return

View File

@ -3,6 +3,7 @@ package handles
import ( import (
"fmt" "fmt"
stdpath "path" stdpath "path"
"strings"
"github.com/OpenListTeam/OpenList/v4/internal/conf" "github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/task" "github.com/OpenListTeam/OpenList/v4/internal/task"
@ -205,7 +206,7 @@ func FsRename(c *gin.Context) {
} }
reqPath, err := user.JoinPath(req.Path) reqPath, err := user.JoinPath(req.Path)
if err == nil { if err == nil {
req.Name, err = utils.CheckRelativePath(req.Name) err = checkRelativePath(req.Name)
} }
if err != nil { if err != nil {
common.ErrorResp(c, err, 403) common.ErrorResp(c, err, 403)
@ -227,6 +228,13 @@ func FsRename(c *gin.Context) {
common.SuccessResp(c) common.SuccessResp(c)
} }
func checkRelativePath(path string) error {
if strings.ContainsAny(path, "/\\") || path == "" || path == "." || path == ".." {
return errs.RelativePath
}
return nil
}
type RemoveReq struct { type RemoveReq struct {
Dir string `json:"dir"` Dir string `json:"dir"`
Names []string `json:"names"` Names []string `json:"names"`

View File

@ -1,55 +0,0 @@
package middlewares
import (
"github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/gin-gonic/gin"
log "github.com/sirupsen/logrus"
)
// UnifiedFilteredLogger returns a filtered logger using global configuration
// serverType: "http" for main HTTP server, "s3" for S3 server
func UnifiedFilteredLogger(serverType string) gin.HandlerFunc {
config := conf.Conf.Log.Filter
if !config.EnableFiltering {
// Return standard Gin logger if filtering is disabled
return gin.LoggerWithWriter(log.StandardLogger().Out)
}
loggerConfig := FilteredLoggerConfig{
Output: log.StandardLogger().Out,
}
// Add health check paths
if config.FilterHealthChecks {
loggerConfig.SkipPaths = append(loggerConfig.SkipPaths, "/ping")
}
// Add HEAD method filtering
if config.FilterHEADRequests {
loggerConfig.SkipMethods = append(loggerConfig.SkipMethods, "HEAD")
}
// Add WebDAV filtering only for HTTP server (not for S3)
if config.FilterWebDAV && serverType == "http" {
loggerConfig.SkipPathPrefixes = append(loggerConfig.SkipPathPrefixes, "/dav/")
loggerConfig.SkipMethods = append(loggerConfig.SkipMethods, "PROPFIND")
}
// Add custom configurations
loggerConfig.SkipPaths = append(loggerConfig.SkipPaths, config.CustomSkipPaths...)
loggerConfig.SkipMethods = append(loggerConfig.SkipMethods, config.CustomSkipMethods...)
loggerConfig.SkipPathPrefixes = append(loggerConfig.SkipPathPrefixes, config.CustomSkipPrefixes...)
return FilteredLoggerWithConfig(loggerConfig)
}
// HTTPFilteredLogger returns a filtered logger for the main HTTP server
func HTTPFilteredLogger() gin.HandlerFunc {
return UnifiedFilteredLogger("http")
}
// S3FilteredLogger returns a filtered logger for the S3 server
func S3FilteredLogger() gin.HandlerFunc {
return UnifiedFilteredLogger("s3")
}

View File

@ -1,101 +1,99 @@
package middlewares package middlewares
import ( import (
"fmt" "net/netip"
"io"
"strings" "strings"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
// FilteredLoggerConfig defines the configuration for the filtered logger type filter struct {
type FilteredLoggerConfig struct { CIDR *netip.Prefix `json:"cidr,omitempty"`
// SkipPaths is a list of URL paths to skip logging Path *string `json:"path,omitempty"`
SkipPaths []string Method *string `json:"method,omitempty"`
// SkipMethods is a list of HTTP methods to skip logging
SkipMethods []string
// SkipPathPrefixes is a list of URL path prefixes to skip logging
SkipPathPrefixes []string
// Output is the writer where logs will be written
Output io.Writer
} }
// FilteredLoggerWithConfig returns a gin.HandlerFunc (middleware) that logs requests var filterList []*filter
// but skips logging for specified paths, methods, or path prefixes
func FilteredLoggerWithConfig(config FilteredLoggerConfig) gin.HandlerFunc {
if config.Output == nil {
config.Output = log.StandardLogger().Out
}
return gin.LoggerWithConfig(gin.LoggerConfig{ func initFilterList() {
Output: config.Output, for _, s := range conf.Conf.Log.Filter.Filters {
SkipPaths: config.SkipPaths, f := new(filter)
Formatter: func(param gin.LogFormatterParams) string {
// Skip logging for health check endpoints if s.CIDR != "" {
if shouldSkipLogging(param.Path, param.Method, config) { cidr, err := netip.ParsePrefix(s.CIDR)
return "" if err != nil {
log.Errorf("failed to parse CIDR %s: %v", s.CIDR, err)
continue
} }
f.CIDR = &cidr
}
// Use a custom log format similar to Gin's default if s.Path != "" {
return defaultLogFormatter(param) f.Path = &s.Path
}, }
})
if s.Method != "" {
f.Method = &s.Method
}
if f.CIDR == nil && f.Path == nil && f.Method == nil {
log.Warnf("filter %s is empty, skipping", s)
continue
}
filterList = append(filterList, f)
log.Debugf("added filter: %+v", f)
}
log.Infof("Loaded %d log filters.", len(filterList))
} }
func skiperDecider(c *gin.Context) bool {
// every filter needs to match all conditions for the filter to match
// so if any condition does not match, skip this filter
// if all filters mismatch, log this request
// shouldSkipLogging determines if a request should be skipped from logging for _, f := range filterList {
func shouldSkipLogging(path, method string, config FilteredLoggerConfig) bool { if f.CIDR != nil {
// Check if path should be skipped cip := netip.MustParseAddr(c.ClientIP())
for _, skipPath := range config.SkipPaths { if !f.CIDR.Contains(cip) {
if path == skipPath { continue
return true }
} }
}
// Check if method should be skipped if f.Path != nil {
for _, skipMethod := range config.SkipMethods { if (*f.Path)[0] == '/' {
if method == skipMethod { // match path as prefix/exact path
return true if !strings.HasPrefix(c.Request.URL.Path, *f.Path) {
continue
}
} else {
// match path as relative path
if !strings.Contains(c.Request.URL.Path, "/"+*f.Path) {
continue
}
}
} }
}
// Check if path prefix should be skipped if f.Method != nil {
for _, skipPrefix := range config.SkipPathPrefixes { if *f.Method != c.Request.Method {
if strings.HasPrefix(path, skipPrefix) { continue
return true }
} }
}
// Special case: Skip PROPFIND requests (common in WebDAV)
if method == "PROPFIND" {
return true return true
} }
return false return false
} }
// defaultLogFormatter provides a default log format similar to Gin's built-in formatter func FilteredLogger() gin.HandlerFunc {
func defaultLogFormatter(param gin.LogFormatterParams) string { initFilterList()
var statusColor, methodColor, resetColor string
if param.IsOutputColor() {
statusColor = param.StatusCodeColor()
methodColor = param.MethodColor()
resetColor = param.ResetColor()
}
if param.Latency > time.Minute { return gin.LoggerWithConfig(gin.LoggerConfig{
param.Latency = param.Latency.Truncate(time.Second) Output: log.StandardLogger().Out,
} Skip: skiperDecider,
})
return fmt.Sprintf("[GIN] %v |%s %3d %s| %13v | %15s |%s %-7s %s %#v\n%s", }
param.TimeStamp.Format("2006/01/02 - 15:04:05"),
statusColor, param.StatusCode, resetColor,
param.Latency,
param.ClientIP,
methodColor, param.Method, resetColor,
param.Path,
param.ErrorMessage,
)
}

View File

@ -15,7 +15,7 @@ type SiteConfig struct {
func getSiteConfig() SiteConfig { func getSiteConfig() SiteConfig {
siteConfig := SiteConfig{ siteConfig := SiteConfig{
BasePath: conf.URL.Path, BasePath: conf.URL.Path,
Cdn: strings.ReplaceAll(strings.TrimSuffix(conf.Conf.Cdn, "/"), "$version", strings.TrimPrefix(conf.WebVersion, "v"),), Cdn: strings.ReplaceAll(strings.TrimSuffix(conf.Conf.Cdn, "/"), "$version", strings.TrimPrefix(conf.WebVersion, "v")),
} }
if siteConfig.BasePath != "" { if siteConfig.BasePath != "" {
siteConfig.BasePath = utils.FixAndCleanPath(siteConfig.BasePath) siteConfig.BasePath = utils.FixAndCleanPath(siteConfig.BasePath)

View File

@ -41,9 +41,8 @@ func replaceStrings(content string, replacements map[string]string) string {
return content return content
} }
func initIndex() { func initIndex(siteConfig SiteConfig) {
utils.Log.Debug("Initializing index.html...") utils.Log.Debug("Initializing index.html...")
siteConfig := getSiteConfig()
// dist_dir is empty and cdn is not empty add web_version is empty or beta or dev // dist_dir is empty and cdn is not empty add web_version is empty or beta or dev
if conf.Conf.DistDir == "" && conf.Conf.Cdn != "" && (conf.WebVersion == "" || conf.WebVersion == "beta" || conf.WebVersion == "dev") { if conf.Conf.DistDir == "" && conf.Conf.Cdn != "" && (conf.WebVersion == "" || conf.WebVersion == "beta" || conf.WebVersion == "dev") {
utils.Log.Infof("Fetching index.html from CDN: %s/index.html...", conf.Conf.Cdn) utils.Log.Infof("Fetching index.html from CDN: %s/index.html...", conf.Conf.Cdn)
@ -89,6 +88,7 @@ func initIndex() {
func UpdateIndex() { func UpdateIndex() {
utils.Log.Debug("Updating index.html with settings...") utils.Log.Debug("Updating index.html with settings...")
favicon := setting.GetStr(conf.Favicon) favicon := setting.GetStr(conf.Favicon)
logo := strings.Split(setting.GetStr(conf.Logo), "\n")[0]
title := setting.GetStr(conf.SiteTitle) title := setting.GetStr(conf.SiteTitle)
customizeHead := setting.GetStr(conf.CustomizeHead) customizeHead := setting.GetStr(conf.CustomizeHead)
customizeBody := setting.GetStr(conf.CustomizeBody) customizeBody := setting.GetStr(conf.CustomizeBody)
@ -96,6 +96,7 @@ func UpdateIndex() {
utils.Log.Debug("Applying replacements for default pages...") utils.Log.Debug("Applying replacements for default pages...")
replaceMap1 := map[string]string{ replaceMap1 := map[string]string{
"https://cdn.oplist.org/gh/OpenListTeam/Logo@main/logo.svg": favicon, "https://cdn.oplist.org/gh/OpenListTeam/Logo@main/logo.svg": favicon,
"https://cdn.oplist.org/gh/OpenListTeam/Logo@main/logo.png": logo,
"Loading...": title, "Loading...": title,
"main_color: undefined": fmt.Sprintf("main_color: '%s'", mainColor), "main_color: undefined": fmt.Sprintf("main_color: '%s'", mainColor),
} }
@ -111,8 +112,9 @@ func UpdateIndex() {
func Static(r *gin.RouterGroup, noRoute func(handlers ...gin.HandlerFunc)) { func Static(r *gin.RouterGroup, noRoute func(handlers ...gin.HandlerFunc)) {
utils.Log.Debug("Setting up static routes...") utils.Log.Debug("Setting up static routes...")
siteConfig := getSiteConfig()
initStatic() initStatic()
initIndex() initIndex(siteConfig)
folders := []string{"assets", "images", "streamer", "static"} folders := []string{"assets", "images", "streamer", "static"}
if conf.Conf.Cdn == "" { if conf.Conf.Cdn == "" {
utils.Log.Debug("Setting up static file serving...") utils.Log.Debug("Setting up static file serving...")
@ -136,7 +138,7 @@ func Static(r *gin.RouterGroup, noRoute func(handlers ...gin.HandlerFunc)) {
for _, folder := range folders { for _, folder := range folders {
r.GET(fmt.Sprintf("/%s/*filepath", folder), func(c *gin.Context) { r.GET(fmt.Sprintf("/%s/*filepath", folder), func(c *gin.Context) {
filepath := c.Param("filepath") filepath := c.Param("filepath")
c.Redirect(http.StatusFound, fmt.Sprintf("%s/%s%s", conf.Conf.Cdn, folder, filepath)) c.Redirect(http.StatusFound, fmt.Sprintf("%s/%s%s", siteConfig.Cdn, folder, filepath))
}) })
} }
} }