Mirror of https://github.com/OpenListTeam/OpenList.git, synced 2025-09-20 04:36:09 +08:00
Compare commits
33 Commits
Commit SHA1s (author and date columns were not captured):
55d3827dee
1fbc9427df
bb3d139a47
d227ab85d6
5342ae96d0
273e15a050
13aad2c2fa
368dc65a6e
8b4b6ba970
4d28e838ce
3930d4789a
d0c22a1ecb
57fceabcf4
8c244a984d
df479ba806
5ae8e96237
aa0ced47b0
ab747d9052
93c06213d4
b9b8eed285
317d190b77
52d7d819ad
0483e0f868
08dae4f55f
9ac0484bc0
8cf15183a0
c8f2aaaa55
1208bd0a83
6b096bcad4
58dbf088f9
05ff7908f2
a703b736c9
e458f2ab53
buf.gen.yaml (Normal file, 11 lines)
@@ -0,0 +1,11 @@
version: v1
plugins:
  - plugin: buf.build/protocolbuffers/go:v1.36.7
    out: .
    opt:
      - paths=source_relative
  - plugin: buf.build/grpc/go:v1.5.1
    out: .
    opt:
      - paths=source_relative
      - require_unimplemented_servers=false
@@ -1,51 +1,42 @@
package cmd

import (
    "os"
    "path/filepath"
    "strconv"
    "context"

    "github.com/OpenListTeam/OpenList/v4/internal/bootstrap"
    "github.com/OpenListTeam/OpenList/v4/internal/bootstrap/data"
    "github.com/OpenListTeam/OpenList/v4/internal/db"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
    log "github.com/sirupsen/logrus"
    "github.com/OpenListTeam/OpenList/v5/cmd/flags"
    "github.com/OpenListTeam/OpenList/v5/internal/bootstrap"
    "github.com/sirupsen/logrus"
)

func Init() {
func Init(ctx context.Context) {
    if flags.Dev {
        flags.Debug = true
    }
    initLogrus()
    bootstrap.InitConfig()
    bootstrap.Log()
    bootstrap.InitDB()
    data.InitData()
    bootstrap.InitStreamLimit()
    bootstrap.InitIndex()
    bootstrap.InitUpgradePatch()
    bootstrap.InitDriverPlugins()
}

func Release() {
    db.Close()
}

var pid = -1
var pidFile string

func initDaemon() {
    ex, err := os.Executable()
    if err != nil {
        log.Fatal(err)
    }
    exPath := filepath.Dir(ex)
    _ = os.MkdirAll(filepath.Join(exPath, "daemon"), 0700)
    pidFile = filepath.Join(exPath, "daemon/pid")
    if utils.Exists(pidFile) {
        bytes, err := os.ReadFile(pidFile)
        if err != nil {
            log.Fatal("failed to read pid file", err)
        }
        id, err := strconv.Atoi(string(bytes))
        if err != nil {
            log.Fatal("failed to parse pid data", err)
        }
        pid = id
func initLog(l *logrus.Logger) {
    if flags.Debug {
        l.SetLevel(logrus.DebugLevel)
        l.SetReportCaller(true)
    } else {
        l.SetLevel(logrus.InfoLevel)
        l.SetReportCaller(false)
    }
}
func initLogrus() {
    formatter := logrus.TextFormatter{
        ForceColors:               true,
        EnvironmentOverrideColors: true,
        TimestampFormat:           "2006-01-02 15:04:05",
        FullTimestamp:             true,
    }
    logrus.SetFormatter(&formatter)
    initLog(logrus.StandardLogger())
}
@@ -1,10 +1,40 @@
package flags

import (
    "os"
    "path/filepath"

    "github.com/sirupsen/logrus"
)

var (
    DataDir     string
    ConfigFile  string
    Debug       bool
    NoPrefix    bool
    Dev         bool
    ForceBinDir bool
    LogStd      bool

    pwd string
)

// Program working directory
func PWD() string {
    if pwd != "" {
        return pwd
    }
    if ForceBinDir {
        ex, err := os.Executable()
        if err != nil {
            logrus.Fatal(err)
        }
        pwd = filepath.Dir(ex)
        return pwd
    }
    d, err := os.Getwd()
    if err != nil {
        logrus.Fatal(err)
    }
    pwd = d
    return d
}
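The new flags.PWD helper caches the program working directory (or the binary's directory when --force-bin-dir is set) so callers can anchor relative paths against it, which is how InitConfig resolves flags.ConfigFile further down. A minimal sketch of that resolution pattern; resolveAgainstPWD is an illustrative helper, not part of this change.

package main

import (
    "fmt"
    "path/filepath"

    "github.com/OpenListTeam/OpenList/v5/cmd/flags"
)

// resolveAgainstPWD anchors a relative path at the program working directory;
// absolute paths pass through untouched.
func resolveAgainstPWD(p string) string {
    if filepath.IsAbs(p) {
        return p
    }
    return filepath.Join(flags.PWD(), p)
}

func main() {
    fmt.Println(resolveAgainstPWD("data/config.json"))
}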
cmd/root.go (11 lines changed)
@@ -4,10 +4,7 @@ import (
    "fmt"
    "os"

    "github.com/OpenListTeam/OpenList/v4/cmd/flags"
    _ "github.com/OpenListTeam/OpenList/v4/drivers"
    _ "github.com/OpenListTeam/OpenList/v4/internal/archive"
    _ "github.com/OpenListTeam/OpenList/v4/internal/offline_download"
    "github.com/OpenListTeam/OpenList/v5/cmd/flags"
    "github.com/spf13/cobra"
)

@@ -27,10 +24,10 @@ func Execute() {
}

func init() {
    RootCmd.PersistentFlags().StringVar(&flags.DataDir, "data", "data", "data folder")
    RootCmd.PersistentFlags().StringVarP(&flags.ConfigFile, "config", "c", "data/config.json", "config file")
    RootCmd.PersistentFlags().BoolVar(&flags.Debug, "debug", false, "start with debug mode")
    RootCmd.PersistentFlags().BoolVar(&flags.NoPrefix, "no-prefix", false, "disable env prefix")
    RootCmd.PersistentFlags().BoolVar(&flags.Dev, "dev", false, "start with dev mode")
    RootCmd.PersistentFlags().BoolVar(&flags.ForceBinDir, "force-bin-dir", false, "Force to use the directory where the binary file is located as data directory")
    RootCmd.PersistentFlags().BoolVar(&flags.LogStd, "log-std", false, "Force to log to std")
    RootCmd.PersistentFlags().BoolVarP(&flags.ForceBinDir, "force-bin-dir", "f", false, "force to use the directory where the binary file is located as data directory")
    RootCmd.PersistentFlags().BoolVar(&flags.LogStd, "log-std", false, "force to log to std")
}
cmd/server.go (195 lines changed)
@@ -13,15 +13,9 @@ import (
    "syscall"
    "time"

    "github.com/OpenListTeam/OpenList/v4/cmd/flags"
    "github.com/OpenListTeam/OpenList/v4/internal/bootstrap"
    "github.com/OpenListTeam/OpenList/v4/internal/conf"
    "github.com/OpenListTeam/OpenList/v4/internal/fs"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
    "github.com/OpenListTeam/OpenList/v4/server"
    "github.com/OpenListTeam/OpenList/v4/server/middlewares"
    "github.com/OpenListTeam/sftpd-openlist"
    ftpserver "github.com/fclairamb/ftpserverlib"
    "github.com/OpenListTeam/OpenList/v5/cmd/flags"
    "github.com/OpenListTeam/OpenList/v5/internal/conf"
    "github.com/OpenListTeam/OpenList/v5/server"
    "github.com/gin-gonic/gin"
    log "github.com/sirupsen/logrus"
    "github.com/spf13/cobra"

@@ -35,220 +29,127 @@ var ServerCmd = &cobra.Command{
    Short: "Start the server at the specified address",
    Long: `Start the server at the specified address
the address is defined in config file`,
    Run: func(cmd *cobra.Command, args []string) {
        Init()
        if conf.Conf.DelayedStart != 0 {
            utils.Log.Infof("delayed start for %d seconds", conf.Conf.DelayedStart)
            time.Sleep(time.Duration(conf.Conf.DelayedStart) * time.Second)
        }
        bootstrap.InitOfflineDownloadTools()
        bootstrap.LoadStorages()
        bootstrap.InitTaskManager()
        if !flags.Debug && !flags.Dev {
    Run: func(_ *cobra.Command, args []string) {
        serverCtx, serverCancel := context.WithCancel(context.Background())
        defer serverCancel()
        Init(serverCtx)

        if !flags.Debug {
            gin.SetMode(gin.ReleaseMode)
        }
        r := gin.New()

        // gin log
        if conf.Conf.Log.Filter.Enable {
            r.Use(middlewares.FilteredLogger())
        } else {
            r.Use(gin.LoggerWithWriter(log.StandardLogger().Out))
        }
        r.Use(gin.RecoveryWithWriter(log.StandardLogger().Out))

        server.Init(r)

        var httpHandler http.Handler = r
        if conf.Conf.Scheme.EnableH2c {
            httpHandler = h2c.NewHandler(r, &http2.Server{})
        }
        var httpSrv, httpsSrv, unixSrv *http.Server
        if conf.Conf.Scheme.HttpPort != -1 {
        if conf.Conf.Scheme.HttpPort > 0 {
            httpBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpPort)
            fmt.Printf("start HTTP server @ %s\n", httpBase)
            utils.Log.Infof("start HTTP server @ %s", httpBase)
            log.Infoln("start HTTP server", "@", httpBase)
            httpSrv = &http.Server{Addr: httpBase, Handler: httpHandler}
            go func() {
                err := httpSrv.ListenAndServe()
                if err != nil && !errors.Is(err, http.ErrServerClosed) {
                    utils.Log.Fatalf("failed to start http: %s", err.Error())
                    log.Errorln("start HTTP server", ":", err)
                    serverCancel()
                }
            }()
        }
        if conf.Conf.Scheme.HttpsPort != -1 {
        if conf.Conf.Scheme.HttpsPort > 0 {
            httpsBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpsPort)
            fmt.Printf("start HTTPS server @ %s\n", httpsBase)
            utils.Log.Infof("start HTTPS server @ %s", httpsBase)
            log.Infoln("start HTTPS server", "@", httpsBase)
            httpsSrv = &http.Server{Addr: httpsBase, Handler: r}
            go func() {
                err := httpsSrv.ListenAndServeTLS(conf.Conf.Scheme.CertFile, conf.Conf.Scheme.KeyFile)
                if err != nil && !errors.Is(err, http.ErrServerClosed) {
                    utils.Log.Fatalf("failed to start https: %s", err.Error())
                    log.Errorln("start HTTPS server", ":", err)
                    serverCancel()
                }
            }()
        }
        if conf.Conf.Scheme.UnixFile != "" {
            fmt.Printf("start unix server @ %s\n", conf.Conf.Scheme.UnixFile)
            utils.Log.Infof("start unix server @ %s", conf.Conf.Scheme.UnixFile)
            log.Infoln("start Unix server", "@", conf.Conf.Scheme.UnixFile)
            unixSrv = &http.Server{Handler: httpHandler}
            go func() {
                listener, err := net.Listen("unix", conf.Conf.Scheme.UnixFile)
                if err != nil {
                    utils.Log.Fatalf("failed to listen unix: %+v", err)
                    log.Errorln("start Unix server", ":", err)
                    serverCancel()
                    return
                }
                // set socket file permission

                mode, err := strconv.ParseUint(conf.Conf.Scheme.UnixFilePerm, 8, 32)
                if err != nil {
                    utils.Log.Errorf("failed to parse socket file permission: %+v", err)
                    log.Errorln("parse unix_file_perm", ":", err)
                } else {
                    err = os.Chmod(conf.Conf.Scheme.UnixFile, os.FileMode(mode))
                    if err != nil {
                        utils.Log.Errorf("failed to chmod socket file: %+v", err)
                        log.Errorln("chmod socket file", ":", err)
                    }
                }

                err = unixSrv.Serve(listener)
                if err != nil && !errors.Is(err, http.ErrServerClosed) {
                    utils.Log.Fatalf("failed to start unix: %s", err.Error())
                    log.Errorln("start Unix server", ":", err)
                    serverCancel()
                }
            }()
        }
        if conf.Conf.S3.Port != -1 && conf.Conf.S3.Enable {
            s3r := gin.New()
            s3r.Use(gin.LoggerWithWriter(log.StandardLogger().Out), gin.RecoveryWithWriter(log.StandardLogger().Out))
            server.InitS3(s3r)
            s3Base := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.S3.Port)
            fmt.Printf("start S3 server @ %s\n", s3Base)
            utils.Log.Infof("start S3 server @ %s", s3Base)
            go func() {
                var err error
                if conf.Conf.S3.SSL {
                    httpsSrv = &http.Server{Addr: s3Base, Handler: s3r}
                    err = httpsSrv.ListenAndServeTLS(conf.Conf.Scheme.CertFile, conf.Conf.Scheme.KeyFile)
                }
                if !conf.Conf.S3.SSL {
                    httpSrv = &http.Server{Addr: s3Base, Handler: s3r}
                    err = httpSrv.ListenAndServe()
                }
                if err != nil && !errors.Is(err, http.ErrServerClosed) {
                    utils.Log.Fatalf("failed to start s3 server: %s", err.Error())
                }
            }()
        }
        var ftpDriver *server.FtpMainDriver
        var ftpServer *ftpserver.FtpServer
        if conf.Conf.FTP.Listen != "" && conf.Conf.FTP.Enable {
            var err error
            ftpDriver, err = server.NewMainDriver()
            if err != nil {
                utils.Log.Fatalf("failed to start ftp driver: %s", err.Error())
            } else {
                fmt.Printf("start ftp server on %s\n", conf.Conf.FTP.Listen)
                utils.Log.Infof("start ftp server on %s", conf.Conf.FTP.Listen)
                go func() {
                    ftpServer = ftpserver.NewFtpServer(ftpDriver)
                    err = ftpServer.ListenAndServe()
                    if err != nil {
                        utils.Log.Fatalf("problem ftp server listening: %s", err.Error())
                    }
                }()
            }
        }
        var sftpDriver *server.SftpDriver
        var sftpServer *sftpd.SftpServer
        if conf.Conf.SFTP.Listen != "" && conf.Conf.SFTP.Enable {
            var err error
            sftpDriver, err = server.NewSftpDriver()
            if err != nil {
                utils.Log.Fatalf("failed to start sftp driver: %s", err.Error())
            } else {
                fmt.Printf("start sftp server on %s", conf.Conf.SFTP.Listen)
                utils.Log.Infof("start sftp server on %s", conf.Conf.SFTP.Listen)
                go func() {
                    sftpServer = sftpd.NewSftpServer(sftpDriver)
                    err = sftpServer.RunServer()
                    if err != nil {
                        utils.Log.Fatalf("problem sftp server listening: %s", err.Error())
                    }
                }()
            }
        }
        // Wait for interrupt signal to gracefully shutdown the server with
        // a timeout of 1 second.

        quit := make(chan os.Signal, 1)
        // kill (no param) sends syscall.SIGTERM by default
        // kill -2 is syscall.SIGINT
        // kill -9 is syscall.SIGKILL but can't be caught, so there is no need to add it
        signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
        <-quit
        utils.Log.Println("Shutdown server...")
        fs.ArchiveContentUploadTaskManager.RemoveAll()
        select {
        case <-quit:
        case <-serverCtx.Done():
        }

        log.Println("shutdown server...")
        Release()
        ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
        defer cancel()

        quitCtx, quitCancel := context.WithTimeout(context.Background(), time.Second)
        defer quitCancel()
        var wg sync.WaitGroup
        if conf.Conf.Scheme.HttpPort != -1 {
        if httpSrv != nil {
            wg.Add(1)
            go func() {
                defer wg.Done()
                if err := httpSrv.Shutdown(ctx); err != nil {
                    utils.Log.Fatal("HTTP server shutdown err: ", err)
                if err := httpSrv.Shutdown(quitCtx); err != nil {
                    log.Errorln("shutdown HTTP server", ":", err)
                }
            }()
        }
        if conf.Conf.Scheme.HttpsPort != -1 {
        if httpsSrv != nil {
            wg.Add(1)
            go func() {
                defer wg.Done()
                if err := httpsSrv.Shutdown(ctx); err != nil {
                    utils.Log.Fatal("HTTPS server shutdown err: ", err)
                if err := httpsSrv.Shutdown(quitCtx); err != nil {
                    log.Errorln("shutdown HTTPS server", ":", err)
                }
            }()
        }
        if conf.Conf.Scheme.UnixFile != "" {
        if unixSrv != nil {
            wg.Add(1)
            go func() {
                defer wg.Done()
                if err := unixSrv.Shutdown(ctx); err != nil {
                    utils.Log.Fatal("Unix server shutdown err: ", err)
                }
            }()
        }
        if conf.Conf.FTP.Listen != "" && conf.Conf.FTP.Enable && ftpServer != nil && ftpDriver != nil {
            wg.Add(1)
            go func() {
                defer wg.Done()
                ftpDriver.Stop()
                if err := ftpServer.Stop(); err != nil {
                    utils.Log.Fatal("FTP server shutdown err: ", err)
                }
            }()
        }
        if conf.Conf.SFTP.Listen != "" && conf.Conf.SFTP.Enable && sftpServer != nil && sftpDriver != nil {
            wg.Add(1)
            go func() {
                defer wg.Done()
                if err := sftpServer.Close(); err != nil {
                    utils.Log.Fatal("SFTP server shutdown err: ", err)
                if err := unixSrv.Shutdown(quitCtx); err != nil {
                    log.Errorln("shutdown Unix server", ":", err)
                }
            }()
        }
        wg.Wait()
        utils.Log.Println("Server exit")
        log.Println("server exit")
    },
}

func init() {
    RootCmd.AddCommand(ServerCmd)

    // Here you will define your flags and configuration settings.

    // Cobra supports Persistent Flags which will work for this command
    // and all subcommands, e.g.:
    // serverCmd.PersistentFlags().String("foo", "", "A help for foo")

    // Cobra supports local flags which will only run when this command
    // is called directly, e.g.:
    // serverCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}

// OutOpenListInit exposes a function for starting the server from outside the package
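The rewritten Run function trades the old fatal-on-error startup for a cancellable serverCtx: each listener goroutine cancels the context on failure, and the command then waits on either an OS signal or that cancellation before shutting every server down under a shared one-second timeout. A condensed, self-contained sketch of the same pattern using only net/http; the address and handler are placeholders, not values from this diff.

package main

import (
    "context"
    "errors"
    "log"
    "net/http"
    "os"
    "os/signal"
    "sync"
    "syscall"
    "time"
)

func main() {
    serverCtx, serverCancel := context.WithCancel(context.Background())
    defer serverCancel()

    srv := &http.Server{Addr: ":5244", Handler: http.NewServeMux()}
    go func() {
        // Cancel the shared context instead of exiting, so the main goroutine
        // runs the same shutdown path whether startup failed or a signal arrived.
        if err := srv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
            log.Println("start HTTP server:", err)
            serverCancel()
        }
    }()

    quit := make(chan os.Signal, 1)
    signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
    select {
    case <-quit: // interrupted by the user
    case <-serverCtx.Done(): // a listener failed to start
    }

    quitCtx, quitCancel := context.WithTimeout(context.Background(), time.Second)
    defer quitCancel()
    var wg sync.WaitGroup
    wg.Add(1)
    go func() {
        defer wg.Done()
        if err := srv.Shutdown(quitCtx); err != nil {
            log.Println("shutdown HTTP server:", err)
        }
    }()
    wg.Wait()
}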
@@ -1,162 +0,0 @@
package _123

import (
    "context"
    "fmt"
    "io"
    "net/http"
    "strconv"

    "github.com/OpenListTeam/OpenList/v4/drivers/base"
    "github.com/OpenListTeam/OpenList/v4/internal/driver"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
    "github.com/go-resty/resty/v2"
)

func (d *Pan123) getS3PreSignedUrls(ctx context.Context, upReq *UploadResp, start, end int) (*S3PreSignedURLs, error) {
    data := base.Json{
        "bucket":          upReq.Data.Bucket,
        "key":             upReq.Data.Key,
        "partNumberEnd":   end,
        "partNumberStart": start,
        "uploadId":        upReq.Data.UploadId,
        "StorageNode":     upReq.Data.StorageNode,
    }
    var s3PreSignedUrls S3PreSignedURLs
    _, err := d.Request(S3PreSignedUrls, http.MethodPost, func(req *resty.Request) {
        req.SetBody(data).SetContext(ctx)
    }, &s3PreSignedUrls)
    if err != nil {
        return nil, err
    }
    return &s3PreSignedUrls, nil
}

func (d *Pan123) getS3Auth(ctx context.Context, upReq *UploadResp, start, end int) (*S3PreSignedURLs, error) {
    data := base.Json{
        "StorageNode":     upReq.Data.StorageNode,
        "bucket":          upReq.Data.Bucket,
        "key":             upReq.Data.Key,
        "partNumberEnd":   end,
        "partNumberStart": start,
        "uploadId":        upReq.Data.UploadId,
    }
    var s3PreSignedUrls S3PreSignedURLs
    _, err := d.Request(S3Auth, http.MethodPost, func(req *resty.Request) {
        req.SetBody(data).SetContext(ctx)
    }, &s3PreSignedUrls)
    if err != nil {
        return nil, err
    }
    return &s3PreSignedUrls, nil
}

func (d *Pan123) completeS3(ctx context.Context, upReq *UploadResp, file model.FileStreamer, isMultipart bool) error {
    data := base.Json{
        "StorageNode": upReq.Data.StorageNode,
        "bucket":      upReq.Data.Bucket,
        "fileId":      upReq.Data.FileId,
        "fileSize":    file.GetSize(),
        "isMultipart": isMultipart,
        "key":         upReq.Data.Key,
        "uploadId":    upReq.Data.UploadId,
    }
    _, err := d.Request(UploadCompleteV2, http.MethodPost, func(req *resty.Request) {
        req.SetBody(data).SetContext(ctx)
    }, nil)
    return err
}

func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, up driver.UpdateProgress) error {
    tmpF, err := file.CacheFullInTempFile()
    if err != nil {
        return err
    }
    // fetch s3 pre signed urls
    size := file.GetSize()
    chunkSize := min(size, 16*utils.MB)
    chunkCount := int(size / chunkSize)
    lastChunkSize := size % chunkSize
    if lastChunkSize > 0 {
        chunkCount++
    } else {
        lastChunkSize = chunkSize
    }
    // only 1 batch is allowed
    batchSize := 1
    getS3UploadUrl := d.getS3Auth
    if chunkCount > 1 {
        batchSize = 10
        getS3UploadUrl = d.getS3PreSignedUrls
    }
    for i := 1; i <= chunkCount; i += batchSize {
        if utils.IsCanceled(ctx) {
            return ctx.Err()
        }
        start := i
        end := min(i+batchSize, chunkCount+1)
        s3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, start, end)
        if err != nil {
            return err
        }
        // upload each chunk
        for j := start; j < end; j++ {
            if utils.IsCanceled(ctx) {
                return ctx.Err()
            }
            curSize := chunkSize
            if j == chunkCount {
                curSize = lastChunkSize
            }
            err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.NewSectionReader(tmpF, chunkSize*int64(j-1), curSize), curSize, false, getS3UploadUrl)
            if err != nil {
                return err
            }
            up(float64(j) * 100 / float64(chunkCount))
        }
    }
    // complete s3 upload
    return d.completeS3(ctx, upReq, file, chunkCount > 1)
}

func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSignedUrls *S3PreSignedURLs, cur, end int, reader *io.SectionReader, curSize int64, retry bool, getS3UploadUrl func(ctx context.Context, upReq *UploadResp, start int, end int) (*S3PreSignedURLs, error)) error {
    uploadUrl := s3PreSignedUrls.Data.PreSignedUrls[strconv.Itoa(cur)]
    if uploadUrl == "" {
        return fmt.Errorf("upload url is empty, s3PreSignedUrls: %+v", s3PreSignedUrls)
    }
    req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, reader))
    if err != nil {
        return err
    }
    req = req.WithContext(ctx)
    req.ContentLength = curSize
    //req.Header.Set("Content-Length", strconv.FormatInt(curSize, 10))
    res, err := base.HttpClient.Do(req)
    if err != nil {
        return err
    }
    defer res.Body.Close()
    if res.StatusCode == http.StatusForbidden {
        if retry {
            return fmt.Errorf("upload s3 chunk %d failed, status code: %d", cur, res.StatusCode)
        }
        // refresh s3 pre signed urls
        newS3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, cur, end)
        if err != nil {
            return err
        }
        s3PreSignedUrls.Data.PreSignedUrls = newS3PreSignedUrls.Data.PreSignedUrls
        // retry
        reader.Seek(0, io.SeekStart)
        return d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, cur, end, reader, curSize, true, getS3UploadUrl)
    }
    if res.StatusCode != http.StatusOK {
        body, err := io.ReadAll(res.Body)
        if err != nil {
            return err
        }
        return fmt.Errorf("upload s3 chunk %d failed, status code: %d, body: %s", cur, res.StatusCode, body)
    }
    return nil
}
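The deleted newUpload caps each part at 16 MB, requests pre-signed S3 URLs in batches of up to 10 parts, and sizes the final part from the remainder. A small self-contained sketch of just that chunk arithmetic; the 16 MB limit comes from the code above, everything else is illustrative.

package main

import "fmt"

const chunkLimit = 16 * 1024 * 1024 // 16 MB, as in the removed newUpload

// splitChunks reproduces the chunk-count / last-chunk-size arithmetic used by
// the driver. It assumes size > 0, as the original code does.
func splitChunks(size int64) (chunkSize, chunkCount, lastChunkSize int64) {
    chunkSize = size
    if chunkSize > chunkLimit {
        chunkSize = chunkLimit
    }
    chunkCount = size / chunkSize
    lastChunkSize = size % chunkSize
    if lastChunkSize > 0 {
        chunkCount++
    } else {
        lastChunkSize = chunkSize
    }
    return
}

func main() {
    // A 40 MB file becomes three parts: two full 16 MB parts plus an 8 MB tail.
    fmt.Println(splitChunks(40 * 1024 * 1024))
}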
@@ -1,151 +0,0 @@
package _123_open

import (
    "context"
    "net/http"
    "strings"
    "time"

    "github.com/OpenListTeam/OpenList/v4/drivers/base"
    "github.com/OpenListTeam/OpenList/v4/internal/driver"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
    "github.com/OpenListTeam/OpenList/v4/pkg/http_range"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
    "github.com/avast/retry-go"
    "github.com/go-resty/resty/v2"
)

func (d *Open123) create(parentFileID int64, filename string, etag string, size int64, duplicate int, containDir bool) (*UploadCreateResp, error) {
    var resp UploadCreateResp
    _, err := d.Request(UploadCreate, http.MethodPost, func(req *resty.Request) {
        req.SetBody(base.Json{
            "parentFileId": parentFileID,
            "filename":     filename,
            "etag":         strings.ToLower(etag),
            "size":         size,
            "duplicate":    duplicate,
            "containDir":   containDir,
        })
    }, &resp)
    if err != nil {
        return nil, err
    }
    return &resp, nil
}

func (d *Open123) url(preuploadID string, sliceNo int64) (string, error) {
    // get upload url
    var resp UploadUrlResp
    _, err := d.Request(UploadUrl, http.MethodPost, func(req *resty.Request) {
        req.SetBody(base.Json{
            "preuploadId": preuploadID,
            "sliceNo":     sliceNo,
        })
    }, &resp)
    if err != nil {
        return "", err
    }
    return resp.Data.PresignedURL, nil
}

func (d *Open123) complete(preuploadID string) (*UploadCompleteResp, error) {
    var resp UploadCompleteResp
    _, err := d.Request(UploadComplete, http.MethodPost, func(req *resty.Request) {
        req.SetBody(base.Json{
            "preuploadID": preuploadID,
        })
    }, &resp)
    if err != nil {
        return nil, err
    }
    return &resp, nil
}

func (d *Open123) async(preuploadID string) (*UploadAsyncResp, error) {
    var resp UploadAsyncResp
    _, err := d.Request(UploadAsync, http.MethodPost, func(req *resty.Request) {
        req.SetBody(base.Json{
            "preuploadID": preuploadID,
        })
    }, &resp)
    if err != nil {
        return nil, err
    }
    return &resp, nil
}

func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createResp *UploadCreateResp, up driver.UpdateProgress) error {
    size := file.GetSize()
    chunkSize := createResp.Data.SliceSize
    uploadNums := (size + chunkSize - 1) / chunkSize
    threadG, uploadCtx := errgroup.NewGroupWithContext(ctx, d.UploadThread,
        retry.Attempts(3),
        retry.Delay(time.Second),
        retry.DelayType(retry.BackOffDelay))

    for partIndex := int64(0); partIndex < uploadNums; partIndex++ {
        if utils.IsCanceled(uploadCtx) {
            return ctx.Err()
        }
        partIndex := partIndex
        partNumber := partIndex + 1 // part numbers start at 1
        offset := partIndex * chunkSize
        size := min(chunkSize, size-offset)
        limitedReader, err := file.RangeRead(http_range.Range{
            Start:  offset,
            Length: size})
        if err != nil {
            return err
        }
        limitedReader = driver.NewLimitedUploadStream(ctx, limitedReader)

        threadG.Go(func(ctx context.Context) error {
            uploadPartUrl, err := d.url(createResp.Data.PreuploadID, partNumber)
            if err != nil {
                return err
            }

            req, err := http.NewRequestWithContext(ctx, "PUT", uploadPartUrl, limitedReader)
            if err != nil {
                return err
            }
            req = req.WithContext(ctx)
            req.ContentLength = size

            res, err := base.HttpClient.Do(req)
            if err != nil {
                return err
            }
            _ = res.Body.Close()

            progress := 10.0 + 85.0*float64(threadG.Success())/float64(uploadNums)
            up(progress)
            return nil
        })
    }

    if err := threadG.Wait(); err != nil {
        return err
    }

    uploadCompleteResp, err := d.complete(createResp.Data.PreuploadID)
    if err != nil {
        return err
    }
    if uploadCompleteResp.Data.Async == false || uploadCompleteResp.Data.Completed {
        return nil
    }

    for {
        uploadAsyncResp, err := d.async(createResp.Data.PreuploadID)
        if err != nil {
            return err
        }
        if uploadAsyncResp.Data.Completed {
            break
        }
    }
    up(100)
    return nil
}
@@ -1,126 +0,0 @@
package ftp

import (
    "fmt"
    "io"
    "os"
    "sync"
    "sync/atomic"
    "time"

    "github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
    "github.com/jlaffaye/ftp"
)

// do others that not defined in Driver interface

func (d *FTP) login() error {
    err, _, _ := singleflight.ErrorGroup.Do(fmt.Sprintf("FTP.login:%p", d), func() (error, error) {
        return d._login(), nil
    })
    return err
}

func (d *FTP) _login() error {

    if d.conn != nil {
        _, err := d.conn.CurrentDir()
        if err == nil {
            return nil
        }
    }
    conn, err := ftp.Dial(d.Address, ftp.DialWithShutTimeout(10*time.Second))
    if err != nil {
        return err
    }
    err = conn.Login(d.Username, d.Password)
    if err != nil {
        return err
    }
    d.conn = conn
    return nil
}

// FileReader An FTP file reader that implements io.MFile for seeking.
type FileReader struct {
    conn         *ftp.ServerConn
    resp         *ftp.Response
    offset       atomic.Int64
    readAtOffset int64
    mu           sync.Mutex
    path         string
    size         int64
}

func NewFileReader(conn *ftp.ServerConn, path string, size int64) *FileReader {
    return &FileReader{
        conn: conn,
        path: path,
        size: size,
    }
}

func (r *FileReader) Read(buf []byte) (n int, err error) {
    n, err = r.ReadAt(buf, r.offset.Load())
    r.offset.Add(int64(n))
    return
}

func (r *FileReader) ReadAt(buf []byte, off int64) (n int, err error) {
    if off < 0 {
        return -1, os.ErrInvalid
    }
    r.mu.Lock()
    defer r.mu.Unlock()

    if off != r.readAtOffset {
        // have to restart the connection, to correct offset
        _ = r.resp.Close()
        r.resp = nil
    }

    if r.resp == nil {
        r.resp, err = r.conn.RetrFrom(r.path, uint64(off))
        r.readAtOffset = off
        if err != nil {
            return 0, err
        }
    }

    n, err = r.resp.Read(buf)
    r.readAtOffset += int64(n)
    return
}

func (r *FileReader) Seek(offset int64, whence int) (int64, error) {
    oldOffset := r.offset.Load()
    var newOffset int64
    switch whence {
    case io.SeekStart:
        newOffset = offset
    case io.SeekCurrent:
        newOffset = oldOffset + offset
    case io.SeekEnd:
        return r.size, nil
    default:
        return -1, os.ErrInvalid
    }

    if newOffset < 0 {
        // offset out of range
        return oldOffset, os.ErrInvalid
    }
    if newOffset == oldOffset {
        // offset not changed, so return directly
        return oldOffset, nil
    }
    r.offset.Store(newOffset)
    return newOffset, nil
}

func (r *FileReader) Close() error {
    if r.resp != nil {
        return r.resp.Close()
    }
    return nil
}
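FileReader provides seekable reads over a single FTP connection by reopening the transfer with RetrFrom whenever a read starts at an offset other than the connection's current position. A hedged usage sketch built only from the constructor and methods shown above; it assumes it sits in the same package as the deleted helper, and the address, credentials, path, and size are placeholders.

package ftp // sketch: assumes it lives next to the deleted helper above

import (
    "io"
    "time"

    "github.com/jlaffaye/ftp"
)

// readAtOffset shows the intended call pattern: dial, log in, wrap the
// connection in a FileReader, seek, then read a single block.
func readAtOffset(addr, user, pass, path string, size, off int64) ([]byte, error) {
    conn, err := ftp.Dial(addr, ftp.DialWithShutTimeout(10*time.Second))
    if err != nil {
        return nil, err
    }
    if err = conn.Login(user, pass); err != nil {
        return nil, err
    }
    r := NewFileReader(conn, path, size)
    defer r.Close()
    // Seeking away from the current position forces the next read to reopen
    // the transfer with RetrFrom at the new offset.
    if _, err = r.Seek(off, io.SeekStart); err != nil {
        return nil, err
    }
    buf := make([]byte, 4096)
    n, err := r.Read(buf)
    return buf[:n], err
}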
@@ -1,155 +0,0 @@
package local

import (
    "bytes"
    "encoding/json"
    "fmt"
    "io/fs"
    "os"
    "path/filepath"
    "runtime"
    "sort"
    "strconv"
    "strings"

    "github.com/OpenListTeam/OpenList/v4/internal/conf"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
    "github.com/disintegration/imaging"
    ffmpeg "github.com/u2takey/ffmpeg-go"
)

func isSymlinkDir(f fs.FileInfo, path string) bool {
    if f.Mode()&os.ModeSymlink == os.ModeSymlink ||
        (runtime.GOOS == "windows" && f.Mode()&os.ModeIrregular == os.ModeIrregular) { // os.ModeIrregular is Junction bit in Windows
        dst, err := os.Readlink(filepath.Join(path, f.Name()))
        if err != nil {
            return false
        }
        if !filepath.IsAbs(dst) {
            dst = filepath.Join(path, dst)
        }
        stat, err := os.Stat(dst)
        if err != nil {
            return false
        }
        return stat.IsDir()
    }
    return false
}

// Get the snapshot of the video
func (d *Local) GetSnapshot(videoPath string) (imgData *bytes.Buffer, err error) {
    // Run ffprobe to get the video duration
    jsonOutput, err := ffmpeg.Probe(videoPath)
    if err != nil {
        return nil, err
    }
    // get format.duration from the json string
    type probeFormat struct {
        Duration string `json:"duration"`
    }
    type probeData struct {
        Format probeFormat `json:"format"`
    }
    var probe probeData
    err = json.Unmarshal([]byte(jsonOutput), &probe)
    if err != nil {
        return nil, err
    }
    totalDuration, err := strconv.ParseFloat(probe.Format.Duration, 64)
    if err != nil {
        return nil, err
    }

    var ss string
    if d.videoThumbPosIsPercentage {
        ss = fmt.Sprintf("%f", totalDuration*d.videoThumbPos)
    } else {
        // If the value is greater than the total duration, use the total duration
        if d.videoThumbPos > totalDuration {
            ss = fmt.Sprintf("%f", totalDuration)
        } else {
            ss = fmt.Sprintf("%f", d.videoThumbPos)
        }
    }

    // Run ffmpeg to get the snapshot
    srcBuf := bytes.NewBuffer(nil)
    // If the remaining time from the seek point to the end of the video is less
    // than the duration of a single frame, ffmpeg cannot extract any frames
    // within the specified range and will exit with an error.
    // The "noaccurate_seek" option prevents this error and would also speed up
    // the seek process.
    stream := ffmpeg.Input(videoPath, ffmpeg.KwArgs{"ss": ss, "noaccurate_seek": ""}).
        Output("pipe:", ffmpeg.KwArgs{"vframes": 1, "format": "image2", "vcodec": "mjpeg"}).
        GlobalArgs("-loglevel", "error").Silent(true).
        WithOutput(srcBuf, os.Stdout)
    if err = stream.Run(); err != nil {
        return nil, err
    }
    return srcBuf, nil
}

func readDir(dirname string) ([]fs.FileInfo, error) {
    f, err := os.Open(dirname)
    if err != nil {
        return nil, err
    }
    list, err := f.Readdir(-1)
    f.Close()
    if err != nil {
        return nil, err
    }
    sort.Slice(list, func(i, j int) bool { return list[i].Name() < list[j].Name() })
    return list, nil
}

func (d *Local) getThumb(file model.Obj) (*bytes.Buffer, *string, error) {
    fullPath := file.GetPath()
    thumbPrefix := "openlist_thumb_"
    thumbName := thumbPrefix + utils.GetMD5EncodeStr(fullPath) + ".png"
    if d.ThumbCacheFolder != "" {
        // skip if the file is a thumbnail
        if strings.HasPrefix(file.GetName(), thumbPrefix) {
            return nil, &fullPath, nil
        }
        thumbPath := filepath.Join(d.ThumbCacheFolder, thumbName)
        if utils.Exists(thumbPath) {
            return nil, &thumbPath, nil
        }
    }
    var srcBuf *bytes.Buffer
    if utils.GetFileType(file.GetName()) == conf.VIDEO {
        videoBuf, err := d.GetSnapshot(fullPath)
        if err != nil {
            return nil, nil, err
        }
        srcBuf = videoBuf
    } else {
        imgData, err := os.ReadFile(fullPath)
        if err != nil {
            return nil, nil, err
        }
        imgBuf := bytes.NewBuffer(imgData)
        srcBuf = imgBuf
    }

    image, err := imaging.Decode(srcBuf, imaging.AutoOrientation(true))
    if err != nil {
        return nil, nil, err
    }
    thumbImg := imaging.Resize(image, 144, 0, imaging.Lanczos)
    var buf bytes.Buffer
    err = imaging.Encode(&buf, thumbImg, imaging.PNG)
    if err != nil {
        return nil, nil, err
    }
    if d.ThumbCacheFolder != "" {
        err = os.WriteFile(filepath.Join(d.ThumbCacheFolder, thumbName), buf.Bytes(), 0666)
        if err != nil {
            return nil, nil, err
        }
    }
    return &buf, nil, nil
}
@@ -1,35 +0,0 @@
package strm

import (
    "github.com/OpenListTeam/OpenList/v4/internal/driver"
    "github.com/OpenListTeam/OpenList/v4/internal/op"
)

type Addition struct {
    Paths           string `json:"paths" required:"true" type:"text"`
    SiteUrl         string `json:"siteUrl" type:"text" required:"false" help:"The prefix URL of the strm file"`
    FilterFileTypes string `json:"filterFileTypes" type:"text" default:"strm" required:"false" help:"Supports suffix name of strm file"`
    EncodePath      bool   `json:"encodePath" default:"true" required:"true" help:"encode the path in the strm file"`
    LocalModel      bool   `json:"localModel" default:"false" help:"enable local mode"`
}

var config = driver.Config{
    Name:          "Strm",
    LocalSort:     true,
    NoCache:       true,
    NoUpload:      true,
    DefaultRoot:   "/",
    OnlyLinkMFile: true,
    OnlyProxy:     true,
    NoLinkURL:     true,
}

func init() {
    op.RegisterDriver(func() driver.Driver {
        return &Strm{
            Addition: Addition{
                EncodePath: true,
            },
        }
    })
}
@@ -1,22 +0,0 @@
package strm

var supportSuffix = map[string]struct{}{
    // video
    "mp4":  {},
    "mkv":  {},
    "flv":  {},
    "avi":  {},
    "wmv":  {},
    "ts":   {},
    "rmvb": {},
    "webm": {},
    // audio
    "mp3":  {},
    "flac": {},
    "aac":  {},
    "wav":  {},
    "ogg":  {},
    "m4a":  {},
    "wma":  {},
    "alac": {},
}
@@ -1,14 +0,0 @@
#!/bin/sh

umask ${UMASK}

if [ "$1" = "version" ]; then
    ./openlist version
else
    if [ "$RUN_ARIA2" = "true" ]; then
        cp -a /opt/service/stop/aria2 /opt/service/start 2>/dev/null
    fi

    chown -R ${PUID}:${PGID} /opt
    exec su-exec ${PUID}:${PGID} runsvdir /opt/service/start
fi
go.mod (266 lines changed)
@@ -1,272 +1,50 @@
module github.com/OpenListTeam/OpenList/v4
module github.com/OpenListTeam/OpenList/v5

go 1.23.4
go 1.24

require (
    github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1
    github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2
    github.com/OpenListTeam/go-cache v0.1.0
    github.com/OpenListTeam/rateg v0.1.0
    github.com/OpenListTeam/sftpd-openlist v1.0.1
    github.com/OpenListTeam/tache v0.2.0
    github.com/OpenListTeam/times v0.1.0
    github.com/OpenListTeam/wopan-sdk-go v0.1.5
    github.com/ProtonMail/go-crypto v1.3.0
    github.com/SheltonZhu/115driver v1.1.0
    github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible
    github.com/avast/retry-go v3.0.0+incompatible
    github.com/aws/aws-sdk-go v1.55.7
    github.com/blevesearch/bleve/v2 v2.5.2
    github.com/caarlos0/env/v9 v9.0.0
    github.com/charmbracelet/bubbles v0.21.0
    github.com/charmbracelet/bubbletea v1.3.6
    github.com/charmbracelet/lipgloss v1.1.0
    github.com/city404/v6-public-rpc-proto/go v0.0.0-20240817070657-90f8e24b653e
    github.com/coreos/go-oidc v2.3.0+incompatible
    github.com/deckarep/golang-set/v2 v2.8.0
    github.com/dhowden/tag v0.0.0-20240417053706-3d75831295e8
    github.com/disintegration/imaging v1.6.2
    github.com/dlclark/regexp2 v1.11.5
    github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564
    github.com/fclairamb/ftpserverlib v0.26.1-0.20250709223522-4a925d79caf6
    github.com/foxxorcat/mopan-sdk-go v0.1.6
    github.com/foxxorcat/weiyun-sdk-go v0.1.3
    github.com/gin-contrib/cors v1.7.6
    github.com/gin-gonic/gin v1.10.1
    github.com/go-resty/resty/v2 v2.16.5
    github.com/go-webauthn/webauthn v0.13.4
    github.com/golang-jwt/jwt/v4 v4.5.2
    github.com/google/uuid v1.6.0
    github.com/gorilla/websocket v1.5.3
    github.com/hekmon/transmissionrpc/v3 v3.0.0
    github.com/hirochachacha/go-smb2 v1.1.0
    github.com/ipfs/go-ipfs-api v0.7.0
    github.com/itsHenry35/gofakes3 v0.0.8
    github.com/jlaffaye/ftp v0.2.1-0.20240918233326-1b970516f5d3
    github.com/hashicorp/go-plugin v1.7.0
    github.com/json-iterator/go v1.1.12
    github.com/kdomanski/iso9660 v0.4.0
    github.com/maruel/natural v1.1.1
    github.com/meilisearch/meilisearch-go v0.32.0
    github.com/mholt/archives v0.1.3
    github.com/natefinch/lumberjack v2.0.0+incompatible
    github.com/ncw/swift/v2 v2.0.4
    github.com/pkg/errors v0.9.1
    github.com/pkg/sftp v1.13.9
    github.com/pquerna/otp v1.5.0
    github.com/rclone/rclone v1.70.3
    github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d
    github.com/sirupsen/logrus v1.9.3
    github.com/spf13/afero v1.14.0
    github.com/spf13/cobra v1.9.1
    github.com/stretchr/testify v1.10.0
    github.com/t3rm1n4l/go-mega v0.0.0-20241213151442-a19cff0ec7b5
    github.com/u2takey/ffmpeg-go v0.5.0
    github.com/upyun/go-sdk/v3 v3.0.4
    github.com/winfsp/cgofuse v1.6.0
    github.com/yeka/zip v0.0.0-20231116150916-03d6312748a9
    github.com/zzzhr1990/go-common-entity v0.0.0-20250202070650-1a200048f0d3
    golang.org/x/crypto v0.40.0
    golang.org/x/image v0.29.0
    golang.org/x/net v0.42.0
    golang.org/x/oauth2 v0.30.0
    golang.org/x/time v0.12.0
    google.golang.org/appengine v1.6.8
    gopkg.in/ldap.v3 v3.1.0
    gorm.io/driver/mysql v1.5.7
    gorm.io/driver/postgres v1.5.9
    gorm.io/driver/sqlite v1.5.6
    gorm.io/gorm v1.25.11
    golang.org/x/net v0.43.0
    google.golang.org/grpc v1.74.2
    google.golang.org/protobuf v1.36.7
)

require (
    cloud.google.com/go/compute/metadata v0.7.0 // indirect
    github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
    github.com/RoaringBitmap/roaring/v2 v2.4.5 // indirect
    github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect
    github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd // indirect
    github.com/coreos/go-systemd/v22 v22.5.0 // indirect
    github.com/ebitengine/purego v0.8.4 // indirect
    github.com/lanrat/extsort v1.0.2 // indirect
    github.com/mikelolasagasti/xz v1.0.1 // indirect
    github.com/minio/minlz v1.0.0 // indirect
    github.com/minio/xxml v0.0.3 // indirect
    github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
    github.com/otiai10/mint v1.6.3 // indirect
    github.com/shirou/gopsutil/v4 v4.25.5 // indirect
    github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
    gopkg.in/go-jose/go-jose.v2 v2.6.3 // indirect
)

require (
    github.com/OpenListTeam/115-sdk-go v0.2.2
    github.com/STARRY-S/zip v0.2.1 // indirect
    github.com/aymerick/douceur v0.2.0 // indirect
    github.com/blevesearch/go-faiss v1.0.25 // indirect
    github.com/blevesearch/zapx/v16 v16.2.4 // indirect
    github.com/bodgit/plumbing v1.3.0 // indirect
    github.com/bodgit/sevenzip v1.6.1
    github.com/bodgit/windows v1.0.1 // indirect
    github.com/bytedance/sonic/loader v0.2.4 // indirect
    github.com/charmbracelet/x/ansi v0.9.3 // indirect
    github.com/charmbracelet/x/term v0.2.1 // indirect
    github.com/cloudflare/circl v1.6.1 // indirect
    github.com/cloudwego/base64x v0.1.5 // indirect
    github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect
    github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
    github.com/fclairamb/go-log v0.6.0 // indirect
    github.com/gorilla/css v1.0.1 // indirect
    github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
    github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
    github.com/hekmon/cunits/v2 v2.1.0 // indirect
    github.com/ipfs/boxo v0.12.0 // indirect
    github.com/jackc/puddle/v2 v2.2.1 // indirect
    github.com/klauspost/pgzip v1.2.6 // indirect
    github.com/matoous/go-nanoid/v2 v2.1.0 // indirect
    github.com/microcosm-cc/bluemonday v1.0.27
    github.com/nwaples/rardecode/v2 v2.1.1
    github.com/sorairolake/lzip-go v0.3.5 // indirect
    github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543 // indirect
    github.com/ulikunitz/xz v0.5.12 // indirect
    github.com/yuin/goldmark v1.7.13
    go4.org v0.0.0-20230225012048-214862532bf5
    resty.dev/v3 v3.0.0-beta.2 // indirect
)

require (
    github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd // indirect
    github.com/OpenListTeam/gsync v0.1.0 // indirect
    github.com/abbot/go-http-auth v0.4.0 // indirect
    github.com/aead/ecdh v0.2.0 // indirect
    github.com/andreburgaud/crypt2go v1.8.0 // indirect
    github.com/andybalholm/brotli v1.1.2-0.20250424173009-453214e765f3 // indirect
    github.com/axgle/mahonia v0.0.0-20180208002826-3358181d7394
    github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
    github.com/benbjohnson/clock v1.3.0 // indirect
    github.com/beorn7/perks v1.0.1 // indirect
    github.com/bits-and-blooms/bitset v1.22.0 // indirect
    github.com/blang/semver/v4 v4.0.0 // indirect
    github.com/blevesearch/bleve_index_api v1.2.8 // indirect
    github.com/blevesearch/geo v0.2.3 // indirect
    github.com/blevesearch/go-porterstemmer v1.0.3 // indirect
    github.com/blevesearch/gtreap v0.1.1 // indirect
    github.com/blevesearch/mmap-go v1.0.4 // indirect
    github.com/blevesearch/scorch_segment_api/v2 v2.3.10 // indirect
    github.com/blevesearch/segment v0.9.1 // indirect
    github.com/blevesearch/snowballstem v0.9.0 // indirect
    github.com/blevesearch/upsidedown_store_api v1.0.2 // indirect
    github.com/blevesearch/vellum v1.1.0 // indirect
    github.com/blevesearch/zapx/v11 v11.4.2 // indirect
    github.com/blevesearch/zapx/v12 v12.4.2 // indirect
    github.com/blevesearch/zapx/v13 v13.4.2 // indirect
    github.com/blevesearch/zapx/v14 v14.4.2 // indirect
    github.com/blevesearch/zapx/v15 v15.4.2 // indirect
    github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect
    github.com/bytedance/sonic v1.13.3 // indirect
    github.com/cespare/xxhash/v2 v2.3.0 // indirect
    github.com/coreos/go-semver v0.3.1 // indirect
    github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 // indirect
    github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
    github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect
    github.com/fxamacker/cbor/v2 v2.9.0 // indirect
    github.com/bytedance/sonic v1.14.0 // indirect
    github.com/bytedance/sonic/loader v0.3.0 // indirect
    github.com/cloudwego/base64x v0.1.6 // indirect
    github.com/fatih/color v1.18.0 // indirect
    github.com/gabriel-vasile/mimetype v1.4.9 // indirect
    github.com/geoffgarside/ber v1.2.0 // indirect
    github.com/gin-contrib/sse v1.1.0 // indirect
    github.com/go-chi/chi/v5 v5.2.2 // indirect
    github.com/go-ole/go-ole v1.3.0 // indirect
    github.com/go-playground/locales v0.14.1 // indirect
    github.com/go-playground/universal-translator v0.18.1 // indirect
    github.com/go-playground/validator/v10 v10.26.0 // indirect
    github.com/go-sql-driver/mysql v1.7.0 // indirect
    github.com/go-webauthn/x v0.1.23 // indirect
    github.com/go-playground/validator/v10 v10.27.0 // indirect
    github.com/goccy/go-json v0.10.5 // indirect
    github.com/golang-jwt/jwt/v5 v5.2.3 // indirect
    github.com/golang/protobuf v1.5.4 // indirect
    github.com/golang/snappy v0.0.4 // indirect
    github.com/google/go-tpm v0.9.5 // indirect
    github.com/hashicorp/errwrap v1.1.0 // indirect
    github.com/hashicorp/go-multierror v1.1.1 // indirect
    github.com/hashicorp/go-version v1.6.0 // indirect
    github.com/hashicorp/go-hclog v1.6.3 // indirect
    github.com/hashicorp/yamux v0.1.2 // indirect
    github.com/inconshreveable/mousetrap v1.1.0 // indirect
    github.com/ipfs/go-cid v0.5.0
    github.com/jackc/pgpassfile v1.0.0 // indirect
    github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
    github.com/jackc/pgx/v5 v5.5.5 // indirect
    github.com/jinzhu/inflection v1.0.0 // indirect
    github.com/jinzhu/now v1.1.5 // indirect
    github.com/jmespath/go-jmespath v0.4.0 // indirect
    github.com/josharian/intern v1.0.0 // indirect
    github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004 // indirect
    github.com/klauspost/compress v1.18.0 // indirect
    github.com/klauspost/cpuid/v2 v2.2.10 // indirect
    github.com/kr/fs v0.1.0 // indirect
    github.com/klauspost/cpuid/v2 v2.3.0 // indirect
    github.com/kr/text v0.2.0 // indirect
    github.com/leodido/go-urn v1.4.0 // indirect
    github.com/libp2p/go-buffer-pool v0.1.0 // indirect
    github.com/libp2p/go-flow-metrics v0.1.0 // indirect
    github.com/libp2p/go-libp2p v0.27.8 // indirect
    github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
    github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35 // indirect
    github.com/mailru/easyjson v0.9.0 // indirect
    github.com/mattn/go-colorable v0.1.14 // indirect
    github.com/mattn/go-isatty v0.0.20 // indirect
    github.com/mattn/go-localereader v0.0.1 // indirect
    github.com/mattn/go-runewidth v0.0.16 // indirect
    github.com/mattn/go-sqlite3 v1.14.22 // indirect
    github.com/minio/sha256-simd v1.0.1 // indirect
    github.com/mitchellh/go-homedir v1.1.0 // indirect
    github.com/mitchellh/mapstructure v1.5.0 // indirect
    github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
    github.com/modern-go/reflect2 v1.0.2 // indirect
    github.com/mr-tron/base58 v1.2.0 // indirect
    github.com/mschoch/smat v0.2.0 // indirect
    github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
    github.com/muesli/cancelreader v0.2.2 // indirect
    github.com/muesli/termenv v0.16.0 // indirect
    github.com/multiformats/go-base32 v0.1.0 // indirect
    github.com/multiformats/go-base36 v0.2.0 // indirect
    github.com/multiformats/go-multiaddr v0.9.0 // indirect
    github.com/multiformats/go-multibase v0.2.0 // indirect
    github.com/multiformats/go-multicodec v0.9.0 // indirect
    github.com/multiformats/go-multihash v0.2.3 // indirect
    github.com/multiformats/go-multistream v0.4.1 // indirect
    github.com/multiformats/go-varint v0.0.7 // indirect
    github.com/otiai10/copy v1.14.1
    github.com/oklog/run v1.2.0 // indirect
    github.com/pelletier/go-toml/v2 v2.2.4 // indirect
    github.com/pierrec/lz4/v4 v4.1.22 // indirect
    github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
    github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
    github.com/pquerna/cachecontrol v0.1.0 // indirect
    github.com/prometheus/client_golang v1.22.0 // indirect
    github.com/prometheus/client_model v0.6.2 // indirect
    github.com/prometheus/common v0.64.0 // indirect
    github.com/prometheus/procfs v0.16.1 // indirect
    github.com/rfjakob/eme v1.1.2 // indirect
    github.com/rivo/uniseg v0.4.7 // indirect
    github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // indirect
    github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df // indirect
    github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e
    github.com/spaolacci/murmur3 v1.1.0 // indirect
    github.com/spf13/pflag v1.0.6 // indirect
    github.com/tklauser/go-sysconf v0.3.15 // indirect
    github.com/tklauser/numcpus v0.10.0 // indirect
    github.com/spf13/pflag v1.0.7 // indirect
    github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
    github.com/u2takey/go-utils v0.3.1 // indirect
    github.com/ugorji/go/codec v1.3.0 // indirect
    github.com/x448/float16 v0.8.4 // indirect
    github.com/yusufpapurcu/wmi v1.2.4 // indirect
    go.etcd.io/bbolt v1.4.0 // indirect
    golang.org/x/arch v0.18.0 // indirect
    golang.org/x/sync v0.16.0 // indirect
    golang.org/x/sys v0.34.0 // indirect
    golang.org/x/term v0.33.0 // indirect
    golang.org/x/text v0.27.0
    golang.org/x/tools v0.34.0 // indirect
    google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
    google.golang.org/grpc v1.73.0
    google.golang.org/protobuf v1.36.6 // indirect
    gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect
    gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
    golang.org/x/arch v0.20.0 // indirect
    golang.org/x/crypto v0.41.0 // indirect
    golang.org/x/sys v0.35.0 // indirect
    golang.org/x/text v0.28.0 // indirect
    google.golang.org/genproto/googleapis/rpc v0.0.0-20250811230008-5f3141c8851a // indirect
    gopkg.in/yaml.v3 v3.0.1 // indirect
    lukechampine.com/blake3 v1.1.7 // indirect
)

// replace github.com/OpenListTeam/115-sdk-go => ../../OpenListTeam/115-sdk-go
@ -6,139 +6,68 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/cmd/flags"
|
||||
"github.com/OpenListTeam/OpenList/v4/drivers/base"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/conf"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/net"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
"github.com/caarlos0/env/v9"
|
||||
"github.com/OpenListTeam/OpenList/v5/cmd/flags"
|
||||
"github.com/OpenListTeam/OpenList/v5/internal/conf"
|
||||
"github.com/OpenListTeam/OpenList/v5/pkg/utils"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Program working directory
|
||||
func PWD() string {
|
||||
if flags.ForceBinDir {
|
||||
ex, err := os.Executable()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
pwd := filepath.Dir(ex)
|
||||
return pwd
|
||||
}
|
||||
d, err := os.Getwd()
|
||||
if err != nil {
|
||||
d = "."
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
func InitConfig() {
|
||||
pwd := PWD()
|
||||
dataDir := flags.DataDir
|
||||
if !filepath.IsAbs(dataDir) {
|
||||
flags.DataDir = filepath.Join(pwd, flags.DataDir)
|
||||
if !filepath.IsAbs(flags.ConfigFile) {
|
||||
flags.ConfigFile = filepath.Join(flags.PWD(), flags.ConfigFile)
|
||||
}
|
||||
configPath := filepath.Join(flags.DataDir, "config.json")
|
||||
log.Infof("reading config file: %s", configPath)
|
||||
if !utils.Exists(configPath) {
|
||||
log.Infof("config file not exists, creating default config file")
|
||||
_, err := utils.CreateNestedFile(configPath)
|
||||
log.Infoln("reading config file", "@", flags.ConfigFile)
|
||||
|
||||
if !utils.Exists(flags.ConfigFile) {
|
||||
log.Infoln("config file not exists, creating default config file")
|
||||
_, err := utils.CreateNestedFile(flags.ConfigFile)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to create config file: %+v", err)
|
||||
log.Fatalln("create config file", ":", err)
|
||||
}
|
||||
conf.Conf = conf.DefaultConfig(dataDir)
|
||||
LastLaunchedVersion = conf.Version
|
||||
conf.Conf.LastLaunchedVersion = conf.Version
|
||||
if !utils.WriteJsonToFile(configPath, conf.Conf) {
|
||||
log.Fatalf("failed to create default config file")
|
||||
conf.Conf = conf.DefaultConfig()
|
||||
err = utils.WriteJsonToFile(flags.ConfigFile, conf.Conf)
|
||||
if err != nil {
|
||||
log.Fatalln("save default config file", ":", err)
|
||||
}
|
||||
} else {
|
||||
configBytes, err := os.ReadFile(configPath)
|
||||
configBytes, err := os.ReadFile(flags.ConfigFile)
|
||||
if err != nil {
|
||||
log.Fatalf("reading config file error: %+v", err)
|
||||
log.Fatalln("reading config file", ":", err)
|
||||
}
|
||||
conf.Conf = conf.DefaultConfig(dataDir)
|
||||
conf.Conf = conf.DefaultConfig()
|
||||
err = utils.Json.Unmarshal(configBytes, conf.Conf)
|
||||
if err != nil {
|
||||
log.Fatalf("load config error: %+v", err)
|
||||
log.Fatalln("unmarshal config", ":", err)
|
||||
}
|
||||
LastLaunchedVersion = conf.Conf.LastLaunchedVersion
|
||||
if strings.HasPrefix(conf.Version, "v") || LastLaunchedVersion == "" {
|
||||
conf.Conf.LastLaunchedVersion = conf.Version
|
||||
}
|
||||
// update config.json struct
|
||||
confBody, err := utils.Json.MarshalIndent(conf.Conf, "", " ")
|
||||
err = utils.WriteJsonToFile(flags.ConfigFile, conf.Conf)
|
||||
if err != nil {
|
||||
log.Fatalf("marshal config error: %+v", err)
|
||||
}
|
||||
err = os.WriteFile(configPath, confBody, 0o777)
|
||||
if err != nil {
|
||||
log.Fatalf("update config struct error: %+v", err)
|
||||
log.Fatalln("update config file", ":", err)
|
||||
}
|
||||
}
|
||||
if conf.Conf.MaxConcurrency > 0 {
|
||||
net.DefaultConcurrencyLimit = &net.ConcurrencyLimit{Limit: conf.Conf.MaxConcurrency}
|
||||
}
|
||||
if !conf.Conf.Force {
|
||||
confFromEnv()
|
||||
}
|
||||
if len(conf.Conf.Log.Filter.Filters) == 0 {
|
||||
conf.Conf.Log.Filter.Enable = false
|
||||
}
|
||||
|
||||
// convert abs path
|
||||
configDir := filepath.Dir(flags.ConfigFile)
|
||||
convertAbsPath := func(path *string) {
|
||||
if !filepath.IsAbs(*path) {
|
||||
*path = filepath.Join(pwd, *path)
|
||||
if *path != "" && !filepath.IsAbs(*path) {
|
||||
*path = filepath.Join(configDir, *path)
|
||||
}
|
||||
}
|
||||
convertAbsPath(&conf.Conf.TempDir)
|
||||
convertAbsPath(&conf.Conf.BleveDir)
|
||||
convertAbsPath(&conf.Conf.Log.Name)
|
||||
convertAbsPath(&conf.Conf.Database.DBFile)
|
||||
if conf.Conf.DistDir != "" {
|
||||
convertAbsPath(&conf.Conf.DistDir)
|
||||
}
|
||||
err := os.MkdirAll(conf.Conf.TempDir, 0o777)
|
||||
if err != nil {
|
||||
log.Fatalf("create temp dir error: %+v", err)
|
||||
}
|
||||
convertAbsPath(&conf.Conf.Scheme.CertFile)
|
||||
convertAbsPath(&conf.Conf.Scheme.KeyFile)
|
||||
convertAbsPath(&conf.Conf.Scheme.UnixFile)
|
||||
log.Debugf("config: %+v", conf.Conf)
|
||||
base.InitClient()
|
||||
initURL()
|
||||
|
||||
initSitePath()
|
||||
}
|
||||
|
||||
func confFromEnv() {
	prefix := "OPENLIST_"
	if flags.NoPrefix {
		prefix = ""
	}
	log.Infof("load config from env with prefix: %s", prefix)
	if err := env.ParseWithOptions(conf.Conf, env.Options{
		Prefix: prefix,
	}); err != nil {
		log.Fatalf("load config from env error: %+v", err)
	}
}
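As a concrete illustration of the prefixed environment override above, here is a minimal sketch that mirrors the env.ParseWithOptions call on a trimmed-down config struct. The demo struct, the caarlos0/env import path, and the OPENLIST_DB_TYPE mapping are assumptions derived from the env/envPrefix tags shown in this diff, not part of the commit itself.

package main

import (
	"fmt"
	"os"

	"github.com/caarlos0/env/v11" // assumed env library; the diff only shows the env.ParseWithOptions call
)

// demoDatabase and demoConfig are trimmed-down stand-ins for conf.Config,
// using the same tag style that appears in this diff.
type demoDatabase struct {
	Type string `json:"type" env:"TYPE"`
}

type demoConfig struct {
	SiteURL  string       `json:"site_url" env:"SITE_URL"`
	Database demoDatabase `json:"database" envPrefix:"DB_"`
}

func main() {
	// With the "OPENLIST_" prefix, Database.Type is overridden by OPENLIST_DB_TYPE.
	os.Setenv("OPENLIST_DB_TYPE", "postgres")
	os.Setenv("OPENLIST_SITE_URL", "https://example.com/list")

	cfg := &demoConfig{}
	if err := env.ParseWithOptions(cfg, env.Options{Prefix: "OPENLIST_"}); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Database.Type, cfg.SiteURL) // postgres https://example.com/list
}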
|
||||
|
||||
func initURL() {
|
||||
func initSitePath() {
|
||||
if !strings.Contains(conf.Conf.SiteURL, "://") {
|
||||
conf.Conf.SiteURL = utils.FixAndCleanPath(conf.Conf.SiteURL)
|
||||
}
|
||||
u, err := url.Parse(conf.Conf.SiteURL)
|
||||
if err != nil {
|
||||
utils.Log.Fatalf("can't parse site_url: %+v", err)
|
||||
}
|
||||
conf.URL = u
|
||||
}
|
||||
|
||||
func CleanTempDir() {
|
||||
files, err := os.ReadDir(conf.Conf.TempDir)
|
||||
if err != nil {
|
||||
log.Errorln("failed list temp file: ", err)
|
||||
}
|
||||
for _, file := range files {
|
||||
if err := os.RemoveAll(filepath.Join(conf.Conf.TempDir, file.Name())); err != nil {
|
||||
log.Errorln("failed delete temp file: ", err)
|
||||
}
|
||||
log.Fatalln("parse site_url", ":", err)
|
||||
}
|
||||
conf.SitePath = u.Path
|
||||
}
|
||||
|
13
internal/bootstrap/driver.go
Normal file
@ -0,0 +1,13 @@
package bootstrap

import (
	"github.com/OpenListTeam/OpenList/v5/internal/driver"
	driverS "github.com/OpenListTeam/OpenList/v5/shared/driver"
	"github.com/hashicorp/go-plugin"
)

func InitDriverPlugins() {
	driver.PluginMap = map[string]plugin.Plugin{
		"grpc": &driverS.Plugin{},
	}
}
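For orientation, here is a hedged sketch of how a host process could consume this PluginMap with hashicorp/go-plugin over gRPC. The handshake values, the plugin binary path, and the concrete driver interface returned by Dispense are illustrative assumptions, not something this commit defines.

package main

import (
	"log"
	"os/exec"

	"github.com/OpenListTeam/OpenList/v5/internal/driver"
	"github.com/hashicorp/go-plugin"
)

func main() {
	// Handshake values are placeholders; real values must match whatever the plugin binary serves.
	handshake := plugin.HandshakeConfig{
		ProtocolVersion:  1,
		MagicCookieKey:   "OPENLIST_PLUGIN",
		MagicCookieValue: "example",
	}

	client := plugin.NewClient(&plugin.ClientConfig{
		HandshakeConfig:  handshake,
		Plugins:          driver.PluginMap, // populated by bootstrap.InitDriverPlugins
		Cmd:              exec.Command("./example-driver-plugin"), // hypothetical plugin binary
		AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC},
	})
	defer client.Kill()

	rpcClient, err := client.Client()
	if err != nil {
		log.Fatal(err)
	}

	// "grpc" is the key registered in InitDriverPlugins; the concrete type of raw
	// depends on what shared/driver.Plugin dispenses.
	raw, err := rpcClient.Dispense("grpc")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("dispensed driver plugin: %T", raw)
}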
|
@ -1,34 +1,9 @@
|
||||
package conf
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils/random"
|
||||
)
|
||||
|
||||
type Database struct {
|
||||
Type string `json:"type" env:"TYPE"`
|
||||
Host string `json:"host" env:"HOST"`
|
||||
Port int `json:"port" env:"PORT"`
|
||||
User string `json:"user" env:"USER"`
|
||||
Password string `json:"password" env:"PASS"`
|
||||
Name string `json:"name" env:"NAME"`
|
||||
DBFile string `json:"db_file" env:"FILE"`
|
||||
TablePrefix string `json:"table_prefix" env:"TABLE_PREFIX"`
|
||||
SSLMode string `json:"ssl_mode" env:"SSL_MODE"`
|
||||
DSN string `json:"dsn" env:"DSN"`
|
||||
}
|
||||
|
||||
type Meilisearch struct {
|
||||
Host string `json:"host" env:"HOST"`
|
||||
APIKey string `json:"api_key" env:"API_KEY"`
|
||||
Index string `json:"index" env:"INDEX"`
|
||||
}
|
||||
|
||||
type Scheme struct {
|
||||
Address string `json:"address" env:"ADDR"`
|
||||
HttpPort int `json:"http_port" env:"HTTP_PORT"`
|
||||
HttpsPort int `json:"https_port" env:"HTTPS_PORT"`
|
||||
HttpPort uint16 `json:"http_port" env:"HTTP_PORT"`
|
||||
HttpsPort uint16 `json:"https_port" env:"HTTPS_PORT"`
|
||||
ForceHttps bool `json:"force_https" env:"FORCE_HTTPS"`
|
||||
CertFile string `json:"cert_file" env:"CERT_FILE"`
|
||||
KeyFile string `json:"key_file" env:"KEY_FILE"`
|
||||
@ -36,208 +11,30 @@ type Scheme struct {
|
||||
UnixFilePerm string `json:"unix_file_perm" env:"UNIX_FILE_PERM"`
|
||||
EnableH2c bool `json:"enable_h2c" env:"ENABLE_H2C"`
|
||||
}
|
||||
|
||||
type LogConfig struct {
|
||||
Enable bool `json:"enable" env:"ENABLE"`
|
||||
Name string `json:"name" env:"NAME"`
|
||||
MaxSize int `json:"max_size" env:"MAX_SIZE"`
|
||||
MaxBackups int `json:"max_backups" env:"MAX_BACKUPS"`
|
||||
MaxAge int `json:"max_age" env:"MAX_AGE"`
|
||||
Compress bool `json:"compress" env:"COMPRESS"`
|
||||
Filter LogFilterConfig `json:"filter" envPrefix:"FILTER_"`
|
||||
}
|
||||
|
||||
type LogFilterConfig struct {
|
||||
Enable bool `json:"enable" env:"ENABLE"`
|
||||
Filters []Filter `json:"filters"`
|
||||
}
|
||||
|
||||
type Filter struct {
|
||||
CIDR string `json:"cidr"`
|
||||
Path string `json:"path"`
|
||||
Method string `json:"method"`
|
||||
}
|
||||
|
||||
type TaskConfig struct {
|
||||
Workers int `json:"workers" env:"WORKERS"`
|
||||
MaxRetry int `json:"max_retry" env:"MAX_RETRY"`
|
||||
TaskPersistant bool `json:"task_persistant" env:"TASK_PERSISTANT"`
|
||||
}
|
||||
|
||||
type TasksConfig struct {
|
||||
Download TaskConfig `json:"download" envPrefix:"DOWNLOAD_"`
|
||||
Transfer TaskConfig `json:"transfer" envPrefix:"TRANSFER_"`
|
||||
Upload TaskConfig `json:"upload" envPrefix:"UPLOAD_"`
|
||||
Copy TaskConfig `json:"copy" envPrefix:"COPY_"`
|
||||
Move TaskConfig `json:"move" envPrefix:"MOVE_"`
|
||||
Decompress TaskConfig `json:"decompress" envPrefix:"DECOMPRESS_"`
|
||||
DecompressUpload TaskConfig `json:"decompress_upload" envPrefix:"DECOMPRESS_UPLOAD_"`
|
||||
AllowRetryCanceled bool `json:"allow_retry_canceled" env:"ALLOW_RETRY_CANCELED"`
|
||||
}
|
||||
|
||||
type Cors struct {
|
||||
AllowOrigins []string `json:"allow_origins" env:"ALLOW_ORIGINS"`
|
||||
AllowMethods []string `json:"allow_methods" env:"ALLOW_METHODS"`
|
||||
AllowHeaders []string `json:"allow_headers" env:"ALLOW_HEADERS"`
|
||||
}
|
||||
|
||||
type S3 struct {
|
||||
Enable bool `json:"enable" env:"ENABLE"`
|
||||
Port int `json:"port" env:"PORT"`
|
||||
SSL bool `json:"ssl" env:"SSL"`
|
||||
}
|
||||
|
||||
type FTP struct {
|
||||
Enable bool `json:"enable" env:"ENABLE"`
|
||||
Listen string `json:"listen" env:"LISTEN"`
|
||||
FindPasvPortAttempts int `json:"find_pasv_port_attempts" env:"FIND_PASV_PORT_ATTEMPTS"`
|
||||
ActiveTransferPortNon20 bool `json:"active_transfer_port_non_20" env:"ACTIVE_TRANSFER_PORT_NON_20"`
|
||||
IdleTimeout int `json:"idle_timeout" env:"IDLE_TIMEOUT"`
|
||||
ConnectionTimeout int `json:"connection_timeout" env:"CONNECTION_TIMEOUT"`
|
||||
DisableActiveMode bool `json:"disable_active_mode" env:"DISABLE_ACTIVE_MODE"`
|
||||
DefaultTransferBinary bool `json:"default_transfer_binary" env:"DEFAULT_TRANSFER_BINARY"`
|
||||
EnableActiveConnIPCheck bool `json:"enable_active_conn_ip_check" env:"ENABLE_ACTIVE_CONN_IP_CHECK"`
|
||||
EnablePasvConnIPCheck bool `json:"enable_pasv_conn_ip_check" env:"ENABLE_PASV_CONN_IP_CHECK"`
|
||||
}
|
||||
|
||||
type SFTP struct {
|
||||
Enable bool `json:"enable" env:"ENABLE"`
|
||||
Listen string `json:"listen" env:"LISTEN"`
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
Force bool `json:"force" env:"FORCE"`
|
||||
SiteURL string `json:"site_url" env:"SITE_URL"`
|
||||
Cdn string `json:"cdn" env:"CDN"`
|
||||
JwtSecret string `json:"jwt_secret" env:"JWT_SECRET"`
|
||||
TokenExpiresIn int `json:"token_expires_in" env:"TOKEN_EXPIRES_IN"`
|
||||
Database Database `json:"database" envPrefix:"DB_"`
|
||||
Meilisearch Meilisearch `json:"meilisearch" envPrefix:"MEILISEARCH_"`
|
||||
Scheme Scheme `json:"scheme"`
|
||||
TempDir string `json:"temp_dir" env:"TEMP_DIR"`
|
||||
BleveDir string `json:"bleve_dir" env:"BLEVE_DIR"`
|
||||
DistDir string `json:"dist_dir"`
|
||||
Log LogConfig `json:"log" envPrefix:"LOG_"`
|
||||
DelayedStart int `json:"delayed_start" env:"DELAYED_START"`
|
||||
MaxConnections int `json:"max_connections" env:"MAX_CONNECTIONS"`
|
||||
MaxConcurrency int `json:"max_concurrency" env:"MAX_CONCURRENCY"`
|
||||
TlsInsecureSkipVerify bool `json:"tls_insecure_skip_verify" env:"TLS_INSECURE_SKIP_VERIFY"`
|
||||
Tasks TasksConfig `json:"tasks" envPrefix:"TASKS_"`
|
||||
SiteURL string `json:"site_url" env:"SITE_URL"`
|
||||
Scheme Scheme `json:"scheme"`
|
||||
Cors Cors `json:"cors" envPrefix:"CORS_"`
|
||||
S3 S3 `json:"s3" envPrefix:"S3_"`
|
||||
FTP FTP `json:"ftp" envPrefix:"FTP_"`
|
||||
SFTP SFTP `json:"sftp" envPrefix:"SFTP_"`
|
||||
LastLaunchedVersion string `json:"last_launched_version"`
|
||||
}
|
||||
|
||||
func DefaultConfig(dataDir string) *Config {
|
||||
tempDir := filepath.Join(dataDir, "temp")
|
||||
indexDir := filepath.Join(dataDir, "bleve")
|
||||
logPath := filepath.Join(dataDir, "log/log.log")
|
||||
dbPath := filepath.Join(dataDir, "data.db")
|
||||
func DefaultConfig() *Config {
|
||||
return &Config{
|
||||
TempDir: "temp",
|
||||
Scheme: Scheme{
|
||||
Address: "0.0.0.0",
|
||||
UnixFile: "",
|
||||
HttpPort: 5244,
|
||||
HttpsPort: -1,
|
||||
ForceHttps: false,
|
||||
CertFile: "",
|
||||
KeyFile: "",
|
||||
},
|
||||
JwtSecret: random.String(16),
|
||||
TokenExpiresIn: 48,
|
||||
TempDir: tempDir,
|
||||
Database: Database{
|
||||
Type: "sqlite3",
|
||||
Port: 0,
|
||||
TablePrefix: "x_",
|
||||
DBFile: dbPath,
|
||||
},
|
||||
Meilisearch: Meilisearch{
|
||||
Host: "http://localhost:7700",
|
||||
Index: "openlist",
|
||||
},
|
||||
BleveDir: indexDir,
|
||||
Log: LogConfig{
|
||||
Enable: true,
|
||||
Name: logPath,
|
||||
MaxSize: 50,
|
||||
MaxBackups: 30,
|
||||
MaxAge: 28,
|
||||
Filter: LogFilterConfig{
|
||||
Enable: false,
|
||||
Filters: []Filter{
|
||||
{Path: "/ping"},
|
||||
{Method: "HEAD"},
|
||||
{Path: "/dav/", Method: "PROPFIND"},
|
||||
},
|
||||
},
|
||||
},
|
||||
MaxConnections: 0,
|
||||
MaxConcurrency: 64,
|
||||
TlsInsecureSkipVerify: true,
|
||||
Tasks: TasksConfig{
|
||||
Download: TaskConfig{
|
||||
Workers: 5,
|
||||
MaxRetry: 1,
|
||||
// TaskPersistant: true,
|
||||
},
|
||||
Transfer: TaskConfig{
|
||||
Workers: 5,
|
||||
MaxRetry: 2,
|
||||
// TaskPersistant: true,
|
||||
},
|
||||
Upload: TaskConfig{
|
||||
Workers: 5,
|
||||
},
|
||||
Copy: TaskConfig{
|
||||
Workers: 5,
|
||||
MaxRetry: 2,
|
||||
// TaskPersistant: true,
|
||||
},
|
||||
Move: TaskConfig{
|
||||
Workers: 5,
|
||||
MaxRetry: 2,
|
||||
// TaskPersistant: true,
|
||||
},
|
||||
Decompress: TaskConfig{
|
||||
Workers: 5,
|
||||
MaxRetry: 2,
|
||||
// TaskPersistant: true,
|
||||
},
|
||||
DecompressUpload: TaskConfig{
|
||||
Workers: 5,
|
||||
MaxRetry: 2,
|
||||
},
|
||||
AllowRetryCanceled: false,
|
||||
},
|
||||
Cors: Cors{
|
||||
AllowOrigins: []string{"*"},
|
||||
AllowMethods: []string{"*"},
|
||||
AllowHeaders: []string{"*"},
|
||||
},
|
||||
S3: S3{
|
||||
Enable: false,
|
||||
Port: 5246,
|
||||
SSL: false,
|
||||
},
|
||||
FTP: FTP{
|
||||
Enable: false,
|
||||
Listen: ":5221",
|
||||
FindPasvPortAttempts: 50,
|
||||
ActiveTransferPortNon20: false,
|
||||
IdleTimeout: 900,
|
||||
ConnectionTimeout: 30,
|
||||
DisableActiveMode: false,
|
||||
DefaultTransferBinary: false,
|
||||
EnableActiveConnIPCheck: true,
|
||||
EnablePasvConnIPCheck: true,
|
||||
},
|
||||
SFTP: SFTP{
|
||||
Enable: false,
|
||||
Listen: ":5222",
|
||||
},
|
||||
LastLaunchedVersion: "",
|
||||
}
|
||||
}
|
||||
|
@ -1,33 +1,10 @@
|
||||
package conf
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
var (
|
||||
BuiltAt string = "unknown"
|
||||
GitAuthor string = "unknown"
|
||||
GitCommit string = "unknown"
|
||||
Version string = "dev"
|
||||
WebVersion string = "rolling"
|
||||
)
|
||||
import "regexp"
|
||||
|
||||
var (
|
||||
Conf *Config
|
||||
URL *url.URL
|
||||
SitePath string
|
||||
)
|
||||
|
||||
var SlicesMap = make(map[string][]string)
|
||||
var FilenameCharMap = make(map[string]string)
|
||||
var PrivacyReg []*regexp.Regexp
|
||||
|
||||
var (
|
||||
// StoragesLoaded loaded success if empty
|
||||
StoragesLoaded = false
|
||||
)
|
||||
var (
|
||||
RawIndexHtml string
|
||||
ManageHtml string
|
||||
IndexHtml string
|
||||
)
|
||||
|
9
internal/driver/var.go
Normal file
@ -0,0 +1,9 @@
package driver

import (
	"github.com/hashicorp/go-plugin"
)

var (
	PluginMap map[string]plugin.Plugin
)
|
27
layers/file/driver.go
Normal file
@ -0,0 +1,27 @@
package file

import "context"

// HostFileServer driver-side file interface #################################################################
type HostFileServer interface {
	// CopyFile copies files =======================================================================
	CopyFile(ctx context.Context, sources []string, targets []string) ([]*BackFileAction, error)
	// MoveFile moves files =======================================================================
	MoveFile(ctx context.Context, sources []string, targets []string) ([]*BackFileAction, error)
	// NameFile renames files =======================================================================
	NameFile(ctx context.Context, sources []string, targets []string) ([]*BackFileAction, error)
	// ListFile lists files =======================================================================
	ListFile(ctx context.Context, path []string, opt *ListFileOption) ([]*HostFileObject, error)
	// FindFile searches for files =======================================================================
	FindFile(ctx context.Context, path []string, opt *FindFileOption) ([]*HostFileObject, error)
	// Download fetches files =======================================================================
	Download(ctx context.Context, path []string, opt *DownloadOption) ([]*LinkFileObject, error)
	// Uploader uploads files =======================================================================
	Uploader(ctx context.Context, path []string, opt *UploaderOption) ([]*BackFileAction, error)
	// KillFile deletes files =======================================================================
	KillFile(ctx context.Context, path []string, opt *KillFileOption) ([]*BackFileAction, error)
	// MakeFile creates files =======================================================================
	MakeFile(ctx context.Context, path []string, opt *MakeFileOption) ([]*BackFileAction, error)
	// MakePath creates directories =======================================================================
	MakePath(ctx context.Context, path []string, opt *MakeFileOption) ([]*BackFileAction, error)
}
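To make the contract concrete, below is a hedged, no-op sketch of a type satisfying HostFileServer. The driver name and the empty return values are purely illustrative assumptions; only the interface itself comes from this commit.

package file

import "context"

// noopDriver is a hypothetical stand-in that satisfies HostFileServer without doing any work.
type noopDriver struct{}

// Compile-time check that noopDriver implements the interface.
var _ HostFileServer = (*noopDriver)(nil)

func (noopDriver) CopyFile(ctx context.Context, sources, targets []string) ([]*BackFileAction, error) {
	return []*BackFileAction{}, nil
}
func (noopDriver) MoveFile(ctx context.Context, sources, targets []string) ([]*BackFileAction, error) {
	return []*BackFileAction{}, nil
}
func (noopDriver) NameFile(ctx context.Context, sources, targets []string) ([]*BackFileAction, error) {
	return []*BackFileAction{}, nil
}
func (noopDriver) ListFile(ctx context.Context, path []string, opt *ListFileOption) ([]*HostFileObject, error) {
	return []*HostFileObject{}, nil
}
func (noopDriver) FindFile(ctx context.Context, path []string, opt *FindFileOption) ([]*HostFileObject, error) {
	return []*HostFileObject{}, nil
}
func (noopDriver) Download(ctx context.Context, path []string, opt *DownloadOption) ([]*LinkFileObject, error) {
	return []*LinkFileObject{}, nil
}
func (noopDriver) Uploader(ctx context.Context, path []string, opt *UploaderOption) ([]*BackFileAction, error) {
	return []*BackFileAction{}, nil
}
func (noopDriver) KillFile(ctx context.Context, path []string, opt *KillFileOption) ([]*BackFileAction, error) {
	return []*BackFileAction{}, nil
}
func (noopDriver) MakeFile(ctx context.Context, path []string, opt *MakeFileOption) ([]*BackFileAction, error) {
	return []*BackFileAction{}, nil
}
func (noopDriver) MakePath(ctx context.Context, path []string, opt *MakeFileOption) ([]*BackFileAction, error) {
	return []*BackFileAction{}, nil
}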
|
71
layers/file/manage.go
Normal file
@ -0,0 +1,71 @@
package file

import (
	"context"
)

// UserFileServer user-facing file service interface #################################################################
type UserFileServer interface {
	// CopyFile copies files =======================================================================
	CopyFile(ctx context.Context, sources []string, targets []string) ([]*BackFileAction, error)
	// MoveFile moves files =======================================================================
	MoveFile(ctx context.Context, sources []string, targets []string) ([]*BackFileAction, error)
	// NameFile renames files =======================================================================
	NameFile(ctx context.Context, sources []string, targets []string) ([]*BackFileAction, error)
	// ListFile lists files =======================================================================
	ListFile(ctx context.Context, path []string, opt *ListFileOption) ([]*UserFileObject, error)
	// FindFile searches for files =======================================================================
	FindFile(ctx context.Context, path []string, opt *FindFileOption) ([]*UserFileObject, error)
	// Download fetches files =======================================================================
	Download(ctx context.Context, path []string, opt *DownloadOption) ([]*LinkFileObject, error)
	// Uploader uploads files =======================================================================
	Uploader(ctx context.Context, path []string, opt *UploaderOption) ([]*BackFileAction, error)
	// KillFile deletes files =======================================================================
	KillFile(ctx context.Context, path []string, opt *KillFileOption) ([]*BackFileAction, error)
	// MakeFile creates files =======================================================================
	MakeFile(ctx context.Context, path []string, opt *MakeFileOption) ([]*BackFileAction, error)
	// MakePath creates directories =======================================================================
	MakePath(ctx context.Context, path []string, opt *MakeFileOption) ([]*BackFileAction, error)
	// PermFile sets permissions =======================================================================
	PermFile(ctx context.Context, path []string, opt *PermissionFile) ([]*BackFileAction, error)
	//// NewShare creates a share =======================================================================
	//NewShare(ctx context.Context, path []string, opt *NewShareAction) ([]*BackFileAction, error)
	//// GetShare gets a share =======================================================================
	//GetShare(ctx context.Context, path []string, opt *NewShareAction) ([]*UserFileObject, error)
	//// DelShare deletes a share =======================================================================
	//DelShare(ctx context.Context, path []string, opt *NewShareAction) ([]*BackFileAction, error)
}

type UserFileUpload interface {
	fullPost(ctx context.Context, path []string)
	pfCreate(ctx context.Context, path []string)
	pfUpload(ctx context.Context, path []string)
	pfUpdate(ctx context.Context, path []string)
}

func ListFile(ctx context.Context, path []string, opt *ListFileOption) ([]*UserFileObject, error) {
	return ListDeal([]*HostFileObject{})
}

func FindFile(ctx context.Context, path []string, opt *ListFileOption) ([]*UserFileObject, error) {
	return ListDeal([]*HostFileObject{})
}

func ListDeal(originList []*HostFileObject) ([]*UserFileObject, error) {
	serverList := make([]*UserFileObject, 0)
	for _, fileItem := range originList {
		serverList = append(serverList, &UserFileObject{
			HostFileObject: *fileItem,
			// ... user-layer logic
		})
	}
	return serverList, nil
}

func Download(ctx context.Context, path []string, opt *ListFileOption) ([]*LinkFileObject, error) {
	// TODO: not yet implemented in this commit
	return nil, nil
}

func Uploader(ctx context.Context, path []string, opt *ListFileOption) ([]*BackFileAction, error) {
	// TODO: not yet implemented in this commit
	return nil, nil
}
|
79
layers/file/object.go
Normal file
@ -0,0 +1,79 @@
package file

import "time"

// HostFileObject file information obtained from the driver layer
type HostFileObject struct {
	realName []string  // real name
	previews []string  // file previews
	fileSize int64     // file size
	lastTime time.Time // modification time
	makeTime time.Time // creation time
	fileType bool      // file type
	fileHash string    // file hash
	hashType int16     // hash type
}

// UserFileObject file information after conversion by the user layer
type UserFileObject struct {
	HostFileObject
	showPath []string // displayed path
	showName []string // displayed name
	realPath []string // real path
	checksum int32    // password checksum
	fileMask int16    // file permissions
	encrypts int16    // file state

	// The following fields are used by the frontend when displaying files
	enc_type string // encryption/decryption type
	enc_from string // file password source
	enc_pass string // encryption/decryption password
	com_type string // compression type
	sub_nums int16  // number of child entries

	// The following fields are for internal backend processing
	// fileMask =================
	// bits:    000000 0 000 000 000
	// meaning: ABCDEF 1 421 421 421
	// A-encrypted B-frontend decryption C-self-decrypting
	// D-is split volume E-is compressed F-is hidden
	// encrypts =================
	// bits:    0000000000 00 0000
	// meaning: volume count | compression | encryption
}
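The fileMask layout above reads like a packed permission-plus-flag word. The sketch below is a hedged interpretation of it as Go bit constants and helpers; the exact bit positions are an assumption inferred from the comment (low nine bits as three rwx-style 4/2/1 triplets, one reserved bit, then flags F through A), not something this commit defines.

package file

// Hypothetical bit layout for UserFileObject.fileMask, inferred from the
// "ABCDEF 1 421 421 421" comment above. Low 9 bits: three 4/2/1 permission triplets.
const (
	maskPermBits uint16 = 0x01FF // low nine bits: permission triplets

	flagReserved  uint16 = 1 << 9  // the single "1" bit in the comment
	flagHidden    uint16 = 1 << 10 // F - is hidden
	flagCompress  uint16 = 1 << 11 // E - is compressed
	flagSplitVol  uint16 = 1 << 12 // D - is split volume
	flagSelfDec   uint16 = 1 << 13 // C - self-decrypting
	flagFrontDec  uint16 = 1 << 14 // B - frontend decryption
	flagEncrypted uint16 = 1 << 15 // A - encrypted
)

// hasMaskFlag reports whether a flag bit is set in a fileMask value
// (fileMask is stored as int16, so it is reinterpreted as uint16 here).
func hasMaskFlag(mask int16, flag uint16) bool { return uint16(mask)&flag != 0 }

// setMaskFlag returns mask with the given flag bit set.
func setMaskFlag(mask int16, flag uint16) int16 { return int16(uint16(mask) | flag) }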

type PermissionFile struct {
}

type LinkFileObject struct {
	download []string // download links
	usrAgent []string // user agent
}

type ListFileOption struct {
}

type FindFileOption struct {
}

type KillFileOption struct {
}
type MakeFileOption struct {
}
type DownloadOption struct {
	downType int8 // download type

}
type UploaderOption struct {
}
type BackFileAction struct {
	success bool   // whether the action succeeded
	message string // error message
}
type NewShareAction struct {
	BackFileAction
	shareID string    // share code
	pubUrls string    // public link
	passkey string    // share password
	expired time.Time // expiration time
}
|
16
layers/perm/fsmask.go
Normal file
@ -0,0 +1,16 @@
package perm

type FileMask struct {
	uuid     string // key UUID
	user     string // owning user
	path     string // matched path
	name     string // friendly name
	idKeyset string // keyset ID
	encrypts string // encryption group ID
	password string // standalone password
	fileUser string // file owner
	filePart int64  // split volume size
	fileMask int16  // file permissions
	compress int16  // whether compressed
	isEnable bool   // whether enabled
}
|
22
layers/perm/keyset.go
Normal file
@ -0,0 +1,22 @@
package perm

type UserKeys struct {
	uuid    string   // key UUID
	user    string   // owning user
	main    string   // core key (SHA-2 of the user key)
	name    string   // friendly name
	algo    int8     // key algorithm
	enabled bool     // whether enabled
	encFile bool     // encrypt file contents
	encName bool     // encrypt file names
	keyAuto bool     // automatic update
	keyRand bool     // random key
	keyAuth UserAuth // key authentication
}

type UserAuth struct {
	uuid   string // key UUID
	user   string // owning user
	plugin string // authentication plugin
	config string // authentication configuration
}
|
10
layers/perm/shared.go
Normal file
@ -0,0 +1,10 @@
package perm

type ShareUrl struct {
	uuid string // key UUID
	user string // owning user
	path string // share path
	pass string // share password
	date string // expiration time
	flag bool   // whether valid
}
|
14
layers/user/object.go
Normal file
@ -0,0 +1,14 @@
package user

type UserInfo struct {
	uuid string   // user UUID
	name string   // user name
	flag bool     // whether valid
	perm PermInfo // permission info
}

type PermInfo struct {
	isAdmin bool // whether the user is an administrator
	davRead bool // whether read access is allowed
	// ...
}
|
2
main.go
@ -1,6 +1,6 @@
|
||||
package main
|
||||
|
||||
import "github.com/OpenListTeam/OpenList/v4/cmd"
|
||||
import "github.com/OpenListTeam/OpenList/v5/cmd"
|
||||
|
||||
func main() {
|
||||
cmd.Execute()
|
||||
|
@ -93,6 +93,7 @@ jobs:
|
||||
run: bash build.sh dev web
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
|
||||
|
||||
- name: Build
|
||||
uses: OpenListTeam/cgo-actions@v1.2.2
|
@ -39,6 +39,7 @@ jobs:
|
||||
run: bash build.sh dev web
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
|
||||
|
||||
- name: Build
|
||||
uses: OpenListTeam/cgo-actions@v1.2.2
|
@ -66,6 +66,7 @@ jobs:
|
||||
bash build.sh release ${{ matrix.build-type == 'lite' && 'lite' || '' }} ${{ matrix.target-platform }}
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
|
||||
|
||||
- name: Upload assets
|
||||
uses: softprops/action-gh-release@v2
|
@ -66,6 +66,7 @@ jobs:
|
||||
run: bash build.sh release docker-multiplatform
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
|
||||
|
||||
- name: Upload artifacts
|
||||
uses: actions/upload-artifact@v4
|
||||
@ -105,6 +106,7 @@ jobs:
|
||||
run: bash build.sh release lite docker-multiplatform
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
|
||||
|
||||
- name: Upload artifacts
|
||||
uses: actions/upload-artifact@v4
|
@ -55,6 +55,7 @@ jobs:
|
||||
run: bash build.sh beta docker-multiplatform
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
|
||||
|
||||
- name: Upload artifacts
|
||||
uses: actions/upload-artifact@v4
|
34
origin/.gitignore
vendored
Normal file
@ -0,0 +1,34 @@
|
||||
.idea/
|
||||
.DS_Store
|
||||
output/
|
||||
/dist/
|
||||
|
||||
# Binaries for programs and plugins
|
||||
*.exe
|
||||
*.exe~
|
||||
*.dll
|
||||
*.so
|
||||
*.dylib
|
||||
*.db
|
||||
*.bin
|
||||
|
||||
# Test binary, built with `go test -c`
|
||||
*.test
|
||||
|
||||
# Output of the go coverage tool, specifically when used with LiteIDE
|
||||
*.out
|
||||
|
||||
# Dependency directories (remove the comment below to include it)
|
||||
# vendor/
|
||||
/bin/*
|
||||
*.json
|
||||
/build
|
||||
/data/
|
||||
/tmp/
|
||||
/log/
|
||||
/lang/
|
||||
/daemon/
|
||||
/public/dist/*
|
||||
/!public/dist/README.md
|
||||
|
||||
.VSCodeCounter
|
@ -1,3 +1,6 @@
|
||||
### Default image is base. You can add other support by modifying BASE_IMAGE_TAG. The following parameters are supported: base (default), aria2, ffmpeg, aio
|
||||
ARG BASE_IMAGE_TAG=base
|
||||
|
||||
FROM alpine:edge AS builder
|
||||
LABEL stage=go-builder
|
||||
WORKDIR /app/
|
||||
@ -7,21 +10,26 @@ RUN go mod download
|
||||
COPY ./ ./
|
||||
RUN bash build.sh release docker
|
||||
|
||||
### Default image is base. You can add other support by modifying BASE_IMAGE_TAG. The following parameters are supported: base (default), aria2, ffmpeg, aio
|
||||
ARG BASE_IMAGE_TAG=base
|
||||
FROM openlistteam/openlist-base-image:${BASE_IMAGE_TAG}
|
||||
|
||||
LABEL MAINTAINER="OpenList"
|
||||
ARG INSTALL_FFMPEG=false
|
||||
ARG INSTALL_ARIA2=false
|
||||
LABEL MAINTAINER="OpenList"
|
||||
ARG USER=openlist
|
||||
ARG UID=1001
|
||||
ARG GID=1001
|
||||
|
||||
WORKDIR /opt/openlist/
|
||||
|
||||
COPY --chmod=755 --from=builder /app/bin/openlist ./
|
||||
COPY --chmod=755 entrypoint.sh /entrypoint.sh
|
||||
RUN adduser -u ${UID} -g ${GID} -h /opt/openlist/data -D -s /bin/sh ${USER} \
|
||||
&& chown -R ${UID}:${GID} /opt \
|
||||
&& chown -R ${UID}:${GID} /entrypoint.sh
|
||||
|
||||
USER ${USER}
|
||||
RUN /entrypoint.sh version
|
||||
|
||||
ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
|
||||
ENV UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
|
||||
VOLUME /opt/openlist/data/
|
||||
EXPOSE 5244 5245
|
||||
CMD [ "/entrypoint.sh" ]
|
@ -1,18 +1,26 @@
|
||||
ARG BASE_IMAGE_TAG=base
|
||||
FROM ghcr.io/openlistteam/openlist-base-image:${BASE_IMAGE_TAG}
|
||||
|
||||
LABEL MAINTAINER="OpenList"
|
||||
ARG TARGETPLATFORM
|
||||
ARG INSTALL_FFMPEG=false
|
||||
ARG INSTALL_ARIA2=false
|
||||
LABEL MAINTAINER="OpenList"
|
||||
ARG USER=openlist
|
||||
ARG UID=1001
|
||||
ARG GID=1001
|
||||
|
||||
WORKDIR /opt/openlist/
|
||||
|
||||
COPY --chmod=755 /build/${TARGETPLATFORM}/openlist ./
|
||||
COPY --chmod=755 entrypoint.sh /entrypoint.sh
|
||||
|
||||
RUN adduser -u ${UID} -g ${GID} -h /opt/openlist/data -D -s /bin/sh ${USER} \
|
||||
&& chown -R ${UID}:${GID} /opt \
|
||||
&& chown -R ${UID}:${GID} /entrypoint.sh
|
||||
|
||||
USER ${USER}
|
||||
RUN /entrypoint.sh version
|
||||
|
||||
ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
|
||||
ENV UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
|
||||
VOLUME /opt/openlist/data/
|
||||
EXPOSE 5244 5245
|
||||
CMD [ "/entrypoint.sh" ]
|
@ -4,6 +4,9 @@ builtAt="$(date +'%F %T %z')"
|
||||
gitAuthor="The OpenList Projects Contributors <noreply@openlist.team>"
|
||||
gitCommit=$(git log --pretty=format:"%h" -1)
|
||||
|
||||
# Set frontend repository, default to OpenListTeam/OpenList-Frontend
|
||||
frontendRepo="${FRONTEND_REPO:-OpenListTeam/OpenList-Frontend}"
|
||||
|
||||
githubAuthArgs=""
|
||||
if [ -n "$GITHUB_TOKEN" ]; then
|
||||
githubAuthArgs="--header \"Authorization: Bearer $GITHUB_TOKEN\""
|
||||
@ -25,7 +28,7 @@ else
|
||||
git tag -d beta || true
|
||||
# Always true if there's no tag
|
||||
version=$(git describe --abbrev=0 --tags 2>/dev/null || echo "v0.0.0")
|
||||
webVersion=$(eval "curl -fsSL --max-time 2 $githubAuthArgs \"https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases/latest\"" | grep "tag_name" | head -n 1 | awk -F ":" '{print $2}' | sed 's/\"//g;s/,//g;s/ //g')
|
||||
webVersion=$(eval "curl -fsSL --max-time 2 $githubAuthArgs \"https://api.github.com/repos/$frontendRepo/releases/latest\"" | grep "tag_name" | head -n 1 | awk -F ":" '{print $2}' | sed 's/\"//g;s/,//g;s/ //g')
|
||||
fi
|
||||
|
||||
echo "backend version: $version"
|
||||
@ -46,7 +49,7 @@ ldflags="\
|
||||
"
|
||||
|
||||
FetchWebRolling() {
|
||||
pre_release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases/tags/rolling\"")
|
||||
pre_release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/$frontendRepo/releases/tags/rolling\"")
|
||||
pre_release_assets=$(echo "$pre_release_json" | jq -r '.assets[].browser_download_url')
|
||||
|
||||
# There is no lite for rolling
|
||||
@ -59,7 +62,7 @@ FetchWebRolling() {
|
||||
}
|
||||
|
||||
FetchWebRelease() {
|
||||
release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases/latest\"")
|
||||
release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/$frontendRepo/releases/latest\"")
|
||||
release_assets=$(echo "$release_json" | jq -r '.assets[].browser_download_url')
|
||||
|
||||
if [ "$useLite" = true ]; then
|
51
origin/cmd/common.go
Normal file
@ -0,0 +1,51 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/bootstrap"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/bootstrap/data"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/db"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func Init() {
|
||||
bootstrap.InitConfig()
|
||||
bootstrap.Log()
|
||||
bootstrap.InitDB()
|
||||
data.InitData()
|
||||
bootstrap.InitStreamLimit()
|
||||
bootstrap.InitIndex()
|
||||
bootstrap.InitUpgradePatch()
|
||||
}
|
||||
|
||||
func Release() {
|
||||
db.Close()
|
||||
}
|
||||
|
||||
var pid = -1
|
||||
var pidFile string
|
||||
|
||||
func initDaemon() {
|
||||
ex, err := os.Executable()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
exPath := filepath.Dir(ex)
|
||||
_ = os.MkdirAll(filepath.Join(exPath, "daemon"), 0700)
|
||||
pidFile = filepath.Join(exPath, "daemon/pid")
|
||||
if utils.Exists(pidFile) {
|
||||
bytes, err := os.ReadFile(pidFile)
|
||||
if err != nil {
|
||||
log.Fatal("failed to read pid file", err)
|
||||
}
|
||||
id, err := strconv.Atoi(string(bytes))
|
||||
if err != nil {
|
||||
log.Fatal("failed to parse pid data", err)
|
||||
}
|
||||
pid = id
|
||||
}
|
||||
}
|
10
origin/cmd/flags/config.go
Normal file
@ -0,0 +1,10 @@
|
||||
package flags
|
||||
|
||||
var (
|
||||
DataDir string
|
||||
Debug bool
|
||||
NoPrefix bool
|
||||
Dev bool
|
||||
ForceBinDir bool
|
||||
LogStd bool
|
||||
)
|
36
origin/cmd/root.go
Normal file
@ -0,0 +1,36 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/cmd/flags"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/internal/archive"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/internal/offline_download"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var RootCmd = &cobra.Command{
|
||||
Use: "openlist",
|
||||
Short: "A file list program that supports multiple storage.",
|
||||
Long: `A file list program that supports multiple storage,
|
||||
built with love by OpenListTeam.
|
||||
Complete documentation is available at https://doc.oplist.org/`,
|
||||
}
|
||||
|
||||
func Execute() {
|
||||
if err := RootCmd.Execute(); err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
RootCmd.PersistentFlags().StringVar(&flags.DataDir, "data", "data", "data folder")
|
||||
RootCmd.PersistentFlags().BoolVar(&flags.Debug, "debug", false, "start with debug mode")
|
||||
RootCmd.PersistentFlags().BoolVar(&flags.NoPrefix, "no-prefix", false, "disable env prefix")
|
||||
RootCmd.PersistentFlags().BoolVar(&flags.Dev, "dev", false, "start with dev mode")
|
||||
RootCmd.PersistentFlags().BoolVar(&flags.ForceBinDir, "force-bin-dir", false, "Force to use the directory where the binary file is located as data directory")
|
||||
RootCmd.PersistentFlags().BoolVar(&flags.LogStd, "log-std", false, "Force to log to std")
|
||||
}
|
261
origin/cmd/server.go
Normal file
@ -0,0 +1,261 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strconv"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/cmd/flags"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/bootstrap"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/conf"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/fs"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
"github.com/OpenListTeam/OpenList/v4/server"
|
||||
"github.com/OpenListTeam/OpenList/v4/server/middlewares"
|
||||
"github.com/OpenListTeam/sftpd-openlist"
|
||||
ftpserver "github.com/fclairamb/ftpserverlib"
|
||||
"github.com/gin-gonic/gin"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/net/http2/h2c"
|
||||
)
|
||||
|
||||
// ServerCmd represents the server command
|
||||
var ServerCmd = &cobra.Command{
|
||||
Use: "server",
|
||||
Short: "Start the server at the specified address",
|
||||
Long: `Start the server at the specified address
|
||||
the address is defined in config file`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
Init()
|
||||
if conf.Conf.DelayedStart != 0 {
|
||||
utils.Log.Infof("delayed start for %d seconds", conf.Conf.DelayedStart)
|
||||
time.Sleep(time.Duration(conf.Conf.DelayedStart) * time.Second)
|
||||
}
|
||||
bootstrap.InitOfflineDownloadTools()
|
||||
bootstrap.LoadStorages()
|
||||
bootstrap.InitTaskManager()
|
||||
if !flags.Debug && !flags.Dev {
|
||||
gin.SetMode(gin.ReleaseMode)
|
||||
}
|
||||
r := gin.New()
|
||||
|
||||
// gin log
|
||||
if conf.Conf.Log.Filter.Enable {
|
||||
r.Use(middlewares.FilteredLogger())
|
||||
} else {
|
||||
r.Use(gin.LoggerWithWriter(log.StandardLogger().Out))
|
||||
}
|
||||
r.Use(gin.RecoveryWithWriter(log.StandardLogger().Out))
|
||||
|
||||
server.Init(r)
|
||||
var httpHandler http.Handler = r
|
||||
if conf.Conf.Scheme.EnableH2c {
|
||||
httpHandler = h2c.NewHandler(r, &http2.Server{})
|
||||
}
|
||||
var httpSrv, httpsSrv, unixSrv *http.Server
|
||||
if conf.Conf.Scheme.HttpPort != -1 {
|
||||
httpBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpPort)
|
||||
fmt.Printf("start HTTP server @ %s\n", httpBase)
|
||||
utils.Log.Infof("start HTTP server @ %s", httpBase)
|
||||
httpSrv = &http.Server{Addr: httpBase, Handler: httpHandler}
|
||||
go func() {
|
||||
err := httpSrv.ListenAndServe()
|
||||
if err != nil && !errors.Is(err, http.ErrServerClosed) {
|
||||
utils.Log.Fatalf("failed to start http: %s", err.Error())
|
||||
}
|
||||
}()
|
||||
}
|
||||
if conf.Conf.Scheme.HttpsPort != -1 {
|
||||
httpsBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpsPort)
|
||||
fmt.Printf("start HTTPS server @ %s\n", httpsBase)
|
||||
utils.Log.Infof("start HTTPS server @ %s", httpsBase)
|
||||
httpsSrv = &http.Server{Addr: httpsBase, Handler: r}
|
||||
go func() {
|
||||
err := httpsSrv.ListenAndServeTLS(conf.Conf.Scheme.CertFile, conf.Conf.Scheme.KeyFile)
|
||||
if err != nil && !errors.Is(err, http.ErrServerClosed) {
|
||||
utils.Log.Fatalf("failed to start https: %s", err.Error())
|
||||
}
|
||||
}()
|
||||
}
|
||||
if conf.Conf.Scheme.UnixFile != "" {
|
||||
fmt.Printf("start unix server @ %s\n", conf.Conf.Scheme.UnixFile)
|
||||
utils.Log.Infof("start unix server @ %s", conf.Conf.Scheme.UnixFile)
|
||||
unixSrv = &http.Server{Handler: httpHandler}
|
||||
go func() {
|
||||
listener, err := net.Listen("unix", conf.Conf.Scheme.UnixFile)
|
||||
if err != nil {
|
||||
utils.Log.Fatalf("failed to listen unix: %+v", err)
|
||||
}
|
||||
// set socket file permission
|
||||
mode, err := strconv.ParseUint(conf.Conf.Scheme.UnixFilePerm, 8, 32)
|
||||
if err != nil {
|
||||
utils.Log.Errorf("failed to parse socket file permission: %+v", err)
|
||||
} else {
|
||||
err = os.Chmod(conf.Conf.Scheme.UnixFile, os.FileMode(mode))
|
||||
if err != nil {
|
||||
utils.Log.Errorf("failed to chmod socket file: %+v", err)
|
||||
}
|
||||
}
|
||||
err = unixSrv.Serve(listener)
|
||||
if err != nil && !errors.Is(err, http.ErrServerClosed) {
|
||||
utils.Log.Fatalf("failed to start unix: %s", err.Error())
|
||||
}
|
||||
}()
|
||||
}
|
||||
if conf.Conf.S3.Port != -1 && conf.Conf.S3.Enable {
|
||||
s3r := gin.New()
|
||||
s3r.Use(gin.LoggerWithWriter(log.StandardLogger().Out), gin.RecoveryWithWriter(log.StandardLogger().Out))
|
||||
server.InitS3(s3r)
|
||||
s3Base := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.S3.Port)
|
||||
fmt.Printf("start S3 server @ %s\n", s3Base)
|
||||
utils.Log.Infof("start S3 server @ %s", s3Base)
|
||||
go func() {
|
||||
var err error
|
||||
if conf.Conf.S3.SSL {
|
||||
httpsSrv = &http.Server{Addr: s3Base, Handler: s3r}
|
||||
err = httpsSrv.ListenAndServeTLS(conf.Conf.Scheme.CertFile, conf.Conf.Scheme.KeyFile)
|
||||
}
|
||||
if !conf.Conf.S3.SSL {
|
||||
httpSrv = &http.Server{Addr: s3Base, Handler: s3r}
|
||||
err = httpSrv.ListenAndServe()
|
||||
}
|
||||
if err != nil && !errors.Is(err, http.ErrServerClosed) {
|
||||
utils.Log.Fatalf("failed to start s3 server: %s", err.Error())
|
||||
}
|
||||
}()
|
||||
}
|
||||
var ftpDriver *server.FtpMainDriver
|
||||
var ftpServer *ftpserver.FtpServer
|
||||
if conf.Conf.FTP.Listen != "" && conf.Conf.FTP.Enable {
|
||||
var err error
|
||||
ftpDriver, err = server.NewMainDriver()
|
||||
if err != nil {
|
||||
utils.Log.Fatalf("failed to start ftp driver: %s", err.Error())
|
||||
} else {
|
||||
fmt.Printf("start ftp server on %s\n", conf.Conf.FTP.Listen)
|
||||
utils.Log.Infof("start ftp server on %s", conf.Conf.FTP.Listen)
|
||||
go func() {
|
||||
ftpServer = ftpserver.NewFtpServer(ftpDriver)
|
||||
err = ftpServer.ListenAndServe()
|
||||
if err != nil {
|
||||
utils.Log.Fatalf("problem ftp server listening: %s", err.Error())
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
var sftpDriver *server.SftpDriver
|
||||
var sftpServer *sftpd.SftpServer
|
||||
if conf.Conf.SFTP.Listen != "" && conf.Conf.SFTP.Enable {
|
||||
var err error
|
||||
sftpDriver, err = server.NewSftpDriver()
|
||||
if err != nil {
|
||||
utils.Log.Fatalf("failed to start sftp driver: %s", err.Error())
|
||||
} else {
|
||||
fmt.Printf("start sftp server on %s", conf.Conf.SFTP.Listen)
|
||||
utils.Log.Infof("start sftp server on %s", conf.Conf.SFTP.Listen)
|
||||
go func() {
|
||||
sftpServer = sftpd.NewSftpServer(sftpDriver)
|
||||
err = sftpServer.RunServer()
|
||||
if err != nil {
|
||||
utils.Log.Fatalf("problem sftp server listening: %s", err.Error())
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
// Wait for interrupt signal to gracefully shutdown the server with
|
||||
// a timeout of 1 second.
|
||||
quit := make(chan os.Signal, 1)
|
||||
// kill (no param) sends syscall.SIGTERM by default
// kill -2 is syscall.SIGINT
// kill -9 is syscall.SIGKILL, but it cannot be caught, so there is no need to handle it
|
||||
signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
|
||||
<-quit
|
||||
utils.Log.Println("Shutdown server...")
|
||||
fs.ArchiveContentUploadTaskManager.RemoveAll()
|
||||
Release()
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
|
||||
defer cancel()
|
||||
var wg sync.WaitGroup
|
||||
if conf.Conf.Scheme.HttpPort != -1 {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
if err := httpSrv.Shutdown(ctx); err != nil {
|
||||
utils.Log.Fatal("HTTP server shutdown err: ", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
if conf.Conf.Scheme.HttpsPort != -1 {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
if err := httpsSrv.Shutdown(ctx); err != nil {
|
||||
utils.Log.Fatal("HTTPS server shutdown err: ", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
if conf.Conf.Scheme.UnixFile != "" {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
if err := unixSrv.Shutdown(ctx); err != nil {
|
||||
utils.Log.Fatal("Unix server shutdown err: ", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
if conf.Conf.FTP.Listen != "" && conf.Conf.FTP.Enable && ftpServer != nil && ftpDriver != nil {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
ftpDriver.Stop()
|
||||
if err := ftpServer.Stop(); err != nil {
|
||||
utils.Log.Fatal("FTP server shutdown err: ", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
if conf.Conf.SFTP.Listen != "" && conf.Conf.SFTP.Enable && sftpServer != nil && sftpDriver != nil {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
if err := sftpServer.Close(); err != nil {
|
||||
utils.Log.Fatal("SFTP server shutdown err: ", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
utils.Log.Println("Server exit")
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
RootCmd.AddCommand(ServerCmd)
|
||||
|
||||
// Here you will define your flags and configuration settings.
|
||||
|
||||
// Cobra supports Persistent Flags which will work for this command
|
||||
// and all subcommands, e.g.:
|
||||
// serverCmd.PersistentFlags().String("foo", "", "A help for foo")
|
||||
|
||||
// Cobra supports local flags which will only run when this command
|
||||
// is called directly, e.g.:
|
||||
// serverCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
|
||||
}
|
||||
|
||||
// OutOpenListInit exposes a function that allows the server to be started from external code
|
||||
func OutOpenListInit() {
|
||||
var (
|
||||
cmd *cobra.Command
|
||||
args []string
|
||||
)
|
||||
ServerCmd.Run(cmd, args)
|
||||
}
|
@ -9,6 +9,7 @@ import (
|
||||
"strconv"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/db"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
"github.com/charmbracelet/bubbles/table"
|
||||
tea "github.com/charmbracelet/bubbletea"
|
||||
"github.com/charmbracelet/lipgloss"
|
||||
@ -22,8 +23,8 @@ var storageCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
var disableStorageCmd = &cobra.Command{
|
||||
Use: "disable",
|
||||
Short: "Disable a storage",
|
||||
Use: "disable [mount path]",
|
||||
Short: "Disable a storage by mount path",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 1 {
|
||||
return fmt.Errorf("mount path is required")
|
||||
@ -34,15 +35,48 @@ var disableStorageCmd = &cobra.Command{
|
||||
storage, err := db.GetStorageByMountPath(mountPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to query storage: %+v", err)
|
||||
} else {
|
||||
}
|
||||
storage.Disabled = true
|
||||
err = db.UpdateStorage(storage)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update storage: %+v", err)
|
||||
} else {
|
||||
fmt.Printf("Storage with mount path [%s] have been disabled\n", mountPath)
|
||||
}
|
||||
utils.Log.Infof("Storage with mount path [%s] has been disabled from CLI", mountPath)
|
||||
fmt.Printf("Storage with mount path [%s] has been disabled\n", mountPath)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var deleteStorageCmd = &cobra.Command{
|
||||
Use: "delete [id]",
|
||||
Short: "Delete a storage by id",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 1 {
|
||||
return fmt.Errorf("id is required")
|
||||
}
|
||||
id, err := strconv.Atoi(args[0])
|
||||
if err != nil {
|
||||
return fmt.Errorf("id must be a number")
|
||||
}
|
||||
|
||||
if force, _ := cmd.Flags().GetBool("force"); force {
|
||||
fmt.Printf("Are you sure you want to delete storage with id [%d]? [y/N]: ", id)
|
||||
var confirm string
|
||||
fmt.Scanln(&confirm)
|
||||
if confirm != "y" && confirm != "Y" {
|
||||
fmt.Println("Delete operation cancelled.")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
Init()
|
||||
defer Release()
|
||||
err = db.DeleteStorageById(uint(id))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete storage by id: %+v", err)
|
||||
}
|
||||
utils.Log.Infof("Storage with id [%d] have been deleted from CLI", id)
|
||||
fmt.Printf("Storage with id [%d] have been deleted\n", id)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
@ -152,6 +186,8 @@ func init() {
|
||||
storageCmd.AddCommand(disableStorageCmd)
|
||||
storageCmd.AddCommand(listStorageCmd)
|
||||
storageCmd.PersistentFlags().IntVarP(&storageTableHeight, "height", "H", 10, "Table height")
|
||||
storageCmd.AddCommand(deleteStorageCmd)
|
||||
deleteStorageCmd.Flags().BoolP("force", "f", false, "Force delete without confirmation")
|
||||
// Here you will define your flags and configuration settings.
|
||||
|
||||
// Cobra supports Persistent Flags which will work for this command
|
@ -186,9 +186,7 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
|
||||
preHash = strings.ToUpper(preHash)
|
||||
fullHash := stream.GetHash().GetHash(utils.SHA1)
|
||||
if len(fullHash) != utils.SHA1.Width {
|
||||
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
|
||||
up = model.UpdateProgressWithRange(up, 50, 100)
|
||||
_, fullHash, err = streamPkg.CacheFullInTempFileAndHash(stream, cacheFileProgress, utils.SHA1)
|
||||
_, fullHash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
@ -321,7 +321,7 @@ func (d *Pan115) UploadByMultipart(ctx context.Context, params *driver115.Upload
|
||||
err error
|
||||
)
|
||||
|
||||
tmpF, err := s.CacheFullInTempFile()
|
||||
tmpF, err := s.CacheFullAndWriter(&up, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
@ -239,9 +239,7 @@ func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
|
||||
}
|
||||
sha1 := file.GetHash().GetHash(utils.SHA1)
|
||||
if len(sha1) != utils.SHA1.Width {
|
||||
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
|
||||
up = model.UpdateProgressWithRange(up, 50, 100)
|
||||
_, sha1, err = stream.CacheFullInTempFileAndHash(file, cacheFileProgress, utils.SHA1)
|
||||
_, sha1, err = stream.CacheFullAndHash(file, &up, utils.SHA1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
@ -9,6 +9,7 @@ import (
|
||||
sdk "github.com/OpenListTeam/115-sdk-go"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
"github.com/aliyun/aliyun-oss-go-sdk/oss"
|
||||
"github.com/avast/retry-go"
|
||||
@ -69,9 +70,6 @@ func (d *Open115) singleUpload(ctx context.Context, tempF model.File, tokenResp
|
||||
// }
|
||||
|
||||
func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress, tokenResp *sdk.UploadGetTokenResp, initResp *sdk.UploadInitResp) error {
|
||||
fileSize := stream.GetSize()
|
||||
chunkSize := calPartSize(fileSize)
|
||||
|
||||
ossClient, err := oss.New(tokenResp.Endpoint, tokenResp.AccessKeyId, tokenResp.AccessKeySecret, oss.SecurityToken(tokenResp.SecurityToken))
|
||||
if err != nil {
|
||||
return err
|
||||
@ -86,6 +84,13 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
|
||||
return err
|
||||
}
|
||||
|
||||
fileSize := stream.GetSize()
|
||||
chunkSize := calPartSize(fileSize)
|
||||
ss, err := streamPkg.NewStreamSectionReader(stream, int(chunkSize), &up)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
partNum := (stream.GetSize() + chunkSize - 1) / chunkSize
|
||||
parts := make([]oss.UploadPart, partNum)
|
||||
offset := int64(0)
|
||||
@ -98,10 +103,13 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
|
||||
if i == partNum {
|
||||
partSize = fileSize - (i-1)*chunkSize
|
||||
}
|
||||
rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
|
||||
err = retry.Do(func() error {
|
||||
_ = rd.Reset()
|
||||
rd, err := ss.GetSectionReader(offset, partSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
|
||||
err = retry.Do(func() error {
|
||||
rd.Seek(0, io.SeekStart)
|
||||
part, err := bucket.UploadPart(imur, rateLimitedRd, partSize, int(i))
|
||||
if err != nil {
|
||||
return err
|
||||
@ -112,6 +120,7 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
|
||||
retry.Attempts(3),
|
||||
retry.DelayType(retry.BackOffDelay),
|
||||
retry.Delay(time.Second))
|
||||
ss.FreeSectionReader(rd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
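To summarize the pattern this hunk switches to (and which the 123 driver below uses as well), here is a hedged sketch of the acquire / seek-on-retry / free cycle around a stream section reader. The uploadPart callback, sizes, and overall loop shape are illustrative assumptions; only the NewStreamSectionReader, GetSectionReader, FreeSectionReader, and NewLimitedUploadStream calls mirror what the diff shows.

package example

import (
	"context"
	"io"
	"time"

	"github.com/OpenListTeam/OpenList/v4/internal/driver"
	"github.com/OpenListTeam/OpenList/v4/internal/model"
	streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
	"github.com/avast/retry-go"
)

// uploadInSections sketches the section-reader upload loop introduced in this change.
// uploadPart is a hypothetical per-part upload function supplied by the caller.
func uploadInSections(ctx context.Context, file model.FileStreamer, chunkSize int64,
	up driver.UpdateProgress, uploadPart func(r io.Reader, size int64) error) error {

	ss, err := streamPkg.NewStreamSectionReader(file, int(chunkSize), &up)
	if err != nil {
		return err
	}

	size := file.GetSize()
	for offset := int64(0); offset < size; offset += chunkSize {
		partSize := min(chunkSize, size-offset)

		rd, err := ss.GetSectionReader(offset, partSize) // acquire a reusable section
		if err != nil {
			return err
		}
		rateLimited := driver.NewLimitedUploadStream(ctx, rd)

		err = retry.Do(func() error {
			rd.Seek(0, io.SeekStart) // rewind before every attempt
			return uploadPart(rateLimited, partSize)
		},
			retry.Attempts(3),
			retry.DelayType(retry.BackOffDelay),
			retry.Delay(time.Second))

		ss.FreeSectionReader(rd) // always hand the section back
		if err != nil {
			return err
		}
	}
	return nil
}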
|
@ -182,9 +182,7 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, file model.FileStrea
|
||||
etag := file.GetHash().GetHash(utils.MD5)
|
||||
var err error
|
||||
if len(etag) < utils.MD5.Width {
|
||||
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
|
||||
up = model.UpdateProgressWithRange(up, 50, 100)
|
||||
_, etag, err = stream.CacheFullInTempFileAndHash(file, cacheFileProgress, utils.MD5)
|
||||
_, etag, err = stream.CacheFullAndHash(file, &up, utils.MD5)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
@ -12,6 +12,7 @@ type Addition struct {
|
||||
//OrderBy string `json:"order_by" type:"select" options:"file_id,file_name,size,update_at" default:"file_name"`
|
||||
//OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
|
||||
AccessToken string
|
||||
UploadThread int `json:"UploadThread" type:"number" default:"3" help:"the threads of upload"`
|
||||
}
|
||||
|
||||
var config = driver.Config{
|
||||
@ -22,6 +23,11 @@ var config = driver.Config{
|
||||
|
||||
func init() {
|
||||
op.RegisterDriver(func() driver.Driver {
|
||||
return &Pan123{}
|
||||
// Newly added default options must be set during RegisterDriver initialization to take effect for users who are already using the driver
|
||||
return &Pan123{
|
||||
Addition: Addition{
|
||||
UploadThread: 3,
|
||||
},
|
||||
}
|
||||
})
|
||||
}
|
196
origin/drivers/123/upload.go
Normal file
@ -0,0 +1,196 @@
|
||||
package _123
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/drivers/base"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/stream"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
"github.com/avast/retry-go"
|
||||
"github.com/go-resty/resty/v2"
|
||||
)
|
||||
|
||||
func (d *Pan123) getS3PreSignedUrls(ctx context.Context, upReq *UploadResp, start, end int) (*S3PreSignedURLs, error) {
|
||||
data := base.Json{
|
||||
"bucket": upReq.Data.Bucket,
|
||||
"key": upReq.Data.Key,
|
||||
"partNumberEnd": end,
|
||||
"partNumberStart": start,
|
||||
"uploadId": upReq.Data.UploadId,
|
||||
"StorageNode": upReq.Data.StorageNode,
|
||||
}
|
||||
var s3PreSignedUrls S3PreSignedURLs
|
||||
_, err := d.Request(S3PreSignedUrls, http.MethodPost, func(req *resty.Request) {
|
||||
req.SetBody(data).SetContext(ctx)
|
||||
}, &s3PreSignedUrls)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &s3PreSignedUrls, nil
|
||||
}
|
||||
|
||||
func (d *Pan123) getS3Auth(ctx context.Context, upReq *UploadResp, start, end int) (*S3PreSignedURLs, error) {
|
||||
data := base.Json{
|
||||
"StorageNode": upReq.Data.StorageNode,
|
||||
"bucket": upReq.Data.Bucket,
|
||||
"key": upReq.Data.Key,
|
||||
"partNumberEnd": end,
|
||||
"partNumberStart": start,
|
||||
"uploadId": upReq.Data.UploadId,
|
||||
}
|
||||
var s3PreSignedUrls S3PreSignedURLs
|
||||
_, err := d.Request(S3Auth, http.MethodPost, func(req *resty.Request) {
|
||||
req.SetBody(data).SetContext(ctx)
|
||||
}, &s3PreSignedUrls)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &s3PreSignedUrls, nil
|
||||
}
|
||||
|
||||
func (d *Pan123) completeS3(ctx context.Context, upReq *UploadResp, file model.FileStreamer, isMultipart bool) error {
|
||||
data := base.Json{
|
||||
"StorageNode": upReq.Data.StorageNode,
|
||||
"bucket": upReq.Data.Bucket,
|
||||
"fileId": upReq.Data.FileId,
|
||||
"fileSize": file.GetSize(),
|
||||
"isMultipart": isMultipart,
"key": upReq.Data.Key,
"uploadId": upReq.Data.UploadId,
}
_, err := d.Request(UploadCompleteV2, http.MethodPost, func(req *resty.Request) {
req.SetBody(data).SetContext(ctx)
}, nil)
return err
}

func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, up driver.UpdateProgress) error {
// fetch s3 pre signed urls
size := file.GetSize()
chunkSize := int64(16 * utils.MB)
chunkCount := 1
if size > chunkSize {
chunkCount = int((size + chunkSize - 1) / chunkSize)
}

ss, err := stream.NewStreamSectionReader(file, int(chunkSize), &up)
if err != nil {
return err
}

lastChunkSize := size % chunkSize
if lastChunkSize == 0 {
lastChunkSize = chunkSize
}
// only 1 batch is allowed
batchSize := 1
getS3UploadUrl := d.getS3Auth
if chunkCount > 1 {
batchSize = 10
getS3UploadUrl = d.getS3PreSignedUrls
}

thread := min(int(chunkCount), d.UploadThread)
threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
for i := 1; i <= chunkCount; i += batchSize {
if utils.IsCanceled(uploadCtx) {
break
}
start := i
end := min(i+batchSize, chunkCount+1)
s3PreSignedUrls, err := getS3UploadUrl(uploadCtx, upReq, start, end)
if err != nil {
return err
}
// upload each chunk
for cur := start; cur < end; cur++ {
if utils.IsCanceled(uploadCtx) {
break
}
offset := int64(cur-1) * chunkSize
curSize := chunkSize
if cur == chunkCount {
curSize = lastChunkSize
}
var reader *stream.SectionReader
var rateLimitedRd io.Reader
threadG.GoWithLifecycle(errgroup.Lifecycle{
Before: func(ctx context.Context) error {
if reader == nil {
var err error
reader, err = ss.GetSectionReader(offset, curSize)
if err != nil {
return err
}
rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
}
return nil
},
Do: func(ctx context.Context) error {
reader.Seek(0, io.SeekStart)
uploadUrl := s3PreSignedUrls.Data.PreSignedUrls[strconv.Itoa(cur)]
if uploadUrl == "" {
return fmt.Errorf("upload url is empty, s3PreSignedUrls: %+v", s3PreSignedUrls)
}
reader.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, rateLimitedRd)
if err != nil {
return err
}
req.ContentLength = curSize
//req.Header.Set("Content-Length", strconv.FormatInt(curSize, 10))
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode == http.StatusForbidden {
singleflight.AnyGroup.Do(fmt.Sprintf("Pan123.newUpload_%p", threadG), func() (any, error) {
newS3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, cur, end)
if err != nil {
return nil, err
}
s3PreSignedUrls.Data.PreSignedUrls = newS3PreSignedUrls.Data.PreSignedUrls
return nil, nil
})
if err != nil {
return err
}
return fmt.Errorf("upload s3 chunk %d failed, status code: %d", cur, res.StatusCode)
}
if res.StatusCode != http.StatusOK {
body, err := io.ReadAll(res.Body)
if err != nil {
return err
}
return fmt.Errorf("upload s3 chunk %d failed, status code: %d, body: %s", cur, res.StatusCode, body)
}
progress := 10.0 + 85.0*float64(threadG.Success())/float64(chunkCount)
up(progress)
return nil
},
After: func(err error) {
ss.FreeSectionReader(reader)
},
})
}
}
if err := threadG.Wait(); err != nil {
return err
}
defer up(100)
// complete s3 upload
return d.completeS3(ctx, upReq, file, chunkCount > 1)
}
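For reference, a minimal standalone sketch (not part of the diff above) of the chunk arithmetic newUpload relies on: ceiling division for the chunk count and the remainder for the final chunk, assuming the same 16 MiB chunk size. The file size below is a made-up example.

package main

import "fmt"

func main() {
    // Hypothetical 40 MiB + 123 B file, mirroring the math in newUpload above.
    size := int64(40<<20 + 123)
    chunkSize := int64(16 << 20) // 16 MiB, as in the driver
    chunkCount := 1
    if size > chunkSize {
        chunkCount = int((size + chunkSize - 1) / chunkSize) // ceil(size/chunkSize) = 3
    }
    lastChunkSize := size % chunkSize // size of the final, shorter chunk
    if lastChunkSize == 0 {
        lastChunkSize = chunkSize
    }
    fmt.Println(chunkCount, lastChunkSize) // 3 8388731
}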
@ -2,7 +2,9 @@ package _123_open

import (
"context"
"fmt"
"strconv"
"time"

"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
@ -95,6 +97,22 @@ func (d *Open123) Rename(ctx context.Context, srcObj model.Obj, newName string)
}

func (d *Open123) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
// Try to implement copy by re-uploading with the MD5 so the server can instant-upload (rapid upload) the file
// 1. Create the file
// parentFileID: the parent directory ID; use 0 when uploading to the root directory
parentFileId, err := strconv.ParseInt(dstDir.GetID(), 10, 64)
if err != nil {
return fmt.Errorf("parse parentFileID error: %v", err)
}
etag := srcObj.(File).Etag
createResp, err := d.create(parentFileId, srcObj.GetName(), etag, srcObj.GetSize(), 2, false)
if err != nil {
return err
}
// Did instant upload (rapid upload) kick in?
if createResp.Data.Reuse {
return nil
}
return errs.NotSupport
}

@ -104,27 +122,64 @@ func (d *Open123) Remove(ctx context.Context, obj model.Obj) error {
return d.trash(fileId)
}

func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
// 1. Create the file
// parentFileID: the parent directory ID; use 0 when uploading to the root directory
parentFileId, err := strconv.ParseInt(dstDir.GetID(), 10, 64)
etag := file.GetHash().GetHash(utils.MD5)

if len(etag) < utils.MD5.Width {
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, etag, err = stream.CacheFullInTempFileAndHash(file, cacheFileProgress, utils.MD5)
if err != nil {
return err
return nil, fmt.Errorf("parse parentFileID error: %v", err)
}
// etag: the file's MD5
etag := file.GetHash().GetHash(utils.MD5)
if len(etag) < utils.MD5.Width {
_, etag, err = stream.CacheFullAndHash(file, &up, utils.MD5)
if err != nil {
return nil, err
}
}
createResp, err := d.create(parentFileId, file.GetName(), etag, file.GetSize(), 2, false)
if err != nil {
return err
return nil, err
}
// Did instant upload (rapid upload) kick in?
if createResp.Data.Reuse {
return nil
// A valid FileID is returned only when instant upload succeeds; otherwise it is 0
if createResp.Data.FileID != 0 {
return File{
FileName: file.GetName(),
Size: file.GetSize(),
FileId: createResp.Data.FileID,
Type: 2,
Etag: etag,
}, nil
}
}

return d.Upload(ctx, file, createResp, up)
// 2. Upload the slices
err = d.Upload(ctx, file, createResp, up)
if err != nil {
return nil, err
}

// 3. Finish the upload
for range 60 {
uploadCompleteResp, err := d.complete(createResp.Data.PreuploadID)
// The error codes returned here are undocumented (e.g. 20103); the docs give no details
if err == nil && uploadCompleteResp.Data.Completed && uploadCompleteResp.Data.FileID != 0 {
up(100)
return File{
FileName: file.GetName(),
Size: file.GetSize(),
FileId: uploadCompleteResp.Data.FileID,
Type: 2,
Etag: etag,
}, nil
}
// If the API reports completed == false, poll this endpoint again after one second to get the final upload result.
time.Sleep(time.Second)
}
return nil, fmt.Errorf("upload complete timeout")
}

var _ driver.Driver = (*Open123)(nil)
var _ driver.PutResult = (*Open123)(nil)
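As a side note, the polling step added to Put above follows a common pattern: query the completion endpoint up to 60 times, one second apart, and give up with a timeout error otherwise. Below is a minimal standalone sketch of that pattern; the poll function is a hypothetical stand-in for the driver's completion query, not part of the diff.

package main

import (
    "fmt"
    "time"
)

// poll is a hypothetical stand-in for the completion query; it reports
// success on the third attempt just to keep the sketch runnable.
func poll(attempt int) (completed bool, fileID int64, err error) {
    if attempt < 2 {
        return false, 0, nil
    }
    return true, 42, nil
}

func main() {
    for i := 0; i < 60; i++ { // poll roughly once per second, give up after ~60s
        ok, id, err := poll(i)
        if err == nil && ok && id != 0 {
            fmt.Println("upload finished, fileID:", id)
            return
        }
        time.Sleep(time.Second)
    }
    fmt.Println("upload complete timeout")
}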
@ -73,7 +73,9 @@ func (f File) GetName() string {
}

func (f File) CreateTime() time.Time {
parsedTime, err := time.Parse("2006-01-02 15:04:05", f.CreateAt)
// The returned timestamp carries no timezone info; assume UTC+8
loc := time.FixedZone("UTC+8", 8*60*60)
parsedTime, err := time.ParseInLocation("2006-01-02 15:04:05", f.CreateAt, loc)
if err != nil {
return time.Now()
}
@ -81,7 +83,9 @@ func (f File) CreateTime() time.Time {
}

func (f File) ModTime() time.Time {
parsedTime, err := time.Parse("2006-01-02 15:04:05", f.UpdateAt)
// The returned timestamp carries no timezone info; assume UTC+8
loc := time.FixedZone("UTC+8", 8*60*60)
parsedTime, err := time.ParseInLocation("2006-01-02 15:04:05", f.UpdateAt, loc)
if err != nil {
return time.Now()
}
@ -154,6 +158,7 @@ type DownloadInfoResp struct {
} `json:"data"`
}

// Response of Create File V2
type UploadCreateResp struct {
BaseResp
Data struct {
@ -161,45 +166,15 @@ type UploadCreateResp struct {
PreuploadID string `json:"preuploadID"`
Reuse bool `json:"reuse"`
SliceSize int64 `json:"sliceSize"`
Servers []string `json:"servers"`
} `json:"data"`
}

type UploadUrlResp struct {
BaseResp
Data struct {
PresignedURL string `json:"presignedURL"`
}
}

// Response of Upload Complete V2
type UploadCompleteResp struct {
BaseResp
Data struct {
Async bool `json:"async"`
Completed bool `json:"completed"`
FileID int64 `json:"fileID"`
} `json:"data"`
}

type UploadAsyncResp struct {
BaseResp
Data struct {
Completed bool `json:"completed"`
FileID int64 `json:"fileID"`
} `json:"data"`
}

type UploadResp struct {
BaseResp
Data struct {
AccessKeyId string `json:"AccessKeyId"`
Bucket string `json:"Bucket"`
Key string `json:"Key"`
SecretAccessKey string `json:"SecretAccessKey"`
SessionToken string `json:"SessionToken"`
FileId int64 `json:"FileId"`
Reuse bool `json:"Reuse"`
EndPoint string `json:"EndPoint"`
StorageNode string `json:"StorageNode"`
UploadId string `json:"UploadId"`
} `json:"data"`
}
186
origin/drivers/123_open/upload.go
Normal file
@ -0,0 +1,186 @@
package _123_open

import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"mime/multipart"
"net/http"
"strconv"
"strings"
"time"

"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
)

// Create file V2
func (d *Open123) create(parentFileID int64, filename string, etag string, size int64, duplicate int, containDir bool) (*UploadCreateResp, error) {
var resp UploadCreateResp
_, err := d.Request(UploadCreate, http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"parentFileId": parentFileID,
"filename": filename,
"etag": strings.ToLower(etag),
"size": size,
"duplicate": duplicate,
"containDir": containDir,
})
}, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}

// Upload slices V2
func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createResp *UploadCreateResp, up driver.UpdateProgress) error {
uploadDomain := createResp.Data.Servers[0]
size := file.GetSize()
chunkSize := createResp.Data.SliceSize

ss, err := stream.NewStreamSectionReader(file, int(chunkSize), &up)
if err != nil {
return err
}

uploadNums := (size + chunkSize - 1) / chunkSize
thread := min(int(uploadNums), d.UploadThread)
threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))

for partIndex := range uploadNums {
if utils.IsCanceled(uploadCtx) {
break
}
partIndex := partIndex
partNumber := partIndex + 1 // slice numbers start at 1
offset := partIndex * chunkSize
size := min(chunkSize, size-offset)
var reader *stream.SectionReader
var rateLimitedRd io.Reader
sliceMD5 := ""
threadG.GoWithLifecycle(errgroup.Lifecycle{
Before: func(ctx context.Context) error {
if reader == nil {
var err error
// one reader per slice
reader, err = ss.GetSectionReader(offset, size)
if err != nil {
return err
}
// compute the MD5 of the current slice
sliceMD5, err = utils.HashReader(utils.MD5, reader)
if err != nil {
return err
}
rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
}
return nil
},
Do: func(ctx context.Context) error {
// Reset the slice reader position: HashReader (or a previous failed attempt) has already read it to EOF
reader.Seek(0, io.SeekStart)

// Build the multipart form data
var b bytes.Buffer
w := multipart.NewWriter(&b)
// Add the form fields
err = w.WriteField("preuploadID", createResp.Data.PreuploadID)
if err != nil {
return err
}
err = w.WriteField("sliceNo", strconv.FormatInt(partNumber, 10))
if err != nil {
return err
}
err = w.WriteField("sliceMD5", sliceMD5)
if err != nil {
return err
}
// Write the file content
fw, err := w.CreateFormFile("slice", fmt.Sprintf("%s.part%d", file.GetName(), partNumber))
if err != nil {
return err
}
_, err = utils.CopyWithBuffer(fw, rateLimitedRd)
if err != nil {
return err
}
err = w.Close()
if err != nil {
return err
}

// Create the request and set headers
req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadDomain+"/upload/v2/file/slice", &b)
if err != nil {
return err
}

// Set request headers
req.Header.Add("Authorization", "Bearer "+d.AccessToken)
req.Header.Add("Content-Type", w.FormDataContentType())
req.Header.Add("Platform", "open_platform")

res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != 200 {
return fmt.Errorf("slice %d upload failed, status code: %d", partNumber, res.StatusCode)
}
var resp BaseResp
respBody, err := io.ReadAll(res.Body)
if err != nil {
return err
}
err = json.Unmarshal(respBody, &resp)
if err != nil {
return err
}
if resp.Code != 0 {
return fmt.Errorf("slice %d upload failed: %s", partNumber, resp.Message)
}

progress := 10.0 + 85.0*float64(threadG.Success())/float64(uploadNums)
up(progress)
return nil
},
After: func(err error) {
ss.FreeSectionReader(reader)
},
})
}

if err := threadG.Wait(); err != nil {
return err
}

return nil
}

// Upload complete
func (d *Open123) complete(preuploadID string) (*UploadCompleteResp, error) {
var resp UploadCompleteResp
_, err := d.Request(UploadComplete, http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"preuploadID": preuploadID,
})
}, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}
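For context, the slice upload above sends a multipart/form-data body with three plain fields (preuploadID, sliceNo, sliceMD5) and the slice bytes as a file part named "slice". Below is a minimal standalone sketch of building such a body with the standard library; every value is a placeholder, not real API data.

package main

import (
    "bytes"
    "fmt"
    "mime/multipart"
)

func main() {
    var b bytes.Buffer
    w := multipart.NewWriter(&b)
    // Plain form fields, mirroring the slice-upload request above.
    _ = w.WriteField("preuploadID", "example-preupload-id")
    _ = w.WriteField("sliceNo", "1")
    _ = w.WriteField("sliceMD5", "d41d8cd98f00b204e9800998ecf8427e")
    // The slice content itself goes in as a file part named "slice".
    fw, _ := w.CreateFormFile("slice", "example.bin.part1")
    _, _ = fw.Write([]byte("placeholder slice bytes"))
    _ = w.Close()
    // The finished body would be posted with Content-Type set to
    // w.FormDataContentType(), as the driver does above.
    fmt.Println("content type:", w.FormDataContentType())
    fmt.Println("body length:", b.Len())
}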
@ -19,16 +19,14 @@ var ( // The QPS limit for obtaining the AccessToken differs by scenario; split out per endpoint below for easier
AccessToken = InitApiInfo(Api+"/api/v1/access_token", 1)
RefreshToken = InitApiInfo(Api+"/api/v1/oauth2/access_token", 1)
UserInfo = InitApiInfo(Api+"/api/v1/user/info", 1)
FileList = InitApiInfo(Api+"/api/v2/file/list", 4)
FileList = InitApiInfo(Api+"/api/v2/file/list", 3)
DownloadInfo = InitApiInfo(Api+"/api/v1/file/download_info", 0)
Mkdir = InitApiInfo(Api+"/upload/v1/file/mkdir", 2)
Move = InitApiInfo(Api+"/api/v1/file/move", 1)
Rename = InitApiInfo(Api+"/api/v1/file/name", 1)
Trash = InitApiInfo(Api+"/api/v1/file/trash", 2)
UploadCreate = InitApiInfo(Api+"/upload/v1/file/create", 2)
UploadUrl = InitApiInfo(Api+"/upload/v1/file/get_upload_url", 0)
UploadComplete = InitApiInfo(Api+"/upload/v1/file/upload_complete", 0)
UploadAsync = InitApiInfo(Api+"/upload/v1/file/upload_async_result", 1)
UploadCreate = InitApiInfo(Api+"/upload/v2/file/create", 2)
UploadComplete = InitApiInfo(Api+"/upload/v2/file/upload_complete", 0)
)

func (d *Open123) Request(apiInfo *ApiInfo, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
Some files were not shown because too many files have changed in this diff