Mirror of https://github.com/OpenListTeam/OpenList.git (synced 2025-09-20 04:36:09 +08:00)

Compare commits: next...renovate/g (1 commit, SHA1 18c0f551fe)
@@ -1,6 +1,3 @@
### Default image is base. You can add other support by modifying BASE_IMAGE_TAG. The following parameters are supported: base (default), aria2, ffmpeg, aio
ARG BASE_IMAGE_TAG=base

FROM alpine:edge AS builder
LABEL stage=go-builder
WORKDIR /app/
@@ -10,26 +7,21 @@ RUN go mod download
COPY ./ ./
RUN bash build.sh release docker

### Default image is base. You can add other support by modifying BASE_IMAGE_TAG. The following parameters are supported: base (default), aria2, ffmpeg, aio
ARG BASE_IMAGE_TAG=base
FROM openlistteam/openlist-base-image:${BASE_IMAGE_TAG}
LABEL MAINTAINER="OpenList"

ARG INSTALL_FFMPEG=false
ARG INSTALL_ARIA2=false
ARG USER=openlist
ARG UID=1001
ARG GID=1001
LABEL MAINTAINER="OpenList"

WORKDIR /opt/openlist/

COPY --chmod=755 --from=builder /app/bin/openlist ./
COPY --chmod=755 entrypoint.sh /entrypoint.sh
RUN adduser -u ${UID} -g ${GID} -h /opt/openlist/data -D -s /bin/sh ${USER} \
    && chown -R ${UID}:${GID} /opt \
    && chown -R ${UID}:${GID} /entrypoint.sh

USER ${USER}
RUN /entrypoint.sh version

ENV UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
VOLUME /opt/openlist/data/
EXPOSE 5244 5245
CMD [ "/entrypoint.sh" ]
@@ -1,26 +1,18 @@
ARG BASE_IMAGE_TAG=base
FROM ghcr.io/openlistteam/openlist-base-image:${BASE_IMAGE_TAG}
LABEL MAINTAINER="OpenList"

ARG TARGETPLATFORM
ARG INSTALL_FFMPEG=false
ARG INSTALL_ARIA2=false
ARG USER=openlist
ARG UID=1001
ARG GID=1001
LABEL MAINTAINER="OpenList"

WORKDIR /opt/openlist/

COPY --chmod=755 /build/${TARGETPLATFORM}/openlist ./
COPY --chmod=755 entrypoint.sh /entrypoint.sh

RUN adduser -u ${UID} -g ${GID} -h /opt/openlist/data -D -s /bin/sh ${USER} \
    && chown -R ${UID}:${GID} /opt \
    && chown -R ${UID}:${GID} /entrypoint.sh

USER ${USER}
RUN /entrypoint.sh version

ENV UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
VOLUME /opt/openlist/data/
EXPOSE 5244 5245
CMD [ "/entrypoint.sh" ]
buf.gen.yaml
@@ -1,11 +0,0 @@
version: v1
plugins:
  - plugin: buf.build/protocolbuffers/go:v1.36.7
    out: .
    opt:
      - paths=source_relative
  - plugin: buf.build/grpc/go:v1.5.1
    out: .
    opt:
      - paths=source_relative
      - require_unimplemented_servers=false
@@ -1,42 +1,51 @@
package cmd

import (
"context"
"os"
"path/filepath"
"strconv"

"github.com/OpenListTeam/OpenList/v5/cmd/flags"
"github.com/OpenListTeam/OpenList/v5/internal/bootstrap"
"github.com/sirupsen/logrus"
"github.com/OpenListTeam/OpenList/v4/internal/bootstrap"
"github.com/OpenListTeam/OpenList/v4/internal/bootstrap/data"
"github.com/OpenListTeam/OpenList/v4/internal/db"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
log "github.com/sirupsen/logrus"
)

func Init(ctx context.Context) {
if flags.Dev {
flags.Debug = true
}
initLogrus()
func Init() {
bootstrap.InitConfig()
bootstrap.InitDriverPlugins()
bootstrap.Log()
bootstrap.InitDB()
data.InitData()
bootstrap.InitStreamLimit()
bootstrap.InitIndex()
bootstrap.InitUpgradePatch()
}

func Release() {

db.Close()
}

func initLog(l *logrus.Logger) {
if flags.Debug {
l.SetLevel(logrus.DebugLevel)
l.SetReportCaller(true)
} else {
l.SetLevel(logrus.InfoLevel)
l.SetReportCaller(false)
var pid = -1
var pidFile string

func initDaemon() {
ex, err := os.Executable()
if err != nil {
log.Fatal(err)
}
exPath := filepath.Dir(ex)
_ = os.MkdirAll(filepath.Join(exPath, "daemon"), 0700)
pidFile = filepath.Join(exPath, "daemon/pid")
if utils.Exists(pidFile) {
bytes, err := os.ReadFile(pidFile)
if err != nil {
log.Fatal("failed to read pid file", err)
}
id, err := strconv.Atoi(string(bytes))
if err != nil {
log.Fatal("failed to parse pid data", err)
}
pid = id
}
}
func initLogrus() {
formatter := logrus.TextFormatter{
ForceColors: true,
EnvironmentOverrideColors: true,
TimestampFormat: "2006-01-02 15:04:05",
FullTimestamp: true,
}
logrus.SetFormatter(&formatter)
initLog(logrus.StandardLogger())
}
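Note: the logging setup touched by this hunk amounts to the following standalone sketch (a condensed illustration of the logrus calls shown above, not the project's actual cmd package; the debug parameter stands in for flags.Debug):

package main

import log "github.com/sirupsen/logrus"

// configureLogger mirrors the initLogrus/initLog pattern above: a colored text
// formatter plus a debug switch that also enables caller reporting.
func configureLogger(debug bool) {
	log.SetFormatter(&log.TextFormatter{
		ForceColors:               true,
		EnvironmentOverrideColors: true,
		TimestampFormat:           "2006-01-02 15:04:05",
		FullTimestamp:             true,
	})
	if debug {
		log.SetLevel(log.DebugLevel)
		log.SetReportCaller(true)
	} else {
		log.SetLevel(log.InfoLevel)
		log.SetReportCaller(false)
	}
}

func main() {
	configureLogger(true)
	log.Debug("debug logging enabled")
}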
@@ -1,40 +1,10 @@
package flags

import (
"os"
"path/filepath"

"github.com/sirupsen/logrus"
)

var (
ConfigFile string
DataDir string
Debug bool
NoPrefix bool
Dev bool
ForceBinDir bool
LogStd bool

pwd string
)

// Program working directory
func PWD() string {
if pwd != "" {
return pwd
}
if ForceBinDir {
ex, err := os.Executable()
if err != nil {
logrus.Fatal(err)
}
pwd = filepath.Dir(ex)
return pwd
}
d, err := os.Getwd()
if err != nil {
logrus.Fatal(err)
}
pwd = d
return d
}
cmd/root.go
@@ -4,7 +4,10 @@ import (
"fmt"
"os"

"github.com/OpenListTeam/OpenList/v5/cmd/flags"
"github.com/OpenListTeam/OpenList/v4/cmd/flags"
_ "github.com/OpenListTeam/OpenList/v4/drivers"
_ "github.com/OpenListTeam/OpenList/v4/internal/archive"
_ "github.com/OpenListTeam/OpenList/v4/internal/offline_download"
"github.com/spf13/cobra"
)

@@ -24,10 +27,10 @@ func Execute() {
}

func init() {
RootCmd.PersistentFlags().StringVarP(&flags.ConfigFile, "config", "c", "data/config.json", "config file")
RootCmd.PersistentFlags().StringVar(&flags.DataDir, "data", "data", "data folder")
RootCmd.PersistentFlags().BoolVar(&flags.Debug, "debug", false, "start with debug mode")
RootCmd.PersistentFlags().BoolVar(&flags.NoPrefix, "no-prefix", false, "disable env prefix")
RootCmd.PersistentFlags().BoolVar(&flags.Dev, "dev", false, "start with dev mode")
RootCmd.PersistentFlags().BoolVarP(&flags.ForceBinDir, "force-bin-dir", "f", false, "force to use the directory where the binary file is located as data directory")
RootCmd.PersistentFlags().BoolVar(&flags.LogStd, "log-std", false, "force to log to std")
RootCmd.PersistentFlags().BoolVar(&flags.ForceBinDir, "force-bin-dir", false, "Force to use the directory where the binary file is located as data directory")
RootCmd.PersistentFlags().BoolVar(&flags.LogStd, "log-std", false, "Force to log to std")
}
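Note: as a reference for the flag wiring above, this is a minimal, self-contained sketch of binding cobra persistent flags to package-level variables in the same style; the myapp command and its variables are hypothetical, not the actual OpenList RootCmd:

package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

var (
	configFile string
	debug      bool
)

var rootCmd = &cobra.Command{
	Use: "myapp",
	Run: func(cmd *cobra.Command, args []string) {
		fmt.Println("config:", configFile, "debug:", debug)
	},
}

func init() {
	// Persistent flags are inherited by every subcommand, as in the hunk above.
	rootCmd.PersistentFlags().StringVarP(&configFile, "config", "c", "data/config.json", "config file")
	rootCmd.PersistentFlags().BoolVar(&debug, "debug", false, "start with debug mode")
}

func main() {
	if err := rootCmd.Execute(); err != nil {
		os.Exit(1)
	}
}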
cmd/server.go
@@ -13,9 +13,15 @@ import (
"syscall"
"time"

"github.com/OpenListTeam/OpenList/v5/cmd/flags"
"github.com/OpenListTeam/OpenList/v5/internal/conf"
"github.com/OpenListTeam/OpenList/v5/server"
"github.com/OpenListTeam/OpenList/v4/cmd/flags"
"github.com/OpenListTeam/OpenList/v4/internal/bootstrap"
"github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/fs"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/OpenList/v4/server"
"github.com/OpenListTeam/OpenList/v4/server/middlewares"
"github.com/OpenListTeam/sftpd-openlist"
ftpserver "github.com/fclairamb/ftpserverlib"
"github.com/gin-gonic/gin"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
@@ -29,127 +35,220 @@ var ServerCmd = &cobra.Command{
Short: "Start the server at the specified address",
Long: `Start the server at the specified address
the address is defined in config file`,
Run: func(_ *cobra.Command, args []string) {
serverCtx, serverCancel := context.WithCancel(context.Background())
defer serverCancel()
Init(serverCtx)

if !flags.Debug {
Run: func(cmd *cobra.Command, args []string) {
Init()
if conf.Conf.DelayedStart != 0 {
utils.Log.Infof("delayed start for %d seconds", conf.Conf.DelayedStart)
time.Sleep(time.Duration(conf.Conf.DelayedStart) * time.Second)
}
bootstrap.InitOfflineDownloadTools()
bootstrap.LoadStorages()
bootstrap.InitTaskManager()
if !flags.Debug && !flags.Dev {
gin.SetMode(gin.ReleaseMode)
}
r := gin.New()
r.Use(gin.LoggerWithWriter(log.StandardLogger().Out))
r.Use(gin.RecoveryWithWriter(log.StandardLogger().Out))
server.Init(r)

// gin log
if conf.Conf.Log.Filter.Enable {
r.Use(middlewares.FilteredLogger())
} else {
r.Use(gin.LoggerWithWriter(log.StandardLogger().Out))
}
r.Use(gin.RecoveryWithWriter(log.StandardLogger().Out))

server.Init(r)
var httpHandler http.Handler = r
if conf.Conf.Scheme.EnableH2c {
httpHandler = h2c.NewHandler(r, &http2.Server{})
}
var httpSrv, httpsSrv, unixSrv *http.Server
if conf.Conf.Scheme.HttpPort > 0 {
if conf.Conf.Scheme.HttpPort != -1 {
httpBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpPort)
log.Infoln("start HTTP server", "@", httpBase)
fmt.Printf("start HTTP server @ %s\n", httpBase)
utils.Log.Infof("start HTTP server @ %s", httpBase)
httpSrv = &http.Server{Addr: httpBase, Handler: httpHandler}
go func() {
err := httpSrv.ListenAndServe()
if err != nil && !errors.Is(err, http.ErrServerClosed) {
log.Errorln("start HTTP server", ":", err)
serverCancel()
utils.Log.Fatalf("failed to start http: %s", err.Error())
}
}()
}
if conf.Conf.Scheme.HttpsPort > 0 {
if conf.Conf.Scheme.HttpsPort != -1 {
httpsBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpsPort)
log.Infoln("start HTTPS server", "@", httpsBase)
fmt.Printf("start HTTPS server @ %s\n", httpsBase)
utils.Log.Infof("start HTTPS server @ %s", httpsBase)
httpsSrv = &http.Server{Addr: httpsBase, Handler: r}
go func() {
err := httpsSrv.ListenAndServeTLS(conf.Conf.Scheme.CertFile, conf.Conf.Scheme.KeyFile)
if err != nil && !errors.Is(err, http.ErrServerClosed) {
log.Errorln("start HTTPS server", ":", err)
serverCancel()
utils.Log.Fatalf("failed to start https: %s", err.Error())
}
}()
}
if conf.Conf.Scheme.UnixFile != "" {
log.Infoln("start Unix server", "@", conf.Conf.Scheme.UnixFile)
fmt.Printf("start unix server @ %s\n", conf.Conf.Scheme.UnixFile)
utils.Log.Infof("start unix server @ %s", conf.Conf.Scheme.UnixFile)
unixSrv = &http.Server{Handler: httpHandler}
go func() {
listener, err := net.Listen("unix", conf.Conf.Scheme.UnixFile)
if err != nil {
log.Errorln("start Unix server", ":", err)
serverCancel()
return
utils.Log.Fatalf("failed to listen unix: %+v", err)
}

// set socket file permission
mode, err := strconv.ParseUint(conf.Conf.Scheme.UnixFilePerm, 8, 32)
if err != nil {
log.Errorln("parse unix_file_perm", ":", err)
utils.Log.Errorf("failed to parse socket file permission: %+v", err)
} else {
err = os.Chmod(conf.Conf.Scheme.UnixFile, os.FileMode(mode))
if err != nil {
log.Errorln("chmod socket file", ":", err)
utils.Log.Errorf("failed to chmod socket file: %+v", err)
}
}

err = unixSrv.Serve(listener)
if err != nil && !errors.Is(err, http.ErrServerClosed) {
log.Errorln("start Unix server", ":", err)
serverCancel()
utils.Log.Fatalf("failed to start unix: %s", err.Error())
}
}()
}

if conf.Conf.S3.Port != -1 && conf.Conf.S3.Enable {
s3r := gin.New()
s3r.Use(gin.LoggerWithWriter(log.StandardLogger().Out), gin.RecoveryWithWriter(log.StandardLogger().Out))
server.InitS3(s3r)
s3Base := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.S3.Port)
fmt.Printf("start S3 server @ %s\n", s3Base)
utils.Log.Infof("start S3 server @ %s", s3Base)
go func() {
var err error
if conf.Conf.S3.SSL {
httpsSrv = &http.Server{Addr: s3Base, Handler: s3r}
err = httpsSrv.ListenAndServeTLS(conf.Conf.Scheme.CertFile, conf.Conf.Scheme.KeyFile)
}
if !conf.Conf.S3.SSL {
httpSrv = &http.Server{Addr: s3Base, Handler: s3r}
err = httpSrv.ListenAndServe()
}
if err != nil && !errors.Is(err, http.ErrServerClosed) {
utils.Log.Fatalf("failed to start s3 server: %s", err.Error())
}
}()
}
var ftpDriver *server.FtpMainDriver
var ftpServer *ftpserver.FtpServer
if conf.Conf.FTP.Listen != "" && conf.Conf.FTP.Enable {
var err error
ftpDriver, err = server.NewMainDriver()
if err != nil {
utils.Log.Fatalf("failed to start ftp driver: %s", err.Error())
} else {
fmt.Printf("start ftp server on %s\n", conf.Conf.FTP.Listen)
utils.Log.Infof("start ftp server on %s", conf.Conf.FTP.Listen)
go func() {
ftpServer = ftpserver.NewFtpServer(ftpDriver)
err = ftpServer.ListenAndServe()
if err != nil {
utils.Log.Fatalf("problem ftp server listening: %s", err.Error())
}
}()
}
}
var sftpDriver *server.SftpDriver
var sftpServer *sftpd.SftpServer
if conf.Conf.SFTP.Listen != "" && conf.Conf.SFTP.Enable {
var err error
sftpDriver, err = server.NewSftpDriver()
if err != nil {
utils.Log.Fatalf("failed to start sftp driver: %s", err.Error())
} else {
fmt.Printf("start sftp server on %s", conf.Conf.SFTP.Listen)
utils.Log.Infof("start sftp server on %s", conf.Conf.SFTP.Listen)
go func() {
sftpServer = sftpd.NewSftpServer(sftpDriver)
err = sftpServer.RunServer()
if err != nil {
utils.Log.Fatalf("problem sftp server listening: %s", err.Error())
}
}()
}
}
// Wait for interrupt signal to gracefully shutdown the server with
// a timeout of 1 second.
quit := make(chan os.Signal, 1)
// kill (no param) default send syscanll.SIGTERM
// kill -2 is syscall.SIGINT
// kill -9 is syscall. SIGKILL but can"t be catch, so don't need add it
signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
select {
case <-quit:
case <-serverCtx.Done():
}

log.Println("shutdown server...")
<-quit
utils.Log.Println("Shutdown server...")
fs.ArchiveContentUploadTaskManager.RemoveAll()
Release()

quitCtx, quitCancel := context.WithTimeout(context.Background(), time.Second)
defer quitCancel()
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()
var wg sync.WaitGroup
if httpSrv != nil {
if conf.Conf.Scheme.HttpPort != -1 {
wg.Add(1)
go func() {
defer wg.Done()
if err := httpSrv.Shutdown(quitCtx); err != nil {
log.Errorln("shutdown HTTP server", ":", err)
if err := httpSrv.Shutdown(ctx); err != nil {
utils.Log.Fatal("HTTP server shutdown err: ", err)
}
}()
}
if httpsSrv != nil {
if conf.Conf.Scheme.HttpsPort != -1 {
wg.Add(1)
go func() {
defer wg.Done()
if err := httpsSrv.Shutdown(quitCtx); err != nil {
log.Errorln("shutdown HTTPS server", ":", err)
if err := httpsSrv.Shutdown(ctx); err != nil {
utils.Log.Fatal("HTTPS server shutdown err: ", err)
}
}()
}
if unixSrv != nil {
if conf.Conf.Scheme.UnixFile != "" {
wg.Add(1)
go func() {
defer wg.Done()
if err := unixSrv.Shutdown(quitCtx); err != nil {
log.Errorln("shutdown Unix server", ":", err)
if err := unixSrv.Shutdown(ctx); err != nil {
utils.Log.Fatal("Unix server shutdown err: ", err)
}
}()
}
if conf.Conf.FTP.Listen != "" && conf.Conf.FTP.Enable && ftpServer != nil && ftpDriver != nil {
wg.Add(1)
go func() {
defer wg.Done()
ftpDriver.Stop()
if err := ftpServer.Stop(); err != nil {
utils.Log.Fatal("FTP server shutdown err: ", err)
}
}()
}
if conf.Conf.SFTP.Listen != "" && conf.Conf.SFTP.Enable && sftpServer != nil && sftpDriver != nil {
wg.Add(1)
go func() {
defer wg.Done()
if err := sftpServer.Close(); err != nil {
utils.Log.Fatal("SFTP server shutdown err: ", err)
}
}()
}
wg.Wait()
log.Println("server exit")
utils.Log.Println("Server exit")
},
}

func init() {
RootCmd.AddCommand(ServerCmd)

// Here you will define your flags and configuration settings.

// Cobra supports Persistent Flags which will work for this command
// and all subcommands, e.g.:
// serverCmd.PersistentFlags().String("foo", "", "A help for foo")

// Cobra supports local flags which will only run when this command
// is called directly, e.g.:
// serverCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}

// OutOpenListInit exposes a function used to start the server from outside the package
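Note: the start/stop flow above follows a common Go pattern: serve in a goroutine, wait for SIGINT/SIGTERM, then call Shutdown with a short timeout. A minimal, self-contained sketch of that pattern using plain net/http (not the gin/conf wiring in the hunk):

package main

import (
	"context"
	"errors"
	"log"
	"net/http"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	srv := &http.Server{Addr: ":8080", Handler: http.DefaultServeMux}

	// Serve in the background; ErrServerClosed is the normal result of Shutdown.
	go func() {
		if err := srv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
			log.Fatalf("failed to start http: %s", err)
		}
	}()

	// Block until SIGINT or SIGTERM arrives (SIGKILL cannot be caught).
	quit := make(chan os.Signal, 1)
	signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
	<-quit

	// Give in-flight requests one second to finish, as server.go does.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if err := srv.Shutdown(ctx); err != nil {
		log.Println("shutdown error:", err)
	}
	log.Println("server exit")
}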
@@ -186,7 +186,9 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
preHash = strings.ToUpper(preHash)
fullHash := stream.GetHash().GetHash(utils.SHA1)
if len(fullHash) != utils.SHA1.Width {
_, fullHash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA1)
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, fullHash, err = streamPkg.CacheFullInTempFileAndHash(stream, cacheFileProgress, utils.SHA1)
if err != nil {
return nil, err
}
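Note: the UpdateProgressWithRange calls in this and the following driver hunks split one progress callback into sub-ranges (caching reports 0-50%, the remaining upload 50-100%). A self-contained sketch of that remapping, using a simplified stand-in for the project's driver.UpdateProgress type rather than the real model package:

package main

import "fmt"

// UpdateProgress is a simplified stand-in for the driver.UpdateProgress callback
// used in the hunks above; it receives a percentage in [0, 100].
type UpdateProgress func(percentage float64)

// UpdateProgressWithRange remaps a 0-100 progress report into the [start, end]
// slice of the parent callback, so caching and uploading can share one bar.
func UpdateProgressWithRange(up UpdateProgress, start, end float64) UpdateProgress {
	return func(p float64) {
		up(start + (end-start)*p/100)
	}
}

func main() {
	parent := func(p float64) { fmt.Printf("overall: %.0f%%\n", p) }
	caching := UpdateProgressWithRange(parent, 0, 50)
	uploading := UpdateProgressWithRange(parent, 50, 100)
	caching(100)  // overall: 50%
	uploading(50) // overall: 75%
}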
@@ -321,7 +321,7 @@ func (d *Pan115) UploadByMultipart(ctx context.Context, params *driver115.Upload
err error
)

tmpF, err := s.CacheFullAndWriter(&up, nil)
tmpF, err := s.CacheFullInTempFile()
if err != nil {
return nil, err
}
@@ -239,7 +239,9 @@ func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
}
sha1 := file.GetHash().GetHash(utils.SHA1)
if len(sha1) != utils.SHA1.Width {
_, sha1, err = stream.CacheFullAndHash(file, &up, utils.SHA1)
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, sha1, err = stream.CacheFullInTempFileAndHash(file, cacheFileProgress, utils.SHA1)
if err != nil {
return err
}
@@ -86,14 +86,13 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,

fileSize := stream.GetSize()
chunkSize := calPartSize(fileSize)
ss, err := streamPkg.NewStreamSectionReader(stream, int(chunkSize), &up)
if err != nil {
return err
}

partNum := (stream.GetSize() + chunkSize - 1) / chunkSize
parts := make([]oss.UploadPart, partNum)
offset := int64(0)
ss, err := streamPkg.NewStreamSectionReader(stream, int(chunkSize))
if err != nil {
return err
}
for i := int64(1); i <= partNum; i++ {
if utils.IsCanceled(ctx) {
return ctx.Err()
@@ -120,7 +119,7 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second))
ss.FreeSectionReader(rd)
ss.RecycleSectionReader(rd)
if err != nil {
return err
}
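Note: the retry options in this hunk (Attempts, Delay, BackOffDelay) configure per-part upload retries with exponential backoff. A minimal sketch of wrapping one flaky operation with that option set, assuming the retry identifier refers to github.com/avast/retry-go and using a hypothetical uploadPart stand-in for the real per-part request:

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/avast/retry-go"
)

// uploadPart is a hypothetical stand-in for one chunk upload; it fails twice
// and then succeeds, so the backoff policy gets exercised.
func uploadPart(attempt *int) error {
	*attempt++
	if *attempt < 3 {
		return errors.New("transient network error")
	}
	return nil
}

func main() {
	attempt := 0
	err := retry.Do(
		func() error { return uploadPart(&attempt) },
		retry.Attempts(3),
		retry.Delay(time.Second),
		retry.DelayType(retry.BackOffDelay),
	)
	fmt.Println("attempts:", attempt, "err:", err)
}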
@@ -182,7 +182,9 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, file model.FileStrea
etag := file.GetHash().GetHash(utils.MD5)
var err error
if len(etag) < utils.MD5.Width {
_, etag, err = stream.CacheFullAndHash(file, &up, utils.MD5)
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, etag, err = stream.CacheFullInTempFileAndHash(file, cacheFileProgress, utils.MD5)
if err != nil {
return err
}
@@ -81,12 +81,6 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
if size > chunkSize {
chunkCount = int((size + chunkSize - 1) / chunkSize)
}

ss, err := stream.NewStreamSectionReader(file, int(chunkSize), &up)
if err != nil {
return err
}

lastChunkSize := size % chunkSize
if lastChunkSize == 0 {
lastChunkSize = chunkSize
@@ -98,6 +92,10 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
batchSize = 10
getS3UploadUrl = d.getS3PreSignedUrls
}
ss, err := stream.NewStreamSectionReader(file, int(chunkSize))
if err != nil {
return err
}

thread := min(int(chunkCount), d.UploadThread)
threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
@@ -182,7 +180,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
return nil
},
After: func(err error) {
ss.FreeSectionReader(reader)
ss.RecycleSectionReader(reader)
},
})
}
@@ -132,7 +132,9 @@ func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
// etag is the file's MD5
etag := file.GetHash().GetHash(utils.MD5)
if len(etag) < utils.MD5.Width {
_, etag, err = stream.CacheFullAndHash(file, &up, utils.MD5)
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, etag, err = stream.CacheFullInTempFileAndHash(file, cacheFileProgress, utils.MD5)
if err != nil {
return nil, err
}
@@ -46,12 +46,6 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
uploadDomain := createResp.Data.Servers[0]
size := file.GetSize()
chunkSize := createResp.Data.SliceSize

ss, err := stream.NewStreamSectionReader(file, int(chunkSize), &up)
if err != nil {
return err
}

uploadNums := (size + chunkSize - 1) / chunkSize
thread := min(int(uploadNums), d.UploadThread)
threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
@@ -59,6 +53,10 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))

ss, err := stream.NewStreamSectionReader(file, int(chunkSize))
if err != nil {
return err
}
for partIndex := range uploadNums {
if utils.IsCanceled(uploadCtx) {
break
@@ -159,7 +157,7 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
return nil
},
After: func(err error) {
ss.FreeSectionReader(reader)
ss.RecycleSectionReader(reader)
},
})
}
@@ -522,7 +522,9 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
var err error
fullHash := stream.GetHash().GetHash(utils.SHA256)
if len(fullHash) != utils.SHA256.Width {
_, fullHash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA256)
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, fullHash, err = streamPkg.CacheFullInTempFileAndHash(stream, cacheFileProgress, utils.SHA256)
if err != nil {
return err
}
@@ -5,19 +5,17 @@ import (
"encoding/base64"
"encoding/xml"
"fmt"
"github.com/skip2/go-qrcode"
"io"
"net/http"
"strconv"
"strings"
"time"

"github.com/skip2/go-qrcode"

"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"

"github.com/go-resty/resty/v2"
@@ -313,14 +311,11 @@ func (y *Cloud189TV) RapidUpload(ctx context.Context, dstDir model.Obj, stream m

// Legacy upload; the family cloud does not support overwriting
func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
fileMd5 := file.GetHash().GetHash(utils.MD5)
var tempFile = file.GetFile()
var err error
if len(fileMd5) != utils.MD5.Width {
tempFile, fileMd5, err = stream.CacheFullAndHash(file, &up, utils.MD5)
} else if tempFile == nil {
tempFile, err = file.CacheFullAndWriter(&up, nil)
tempFile, err := file.CacheFullInTempFile()
if err != nil {
return nil, err
}
fileMd5, err := utils.HashFile(utils.MD5, tempFile)
if err != nil {
return nil, err
}
@@ -350,7 +345,7 @@ func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model
header["Edrive-UploadFileId"] = fmt.Sprint(status.UploadFileId)
}

_, err := y.put(ctx, status.FileUploadUrl, header, true, tempFile, isFamily)
_, err := y.put(ctx, status.FileUploadUrl, header, true, io.NopCloser(tempFile), isFamily)
if err, ok := err.(*RespErr); ok && err.Code != "InputStreamReadError" {
return nil, err
}
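Note: the OldUpload change above collapses a two-pass flow (cache to a temp file, then hash the file) into a single cache-and-hash step. The underlying idea can be shown with the standard library alone: copy the stream into a temp file while feeding an MD5 hasher via io.TeeReader (a generic sketch, not the project's stream package):

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"strings"
)

// cacheAndHash writes r to a temp file and computes its MD5 in a single pass.
func cacheAndHash(r io.Reader) (*os.File, string, error) {
	tmp, err := os.CreateTemp("", "upload-*")
	if err != nil {
		return nil, "", err
	}
	h := md5.New()
	// TeeReader feeds every byte that io.Copy reads into the hasher as well.
	if _, err := io.Copy(tmp, io.TeeReader(r, h)); err != nil {
		tmp.Close()
		return nil, "", err
	}
	// Rewind so the cached file can be re-read for the actual upload.
	if _, err := tmp.Seek(0, io.SeekStart); err != nil {
		tmp.Close()
		return nil, "", err
	}
	return tmp, hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	f, sum, err := cacheAndHash(strings.NewReader("hello world"))
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()
	fmt.Println("md5:", sum)
}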
@@ -500,8 +500,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
if err != nil {
return nil, err
}

ss, err := stream.NewStreamSectionReader(file, int(sliceSize), &up)
ss, err := stream.NewStreamSectionReader(file, int(sliceSize))
if err != nil {
return nil, err
}
@@ -582,7 +581,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
return nil
},
After: func(err error) {
ss.FreeSectionReader(reader)
ss.RecycleSectionReader(reader)
},
},
)
@@ -858,7 +857,9 @@ func (y *Cloud189PC) GetMultiUploadUrls(ctx context.Context, isFamily bool, uplo

// Legacy upload; the family cloud does not support overwriting
func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
tempFile, fileMd5, err := stream.CacheFullAndHash(file, &up, utils.MD5)
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
tempFile, fileMd5, err := stream.CacheFullInTempFileAndHash(file, cacheFileProgress, utils.MD5)
if err != nil {
return nil, err
}
@@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"io"
"net/url"
stdpath "path"
"strings"

@@ -13,7 +12,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/fs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/sign"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
@@ -162,18 +160,25 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
sign.Sign(reqPath)),
}, nil
}

resultLink := *link
resultLink.SyncClosers = utils.NewSyncClosers(link)
if args.Redirect {
return &resultLink, nil
return link, nil
}

resultLink := &model.Link{
URL: link.URL,
Header: link.Header,
RangeReader: link.RangeReader,
MFile: link.MFile,
Concurrency: link.Concurrency,
PartSize: link.PartSize,
ContentLength: link.ContentLength,
SyncClosers: utils.NewSyncClosers(link),
}
if resultLink.ContentLength == 0 {
resultLink.ContentLength = fi.GetSize()
}
if resultLink.MFile != nil {
return &resultLink, nil
return resultLink, nil
}
if d.DownloadConcurrency > 0 {
resultLink.Concurrency = d.DownloadConcurrency
@@ -181,7 +186,7 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
if d.DownloadPartSize > 0 {
resultLink.PartSize = d.DownloadPartSize * utils.KB
}
return &resultLink, nil
return resultLink, nil
}
return nil, errs.ObjectNotFound
}
@@ -308,29 +313,24 @@ func (d *Alias) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer,
reqPath, err := d.getReqPath(ctx, dstDir, true)
if err == nil {
if len(reqPath) == 1 {
storage, reqActualPath, err := op.GetStorageAndActualPath(*reqPath[0])
if err != nil {
return err
}
return op.Put(ctx, storage, reqActualPath, &stream.FileStream{
Obj: s,
Mimetype: s.GetMimetype(),
Reader: s,
}, up)
return fs.PutDirectly(ctx, *reqPath[0], &stream.FileStream{
Obj: s,
Mimetype: s.GetMimetype(),
WebPutAsTask: s.NeedStore(),
Reader: s,
})
} else {
file, err := s.CacheFullAndWriter(nil, nil)
file, err := s.CacheFullInTempFile()
if err != nil {
return err
}
count := float64(len(reqPath) + 1)
up(100 / count)
for i, path := range reqPath {
for _, path := range reqPath {
err = errors.Join(err, fs.PutDirectly(ctx, *path, &stream.FileStream{
Obj: s,
Mimetype: s.GetMimetype(),
Reader: file,
Obj: s,
Mimetype: s.GetMimetype(),
WebPutAsTask: s.NeedStore(),
Reader: file,
}))
up(float64(i+2) / float64(count) * 100)
_, e := file.Seek(0, io.SeekStart)
if e != nil {
return errors.Join(err, e)
@@ -402,24 +402,10 @@ func (d *Alias) Extract(ctx context.Context, obj model.Obj, args model.ArchiveIn
return nil, errs.ObjectNotFound
}
for _, dst := range dsts {
reqPath := stdpath.Join(dst, sub)
link, err := d.extract(ctx, reqPath, args)
if err != nil {
continue
link, err := d.extract(ctx, dst, sub, args)
if err == nil {
return link, nil
}
if link == nil {
return &model.Link{
URL: fmt.Sprintf("%s/ap%s?inner=%s&pass=%s&sign=%s",
common.GetApiUrl(ctx),
utils.EncodePath(reqPath, true),
utils.EncodePath(args.InnerPath, true),
url.QueryEscape(args.Password),
sign.SignArchive(reqPath)),
}, nil
}
resultLink := *link
resultLink.SyncClosers = utils.NewSyncClosers(link)
return &resultLink, nil
}
return nil, errs.NotImplement
}
@@ -2,6 +2,8 @@ package alias

import (
"context"
"fmt"
"net/url"
stdpath "path"
"strings"

@@ -10,6 +12,8 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/fs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/sign"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/OpenList/v4/server/common"
)

@@ -136,7 +140,8 @@ func (d *Alias) listArchive(ctx context.Context, dst, sub string, args model.Arc
return nil, errs.NotImplement
}

func (d *Alias) extract(ctx context.Context, reqPath string, args model.ArchiveInnerArgs) (*model.Link, error) {
func (d *Alias) extract(ctx context.Context, dst, sub string, args model.ArchiveInnerArgs) (*model.Link, error) {
reqPath := stdpath.Join(dst, sub)
storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
if err != nil {
return nil, err
@@ -144,12 +149,20 @@ func (d *Alias) extract(ctx context.Context, reqPath string, args model.ArchiveI
if _, ok := storage.(driver.ArchiveReader); !ok {
return nil, errs.NotImplement
}
if args.Redirect && common.ShouldProxy(storage, stdpath.Base(reqPath)) {
_, err := fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
if err == nil {
if args.Redirect && common.ShouldProxy(storage, stdpath.Base(sub)) {
_, err = fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
if err != nil {
return nil, err
}
return nil, nil
link := &model.Link{
URL: fmt.Sprintf("%s/ap%s?inner=%s&pass=%s&sign=%s",
common.GetApiUrl(ctx),
utils.EncodePath(reqPath, true),
utils.EncodePath(args.InnerPath, true),
url.QueryEscape(args.Password),
sign.SignArchive(reqPath)),
}
return link, nil
}
link, _, err := op.DriverExtract(ctx, storage, reqActualPath, args)
return link, err
Some files were not shown because too many files have changed in this diff.