Compare commits


113 Commits

Author SHA1 Message Date
0793b6e754 fix(deps): update module github.com/golang-jwt/jwt/v4 to v5 2025-08-13 17:32:21 +00:00
d0c22a1ecb feat(ci): add the default user for docker image (#1036)
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-12 09:51:40 +08:00
57fceabcf4 perf(stream): improve file stream range reading and caching mechanism (#1001)
* perf(stream): improve file stream range reading and caching mechanism

* .

* add bytes_test.go

* fix(stream): handle EOF and buffer reading more gracefully

* Add comments

* refactor: update CacheFullAndWriter to accept pointer for UpdateProgress

* update tests

* Update drivers/google_drive/util.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>

* Clone Link more elegantly

* Fix cached stream that could not be re-read

* Rename the Bytes type to Reader

* Fix stack overflow

* update tests

---------

Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-11 23:41:22 +08:00
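For context on the re-read fix above: buffering a stream once and handing out a fresh reader per read is the standard Go pattern. A minimal sketch, with illustrative names rather than the project's actual types:

```go
package main

import (
	"bytes"
	"io"
	"strings"
)

// cachedStream buffers the source once; each Reader() call returns an
// independent, seekable view, so the cached data can be read repeatedly.
type cachedStream struct {
	buf []byte
}

func newCachedStream(src io.Reader) (*cachedStream, error) {
	b, err := io.ReadAll(src)
	if err != nil {
		return nil, err
	}
	return &cachedStream{buf: b}, nil
}

func (c *cachedStream) Reader() io.ReadSeeker {
	return bytes.NewReader(c.buf)
}

func main() {
	cs, _ := newCachedStream(strings.NewReader("cached payload"))
	first, _ := io.ReadAll(cs.Reader())
	second, _ := io.ReadAll(cs.Reader()) // second read succeeds: fresh Reader each time
	_, _ = first, second
}
```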
8c244a984d refactor(assets): migrate to resource domain (#975)
* refactor(assets): migrate to resource domain

* feat(bootstrap): add migration value for logo and favicon settings
2025-08-10 09:57:33 +08:00
df479ba806 fix(aliyundrive_open): limit rate for every request (close #724) (#1011)
* fix(aliyundrive_open): limit rate for `Remove` and `MakeDir`; reduce limit for `List` and `Link` (close #724)

* Update drivers/aliyundrive_open/driver.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: 火星大王 <34576789+huoxingdawang@users.noreply.github.com>

* Update drivers/aliyundrive_open/driver.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: 火星大王 <34576789+huoxingdawang@users.noreply.github.com>

* fix(aliyundrive_open): limit rate for every request

* fix(aliyundrive_open): fix limiter not work on reference driver

* fix(aliyundrive_open): typo

* fix(aliyundrive_open): limiter not set to nil after free

* fix(aliyundrive_share): limit rate for every request

---------

Signed-off-by: 火星大王 <34576789+huoxingdawang@users.noreply.github.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-10 09:55:20 +08:00
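As background for the per-request limiting described above, a minimal sketch using golang.org/x/time/rate; the interval and structure are assumptions, not the driver's actual values:

```go
package main

import (
	"context"
	"time"

	"golang.org/x/time/rate"
)

// One limiter per driver instance: every API call waits for a token,
// so bursts across Remove, MakeDir, List, Link, etc. are smoothed out.
var limiter = rate.NewLimiter(rate.Every(250*time.Millisecond), 1)

func doRequest(ctx context.Context) error {
	// Wait blocks until a token is available or ctx is cancelled.
	if err := limiter.Wait(ctx); err != nil {
		return err
	}
	// ... perform the actual HTTP request here ...
	return nil
}

func main() {
	_ = doRequest(context.Background())
}
```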
5ae8e96237 feat(123_open): update Put method to return model.Obj (#1008)
* feat(123_open): update Put method to return model.Obj

* fix(123_open): declare time zones

* chore(123_open): fix typo

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(123_open): use fixed timezone

* fix(123_open): implement PutResult interface for Open123 driver

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: Suyunmeng <69945917+Suyunmeng@users.noreply.github.com>
2025-08-09 15:09:12 +08:00
aa0ced47b0 fix(webdav): Handle HEAD requests for directories with appropriate headers (#1015)
Implement handling of HEAD requests for directories by setting the correct Content-Type and Content-Length headers.
2025-08-09 13:57:09 +08:00
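A sketch of the general shape of such a fix; the header values are illustrative assumptions, not necessarily what the commit sets:

```go
package main

import "net/http"

// handleDirHead answers HEAD for a directory with headers only:
// without an explicit Content-Type/Content-Length some clients
// mishandle the empty response.
func handleDirHead(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	w.Header().Set("Content-Length", "0")
	w.WriteHeader(http.StatusOK) // HEAD: headers only, no body
}

func main() {
	http.HandleFunc("/dir/", handleDirHead)
	_ = http.ListenAndServe(":8080", nil)
}
```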
ab747d9052 feat(config): Add PWA manifest.json endpoint for web app installation (#990)
* feat(config): Add PWA manifest.json endpoint for web app installation

* fix: Update comment to English in manifest handler

* fix: fix EOL

* fix: Remove unused fmt import from manifest handler

* feat: use site settings for manifest name and icon

* fix(manifest): Move manifest.json route to static handler for proper CDN handling

* feat: move manifest.json handler to static package and improve path handling

* feat: Add custom static file handler to prevent manifest.json conflicts

* fix: Integrate manifest.json handling into static file serving routes

* fix: Simplify PWA manifest scope handling and static file serving

- Remove CDN-specific logic for PWA manifest scope and start_url
- Always use base path for PWA scope regardless of CDN configuration
- Replace manual file serving logic with http.FileServer for static assets

* fix: Ensure consistent base path handling in site configuration and manifest path construction

* fix: Refactor trailing slash handling in site configuration

* feat(static): update manifest path handling and add route for manifest.json
2025-08-08 20:07:51 +08:00
93c06213d4 feat(local): add directory size support (#624)
* feat(local): add directory size support

* fix(local): fix and improve directory size calculation

* style(local): fix code style

* style(local): fix code style

* style(local): fix code style

* fix(local): refresh directory size when force refresh

Signed-off-by: 我怎么就不是一只猫呢? <26274059+dezhishen@users.noreply.github.com>

* fix(local): Avoid traversing the parent's parent, which leads to an endless loop

Signed-off-by: 我怎么就不是一只猫呢? <26274059+dezhishen@users.noreply.github.com>

* fix(local): refresh dir size only when enabled

Signed-off-by: 我怎么就不是一只猫呢? <26274059+dezhishen@users.noreply.github.com>

* fix(local): logical error && add RecalculateDirSize && cleaner code for int64

* feat(local): add Benchmark for CalculateDirSize

* refactor(local): improve error checking during move.

---------

Signed-off-by: 我怎么就不是一只猫呢? <26274059+dezhishen@users.noreply.github.com>
Co-authored-by: 我怎么就不是一只猫呢? <26274059+dezhishen@users.noreply.github.com>
2025-08-08 16:59:16 +08:00
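For reference, the core of a directory-size computation in Go looks roughly like the sketch below; the real driver additionally caches results, avoids walking back into parents, and recalculates on forced refresh (the function name is illustrative):

```go
package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
)

// dirSize sums the sizes of all regular files under root.
func dirSize(root string) (int64, error) {
	var total int64
	err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() {
			return nil // directories themselves contribute no size
		}
		info, err := d.Info()
		if err != nil {
			return err
		}
		total += info.Size()
		return nil
	})
	return total, err
}

func main() {
	size, err := dirSize(".")
	fmt.Println(size, err)
}
```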
b9b8eed285 [skip ci]feat(ci): add FRONTEND_REPO variable to workflows and build script (#1006) 2025-08-08 16:36:22 +08:00
317d190b77 fix(ftp): create a new connection for each download (#989) 2025-08-06 20:35:01 +08:00
52d7d819ad feat(lenovonas_share): add thumb (#986) 2025-08-06 17:34:43 +08:00
0483e0f868 feat(driver_strm): also show some files with strm (#969)
* feat(driver_strm): Also show some files with strm

Allow users to set file types that should also be shown with strm, usually subtitles

Most of the code was copied and adapted from drivers/alias

* Optimize

* Optimize

* .

* Add comments

---------

Co-authored-by: j2rong4cn <j2rong@qq.com>
Co-authored-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>
2025-08-06 15:40:48 +08:00
08dae4f55f feat(123_open): update upload api v2 (#976) 2025-08-06 15:27:13 +08:00
9ac0484bc0 perf(ftp): improve concurrent Link response; fix alias/local driver issues (#974) 2025-08-06 13:32:37 +08:00
8cf15183a0 perf: optimize upload (#554)
* perf(115,123): optimize upload

* chore

* aliyun_open, google_drive

* fix bug

* chore

* cloudreve, cloudreve_v4, onedrive, onedrive_app

* chore(conf): add `max_buffer_limit` option

* 123pan multithread upload

* doubao

* google_drive

* chore

* chore

* chore: code for calculating the number of chunks

* Auto-tune MaxBufferLimit

* Auto-tune MaxBufferLimit

* 189pc

* Add Lifecycle to errorgroup

* Fill in the gaps

* Conf.MaxBufferLimit is in MB

* .

---------

Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-05 21:42:54 +08:00
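The chunk-count bullet above refers to the usual ceiling division for multipart uploads. A sketch; the zero-size special case is an assumption, since many upload APIs still require one (empty) part:

```go
package main

import "fmt"

// chunkCount returns ceil(size / chunkSize) without floating point.
func chunkCount(size, chunkSize int64) int64 {
	if size == 0 {
		return 1 // assumption: the API wants at least one empty part
	}
	return (size + chunkSize - 1) / chunkSize
}

func main() {
	fmt.Println(chunkCount(10<<20, 4<<20)) // 10 MiB in 4 MiB parts -> 3
}
```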
c8f2aaaa55 feat(cmd): add delete command for storage (#952) 2025-08-04 17:30:43 +08:00
1208bd0a83 fix(fs): nil interface not equal to nil (#971)
https://go.dev/doc/faq#nil_error
2025-08-03 23:51:11 +08:00
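The linked FAQ entry describes Go's classic typed-nil pitfall; a minimal self-contained illustration:

```go
package main

import "fmt"

type myError struct{}

func (*myError) Error() string { return "boom" }

// fail returns a typed nil: the error interface value carries the
// concrete type *myError, so the interface itself is not nil.
func fail() error {
	var p *myError // nil pointer
	return p
}

func main() {
	fmt.Println(fail() == nil) // false — compare against nil before wrapping in an interface
}
```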
6b096bcad4 fix(fs): deadlock when get link error (#963) 2025-08-02 17:49:53 +08:00
58dbf088f9 fix(fs): forget cache when get link error (#956) 2025-08-02 11:03:34 +08:00
05ff7908f2 fix(strm): encoded path is ineffective (#951) 2025-08-02 00:23:18 +08:00
a703b736c9 feat(offline_download): filter empty URLs in offline download requests (#948) 2025-08-01 16:12:21 +08:00
e458f2ab53 fix(bootstrap): add newline after initial admin password output (#943)
fix(bootstrap): add newline after initial admin password output
2025-08-01 13:43:41 +08:00
a5a22e7085 fix(local): Treat junction as directory in Windows. (#809)
Treat junction as directory in Windows.
2025-07-31 13:54:56 +08:00
9469c95b14 fix(security): potential XSS vulnerabilities (#896) 2025-07-31 12:57:20 +08:00
cf912dcf7a fix(cmd): output to console (#920)
fix(cmd): output to terminal
2025-07-31 11:44:00 +08:00
ccd4af26e5 feat(patch): add migration from Alist V3 driver to OpenList (#919)
* feat(patch): add migration from Alist V3 driver to OpenList

* chore(patch): improve logging
2025-07-31 11:43:21 +08:00
1682e873d6 feat(search): enhanced meilisearch search experience (#864)
* feat(search): enhanced `meilisearch` search experience
- upgrade `meilisearch` dependency
- support subdirectory search
- optimize searchDocument fields for subdirectory search
- specify full index uid instead of index prefix

* fix(search): more fixes to `meilisearch`
- make use of context where context was not used
- remove code of waiting task in deletion process, as tasks are queued and will be executed orderly (if tasks were submitted to the queue successfully), which can improve `AutoUpdate` performance
2025-07-31 11:24:22 +08:00
54ae7e6d9b feat(115_open): Add GetObjInfo to accelerate getting link (#888)
* feat(115_open): Add GetObjInfo to accelerate getting link

* feat(fs): use cache directly when cache exist
2025-07-31 11:20:02 +08:00
991da7d87f feat(strm): add local mode (#885)
* feat(strm): add local mode

* Update drivers/strm/meta.go

Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
Signed-off-by: Seven <53081179+Seven66677731@users.noreply.github.com>

* feat(strm): local mode add sign

---------

Signed-off-by: Seven <53081179+Seven66677731@users.noreply.github.com>
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-07-31 11:18:59 +08:00
Dgs a498091aef fix(123&&123_share): fix link request header referer (#915) 2025-07-31 10:10:38 +08:00
976c82bb2b fix(drivers): update time-related fields to int64 (#913)
- In doubao/types.go:
  - Change LastUpdateTime from int to int64
  - Change UserCreateTime from int to int64
- In doubao_share/types.go:
  - Change CreateTime and UpdateTime from int to int64 in ShareInfo and FilePath
- In quark_uc/types.go:
  - Change UpdateTime from int to int64 in TranscodingResp

These changes ensure consistent and accurate representation of timestamp data across the project.
2025-07-31 10:10:32 +08:00
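For context: millisecond Unix timestamps such as 1753900000000 do not fit in a 32-bit int, so on 32-bit builds these fields must be int64. A sketch with illustrative struct and tag names:

```go
package main

import (
	"fmt"
	"time"
)

// ShareInfo sketches why the width matters: millisecond timestamps
// overflow int on 32-bit platforms, while int64 is always safe.
type ShareInfo struct {
	CreateTime int64 `json:"create_time"`
	UpdateTime int64 `json:"update_time"`
}

func main() {
	s := ShareInfo{UpdateTime: 1753900000000}
	fmt.Println(time.UnixMilli(s.UpdateTime).UTC()) // Go 1.17+
}
```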
5b41a3bdff feat(ci): Add support for LoongArch64 architecture builds (#907) 2025-07-31 10:10:19 +08:00
19d1a3b785 refactor(ci): Refactor Docker build to use base images and dynamic Dockerfile generation (#904) 2025-07-30 15:04:29 +08:00
3c7b0c4999 fix(qb): Configure HTTP client with connection pooling and fix resource leaks in qBittorrent client. (#898) 2025-07-29 21:56:36 +08:00
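A sketch of the general pattern for pooling and leak avoidance in Go's net/http; the limits and the qBittorrent endpoint are illustrative, not the values the commit uses:

```go
package main

import (
	"io"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{
		Transport: &http.Transport{
			MaxIdleConns:        100,
			MaxIdleConnsPerHost: 10,
			IdleConnTimeout:     90 * time.Second,
		},
		Timeout: 30 * time.Second,
	}
	resp, err := client.Get("http://127.0.0.1:8080/api/v2/app/version")
	if err != nil {
		return
	}
	// Draining and closing the body is what lets the connection be
	// reused; skipping this is a typical source of leaked resources.
	defer resp.Body.Close()
	io.Copy(io.Discard, resp.Body)
}
```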
d6867b4ab6 fix(user): show admin password on first start (#883)
* fix: fix admin password not shown in first start
* chore: add time dependence

Co-authored-by: Yinan Qin <39023210+elysia-best@users.noreply.github.com>
Signed-off-by: ILoveScratch <ilovescratch@foxmail.com>

* fix: fix log format

Co-authored-by: Yinan Qin <39023210+elysia-best@users.noreply.github.com>
Signed-off-by: ILoveScratch <ilovescratch@foxmail.com>

---------

Signed-off-by: ILoveScratch <ilovescratch@foxmail.com>
Co-authored-by: Yinan Qin <39023210+elysia-best@users.noreply.github.com>
2025-07-29 21:36:27 +08:00
11cf561307 fix(security): potential XSS vulnerabilities (#880)
* fix(security): potential XSS vulnerabilities

* chore: replace alist identifier to openlist identifier

Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
Signed-off-by: ILoveScratch <ilovescratch@foxmail.com>

---------

Signed-off-by: ILoveScratch <ilovescratch@foxmail.com>
Co-authored-by: ILoveScratch <ilovescratch@foxmail.com>
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-07-29 20:17:11 +08:00
239b58f63e fix(ci):Disable linux/s390x Docker builds (#887) 2025-07-29 16:22:50 +08:00
7da06655cb feat(setting): add site version information (#859)
* feat(setting): add site version information

* feat(conf): update conf.WebVersion to rolling

* fix(static): update condition to check conf.Version instead of conf.WebVersion

* fix(build.sh): use rolling release for web frontend in dev and beta builds

* chore(build.sh): update GitAuthor to The OpenList Projects Contributors

* fix(static): update condition to check conf.WebVersion
2025-07-29 09:49:33 +08:00
e0b3a611ba feat(thunderx,pikpak): add offline download support for ThunderX; add ctx to specific PikPak functions (#879)
* feat(thunderx,pikpak): add offline download support for ThunderX; add ctx to specific PikPak functions

* Update internal/offline_download/tool/download.go

Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
Signed-off-by: 花月喵梦 <152958106+nekohy@users.noreply.github.com>

---------

Signed-off-by: 花月喵梦 <152958106+nekohy@users.noreply.github.com>
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-07-29 09:46:28 +08:00
be1ad08a83 feat(ci):Add Windows 7 and LoongArch Release build support (#857)
* feat:Add Windows 7 and LoongArch old world build support (#30)

* feat:Add Windows 7 and Loongson old world build support

- Add BuildWin7() function with patched Go compiler for Windows 7 compatibility
- Add BuildLoongOldWorld() function for linux-loong64-abi1.0 target
- Create Zig-based wrapper scripts for Windows 7 cross-compilation
- Integrate new build functions into existing release workflows

* fix(win7):Add MinGW-w64 toolchain and improve LoongArch ABI isolation

- Install MinGW-w64 cross-compilation toolchain for Win7 compatibility
- Replace Zig compiler wrappers with MinGW-w64 for Windows 7 builds
- Add Go build cache cleaning to prevent LoongArch ABI1.0/ABI2.0 cross-contamination
- Force clean rebuilds (-a flag) for LoongArch builds to ensure ABI compatibility

* feat: add Windows 7 build support to beta release workflow

* feat: add LoongArch ABI2.0 support alongside existing ABI1.0 build (#31)

- Add BuildWin7() function with patched Go compiler for Windows 7 compatibility
- Add BuildLoongOldWorld() function for linux-loong64-abi1.0 target
- Create Zig-based wrapper scripts for Windows 7 cross-compilation
- Integrate new build functions into existing release workflows
- Install MinGW-w64 cross-compilation toolchain for Win7 compatibility
- Replace Zig compiler wrappers with MinGW-w64 for Windows 7 builds
- Add Go build cache cleaning to prevent LoongArch ABI1.0/ABI2.0 cross-contamination
- Force clean rebuilds (-a flag) for LoongArch builds to ensure ABI compatibility

* [skip ci]refactor:Refactor LoongArch builds to separate glibc from musl compilation

* fix(go-cache):Improve error handling for Go module cache cleaning in LoongArch builds

* feat(build): Enhance LoongArch build process with improved toolchain setup and cache management

* fix(build): Update Windows 7 target naming in build scripts and workflows

* refactor(build): Replace MinGW-w64 with Zig for Windows 7 toolchain in build scripts

* chore(cgo): remove cgo-actions subproject
2025-07-27 00:27:31 +08:00
4e9c30f49d feat(fs): full support webdav cross-driver copy and move (#823)
* fix(fs): restore webdav cross-driver copy and move

* fix bug

* webdav: support copying and moving folders

* Optimize

* .
2025-07-26 00:27:46 +08:00
0ee31a3f36 fix(crypt): wrong ContentLength 2025-07-25 19:55:22 +08:00
23bddf991e feat(drivers): enable local sorting for cloudreve, ilanzou (#840)
* feat(cloudreve): enable local sorting

* feat(ilanzou): enable local sorting
2025-07-25 18:01:19 +08:00
da8d6607cf fix(static): support logo replacement (#834 Close #754) 2025-07-25 17:12:51 +08:00
6134574dac fix(fs): rename bug (#832)
* fix(fs): rename bug

* chore

* fix bug

* .

---------

Co-authored-by: j2rong4cn <j2rong@qq.com>
2025-07-25 13:42:39 +08:00
b273232f87 refactor(log): redir utils.Log to logrus after init (#833) 2025-07-25 13:38:45 +08:00
358e4d851e refactor(log): filter (#816) 2025-07-25 11:33:27 +08:00
e8a1ed638a fix(ci):Exclude FreeBSD patch releases from version detection 2025-07-24 22:41:45 +08:00
4106e2a996 fix(static): correct CDN fetch condition for index.html (#814) 2025-07-24 22:28:58 +08:00
c2271df64e fix(ci): update OpenListTeam/cgo-actions to v1.2.2 to fix loongarch64 build (#811)
* Update beta_release.yml

* Update build.yml
2025-07-24 22:20:23 +08:00
d4b8570eb8 fix(docker): Fix the runsvdir permission issue caused by su-exec user switching and resolve the RUN_ARIA2 variable compatibility problem. (#805) 2025-07-24 17:22:49 +08:00
bd297e8ccc fix(deps): update module golang.org/x/image to v0.29.0 (#804)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-24 16:22:19 +08:00
923d282c8a fix(deps): update module github.com/sheltonzhu/115driver to v1.1.0 (#803)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-24 16:21:32 +08:00
4d8c4d7089 fix(deps): update module github.com/coreos/go-oidc to v2.3.0+incompatible (#586)
* fix(deps): update module github.com/coreos/go-oidc to v2.3.0+incompatible

* Update go.mod

Signed-off-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>

---------

Signed-off-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>
2025-07-24 16:21:03 +08:00
e93ab76036 feat(task-group): introduce TaskGroupCoordinator for coordinated task execution (#721)
* feat(task): add task hook,batch task
refactor(move): move use CopyTask

* Update internal/task/batch_task/refresh.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: Seven <53081179+Seven66677731@users.noreply.github.com>

* fix: upload task allFinish judge

* Update internal/task/batch_task/refresh.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: Seven <53081179+Seven66677731@users.noreply.github.com>

* feat: enhance concurrency safety

* Optimize code

* Decompression

* Fix deadlock

* refactor(move): move as task

* Refactor and optimize

* .

* Optimize and fix bugs

* .

* Fix bugs

* feat: add task retry judgment

* Proxy the Task.SetState function to track the Task lifecycle

* chore: use OnSucceeded, OnFailed, OnBeforeRetry functions

* Optimize

* Optimize and remove duplicate code

* .

* Optimize

* .

* webdav

* Revert "fix(fs):After the file is copied or moved, flush the cache of the directory that was copied or moved to."

This reverts commit 5f03edd683.

---------

Signed-off-by: Seven <53081179+Seven66677731@users.noreply.github.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: j2rong4cn <j2rong@qq.com>
2025-07-24 16:15:24 +08:00
a9f02ecdac refactor(log):Refactor log filtering to use centralized configuration and add server-specific filtering (#798)
* feat(log):Add configurable log filtering middleware for HTTP requests

Implement a comprehensive log filtering system that allows selective suppression of HTTP request logs based on paths, methods, and prefixes. The system includes environment variable configuration support and filters health checks, WebDAV requests, and HEAD requests by default to reduce log noise.

* fix(log):Replace gin.DefaultLogFormatter with custom implementation

* Remove filtered logger test file

* fix(log):Refactor log filtering to use centralized configuration and add server-specific filtering

* fix(log):Add documentation comments for log filtering configuration
2025-07-24 16:10:47 +08:00
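A minimal sketch of path-based log suppression using gin's built-in SkipPaths; the commit's version is more general (prefixes, methods, env-var configuration), which requires a custom formatter or middleware on top of this:

```go
package main

import "github.com/gin-gonic/gin"

func main() {
	r := gin.New()
	// Skip logging for exact paths such as health checks; WebDAV or
	// HEAD-request filtering needs custom middleware beyond SkipPaths.
	r.Use(gin.LoggerWithConfig(gin.LoggerConfig{
		SkipPaths: []string{"/ping"},
	}))
	r.Use(gin.Recovery())
	r.GET("/ping", func(c *gin.Context) { c.String(200, "pong") })
	r.Run(":8080")
}
```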
93849a3b5b fix(deps): update module github.com/pquerna/otp to v1.5.0 (#799)
Signed-off-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>
2025-07-24 16:07:23 +08:00
c2e0d0c9ce fix(deps): update module github.com/protonmail/go-crypto to v1.3.0 (#800)
Signed-off-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>
2025-07-24 16:06:50 +08:00
4a713363ee fix(deps): update module github.com/azure/azure-sdk-for-go/sdk/storage/azblob to v1.6.2 (#801)
Signed-off-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>
2025-07-24 16:06:10 +08:00
3da8ccb7a7 fix(deps): update module github.com/rclone/rclone to v1.70.3 (#802)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-24 16:05:20 +08:00
676b8cff0b fix(deps): update azure-sdk-for-go monorepo (#579)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-24 10:27:36 +08:00
57cf28fc90 fix(deps): update github.com/fclairamb/ftpserverlib digest to 4a925d7 (#675)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-24 10:26:39 +08:00
8cf90e074d fix(deps): update module github.com/charmbracelet/bubbletea to v1.3.6 (#585)
Signed-off-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>
2025-07-24 10:26:23 +08:00
74c2ed8306 fix(deps): update module github.com/charmbracelet/bubbles to v0.21.0 (#583)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-24 10:25:09 +08:00
5f03edd683 fix(fs):After the file is copied or moved, flush the cache of the directory that was copied or moved to. (#592)
* fix(fs):After the file is copied, the cache of the copied directory is refreshed

* fixed randomstring

* fixed EOL and Sync branch

chore(quark_uc): `webdav_policy` default to native_proxy

* fixed uuid and other bugs

* fixed comments

* fixed EOL

* add move refresh

* fixed builds

* fixed batch

* change batch to task.go

---------

Co-authored-by: Sumengjing <146963948+suyunjing-su@users.noreply.github.com>
2025-07-24 10:24:12 +08:00
8b65c918d4 chore(permission): admin enables webdav read-only by default (#726)
chore: admin enables webdav read-only by default
2025-07-24 10:19:49 +08:00
b5f0e3e5ee fix(deps): update module github.com/go-webauthn/webauthn to v0.13.4 (#677)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-24 10:05:44 +08:00
179894ff37 fix(deps): update module github.com/ipfs/go-cid to v0.5.0 (#680)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-24 10:05:05 +08:00
e2fc89c637 chore(deps): update dependency go to v1.24.5 (#783)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-24 10:04:20 +08:00
cacf67b181 fix(deps): update module github.com/yuin/goldmark to v1.7.13 (#794)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-24 10:04:00 +08:00
afb043e1d6 feat(docker): Change keep-alive strategy to runit, add aria2 log support (#791) 2025-07-24 09:19:33 +08:00
d9debb81ad feat(log):Add configurable log filtering middleware for HTTP requests (#782)
* feat(log):Add configurable log filtering middleware for HTTP requests

Implement a comprehensive log filtering system that allows selective suppression of HTTP request logs based on paths, methods, and prefixes. The system includes environment variable configuration support and filters health checks, WebDAV requests, and HEAD requests by default to reduce log noise.
2025-07-24 00:00:26 +08:00
4c069fddd6 fix(terabox): file upload error (#733)
* fix(terabox):fix file upload error failed to create file errno 10

Signed-off-by: yuyamionini <46483865+yuyamionini@users.noreply.github.com>

* fix(terabox):fix file upload error failed to create file errno 10

Signed-off-by: yuyamionini <46483865+yuyamionini@users.noreply.github.com>

* replace the goto statement with the retry-go package

Signed-off-by: yuyamionini <46483865+yuyamionini@users.noreply.github.com>

* Update util.go

Signed-off-by: yuyamionini <46483865+yuyamionini@users.noreply.github.com>

* Update util.go

Signed-off-by: yuyamionini <46483865+yuyamionini@users.noreply.github.com>

* go fmt

---------

Signed-off-by: yuyamionini <46483865+yuyamionini@users.noreply.github.com>
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-07-23 23:42:12 +08:00
b450a2104d chore(docs): update domain (#788)
* chore(docs): update domain

* docs(issue): add guide link for bug reporting
2025-07-23 14:26:21 +08:00
7d0de17daf feat(static): fetch index.html from cdn for beta (#372)
* refactor(static): simplify folder iteration in Static function

* feat(static): disable local static when `cdn` is set

* feat(static): fetch index.html from cdn for beta

* refactor(static): use RestyClient for better retrying

* fix(static): add Accept header when fetching index.html from CDN

* refactor(static): optimize HTML replacement

* chore(static): add logging to static file system initialization

* feat(static): ensure static file redirected to CDN
2025-07-22 22:14:07 +08:00
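For context on the CDN fetch, a sketch with resty's built-in retry; the URL and retry settings are illustrative assumptions:

```go
package main

import (
	"fmt"
	"time"

	"github.com/go-resty/resty/v2"
)

func main() {
	client := resty.New().
		SetRetryCount(3).
		SetRetryWaitTime(2 * time.Second)
	resp, err := client.R().
		SetHeader("Accept", "text/html").
		Get("https://cdn.example.com/index.html")
	if err != nil || !resp.IsSuccess() {
		fmt.Println("fetch failed:", err)
		return
	}
	html := resp.String() // then rewrite asset paths before serving
	_ = html
}
```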
bba4fb2203 fix(security): directory traversal (#744)
* fix(security): Directory traversal

* chore: .

* Optimize

---------

Co-authored-by: j2rong4cn <j2rong@qq.com>
2025-07-22 14:45:01 +08:00
a20c2020f8 fix(cmd): optimize parsing of command flag --data (#777)
* fix(cmd): optimize parsing of command flag `--data`

* DBFile

* Optimize

* os.Getwd()
2025-07-22 10:51:28 +08:00
a92b5eb929 refactor(cloudreve): use retry-go for net/http uploads (#773)
* refactor(cloudreve): use retry-go for uploads

* refactor(cloudreve_v4): use retry-go for uploads

* refactor(onedrive): use retry-go for uploads

* refactor(onedrive_app): use retry-go for uploads

* chore(onedrive_app): remove unnecessary error handling for host retrieval

* feat(cloudreve): move read logic inside retry block

* feat(cloudreve_v4): move read logic inside retry block

* feat(onedrive): move read logic inside retry block

* feat(onedrive_app): move read logic inside retry block
2025-07-22 10:25:04 +08:00
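The "move read logic inside retry block" bullets matter because a consumed io.Reader cannot be resent: each attempt must re-read the chunk from a seekable source. A sketch with retry-go, where uploadChunk is a hypothetical stand-in for the driver's actual upload call:

```go
package main

import (
	"context"
	"io"
	"os"
	"time"

	"github.com/avast/retry-go/v4"
)

// uploadWithRetry re-seeks and re-reads the chunk on every attempt,
// so a failed attempt never leaves the next one with a drained reader.
func uploadWithRetry(ctx context.Context, f *os.File, offset, size int64,
	uploadChunk func(context.Context, io.Reader) error) error {
	return retry.Do(
		func() error {
			if _, err := f.Seek(offset, io.SeekStart); err != nil {
				return retry.Unrecoverable(err)
			}
			return uploadChunk(ctx, io.LimitReader(f, size))
		},
		retry.Attempts(3),
		retry.Delay(time.Second),
		retry.Context(ctx),
	)
}

func main() {}
```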
6817494a41 chore(ci): update cgo-actions to 1.2.1 & add patch version define for go (#779)
chore(ci): update cgo-actions to 1.2.1 & fix patch version for go
2025-07-22 09:02:07 +08:00
5a0d8ee1b8 feat(proxy): add disable proxy sign (#764)
* feat(proxy): add disable proxy sign

* Update driver.go

* GenerateDownProxyUrl

* .

* Update internal/op/driver.go

Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>

* .

---------

Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>
Co-authored-by: j2rong4cn <j2rong@qq.com>
Co-authored-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>
2025-07-21 17:03:08 +08:00
012e51c551 fix(cloudreve_v4): remove deprecated authn check for login (#767)
* fix(cloudreve_v4): disable authn check for login

* chore(cloudreve_v4): update site login config fields
2025-07-21 15:53:10 +08:00
59ec1dbc9b feat(lenovonas_share): add option to not show root directory (#772) 2025-07-21 14:38:10 +08:00
6bb28d13f9 fix(quark): set the transcoding link ContentLength to the correct size 2025-07-20 16:40:32 +08:00
811a862288 feat(archives): add additional accepted archive extensions (#747) 2025-07-20 15:32:46 +08:00
74d32fd4d7 fix(simplehttp): logic bug when unable to parse file name (#761) 2025-07-20 14:13:30 +08:00
cedb3d488d [skip ci] chore(ci): output binary name set to openlist 2025-07-19 23:02:29 +08:00
86324d2d6b fix(net): ensure accurate content-length in response (#749)
* fix(fs): ensure accurate content-length in http2 requests

Chrome browsers were unable to preview thumbnails, reporting an
'ERR_HTTP_2_PROTOCOL_ERROR'. This was caused by an incorrect
content-length header in the server's response for thumbnail images.

This commit corrects the content-length calculation, allowing
Chrome and other compliant clients to render thumbnails correctly.

* fix(net): ensure accurate content-length in response

* Fill in the gaps

* .

---------

Co-authored-by: zhiqiang.huang <zhiqiang.tech@gmail.com>
Co-authored-by: j2rong4cn <j2rong@qq.com>
2025-07-19 20:36:27 +08:00
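The underlying rule: under HTTP/2, a Content-Length that disagrees with the actual body size is a protocol error, which is why Chrome rejected the thumbnails. A sketch of the safe pattern; the handler shape is illustrative:

```go
package main

import (
	"net/http"
	"strconv"
)

// serveThumb declares exactly as many bytes as it writes; any
// mismatch shows up in Chrome as ERR_HTTP_2_PROTOCOL_ERROR.
func serveThumb(w http.ResponseWriter, data []byte) {
	w.Header().Set("Content-Type", "image/jpeg")
	w.Header().Set("Content-Length", strconv.Itoa(len(data)))
	w.Write(data)
}

func main() {
	http.HandleFunc("/thumb", func(w http.ResponseWriter, r *http.Request) {
		serveThumb(w, []byte{0xFF, 0xD8, 0xFF, 0xD9}) // tiny placeholder JPEG
	})
	_ = http.ListenAndServe(":8080", nil)
}
```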
648079ae24 remove upx (#750)
Update build.sh

Signed-off-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>
2025-07-18 12:38:17 +08:00
Dgs e8d45398d6 feat(quark_uc_tv): add streaming link api (#728) 2025-07-17 14:24:16 +08:00
0c461991f9 chore: standardize context keys with custom ContextKey type (#697)
* chore: standardize context keys with custom ContextKey type

* fix bug

* Use Request.Context
2025-07-14 23:55:17 +08:00
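The motivation for a dedicated ContextKey type: context.Value matches on both type and value, so a named key type cannot collide with a plain string key from another package. A minimal illustration with illustrative key names:

```go
package main

import (
	"context"
	"fmt"
)

// ContextKey is a distinct type: a plain string "user" from another
// package can no longer collide with UserKey.
type ContextKey string

const UserKey ContextKey = "user"

func main() {
	ctx := context.WithValue(context.Background(), UserKey, "admin")
	fmt.Println(ctx.Value(UserKey)) // admin
	fmt.Println(ctx.Value("user"))  // <nil>: different type, no match
}
```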
2a4c546a8b feat: default settings api (#716)
* feat: default settings api

* fix logic bug

* chore
2025-07-14 23:41:34 +08:00
750d4eb3f6 docs(README): add disclaimer (#705)
add disclaimer
2025-07-13 15:22:25 +08:00
cc01b410a4 perf(link): optimize concurrent response (#641)
* fix(crypt): bug caused by link cache

* perf(crypt,mega,halalcloud,quark,uc): optimize concurrent response link

* chore: remove unused code

* ftp

* Fix bugs; release resources

* Add SyncClosers

* local,sftp,smb

* Refactor, optimize, enhance

* Update internal/stream/util.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>

* chore

* chore

* Optimize and fix bugs

* .

---------

Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-07-12 17:57:54 +08:00
e5fbe72581 fix(security): add login count validation for webdav (#693) 2025-07-12 17:03:41 +08:00
283f3723d1 [skip ci] chore(ci): update openwrt hook 2025-07-12 12:06:36 +08:00
ad8c7b37a1 chore(ci):Disable duplicate build process 2025-07-12 11:49:27 +08:00
a84ffb96e9 chore(ci):Simplify the build process (#686)
* refactor(ci):Minify build files
2025-07-11 20:30:31 +08:00
19c6b6f930 feat(115_open): add offline download (#683) 2025-07-11 20:17:54 +08:00
eed3c0533c fix(deps): update module github.com/go-resty/resty/v2 to v2.16.5 (#628)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-11 10:26:44 +08:00
c72ba9828a fix(deps): update module github.com/deckarep/golang-set/v2 to v2.8.0 (#589)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-11 10:25:08 +08:00
4965a1b909 fix(deps): update module github.com/blevesearch/bleve/v2 to v2.5.2 (#582)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-11 10:24:50 +08:00
1bba550469 chore(deps): update dependency go to 1.24 (#578)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-11 10:24:23 +08:00
d678322b18 fix(deps): update module github.com/yuin/goldmark to v1.7.12 (#575)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-11 10:24:09 +08:00
efd8897bdf fix(deps): update module github.com/pkg/sftp to v1.13.9 (#574)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-11 10:23:52 +08:00
7c7cec0993 style(offline_download): add more description in log (#653)
fix(offline_download): add more description in log
2025-07-09 14:16:05 +08:00
3838ef0663 feat(traffic): update progress when caching file (#646)
* feat(traffic): update progress when caching file

* Adjust parameter order and naming

---------

Co-authored-by: j2rong4cn <j2rong@qq.com>
2025-07-08 21:41:45 +08:00
9e610af114 fix(115_open): upload progress error (#637) 2025-07-07 18:39:09 +08:00
0177177238 fix(crypt): pass refresh list request (close #609) 2025-07-06 13:20:42 +08:00
a77e515c9b fix(ocr): repair verification code OCR recognition service (#602)
* fix(ocr): Repair verification code OCR recognition service

* Fix the issue that it did not work for existing (non-new) users

* chore: rename SettingItem.PreDefault to MigrationValue

---------

Co-authored-by: j2rong4cn <j2rong@qq.com>
2025-07-06 13:09:17 +08:00
4af16ab009 fix(115open):fix limit_rate save (#601) 2025-07-06 12:07:07 +08:00
da35423198 [skip ci] chore: go mod tidy 2025-07-06 00:55:23 +08:00
9612d61e60 chore(pkg): update singleflight 2025-07-05 13:31:47 +08:00
276 changed files with 7437 additions and 4975 deletions

View File

@@ -25,11 +25,11 @@ body:
   - label: |
       我确认我的描述清晰,语法礼貌,能帮助开发者快速定位问题,并符合社区规则。
   - label: |
-      我已确认阅读了[OpenList文档](https://docs.oplist.org)。
+      我已确认阅读了[OpenList文档](https://doc.oplist.org)。
   - label: |
       我已确认没有重复的问题或讨论。
   - label: |
-      我已确认是`OpenList`的问题,而不是其他原因(例如 [网络](https://docs.oplist.org/zh/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host) `依赖`或`操作`)。
+      我已确认是`OpenList`的问题,而不是其他原因(例如 [网络](https://doc.oplist.org/faq/howto#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host-1) `依赖`或`操作`)。
   - label: |
       我认为此问题必须由`OpenList`处理,而非第三方。
   - label: |
@@ -72,7 +72,7 @@ body:
   attributes:
     label: 日志(可选)
     description: |
-      请复制粘贴错误日志,或者截图。(可隐藏隐私字段)
+      请复制粘贴错误日志,或者截图。(可隐藏隐私字段) [查看方法](https://doc.oplist.org/faq/howto#%E5%A6%82%E4%BD%95%E5%BF%AB%E9%80%9F%E5%AE%9A%E4%BD%8Dbug)
   - type: textarea
     id: reproduction
     attributes:

View File

@@ -25,11 +25,11 @@ body:
   - label: |
       I confirm my description is clear, polite, helps developers quickly locate the issue, and complies with community rules.
   - label: |
-      I have read the [OpenList documentation](https://docs.oplist.org).
+      I have read the [OpenList documentation](https://doc.oplist.org).
   - label: |
       I confirm there are no duplicate issues or discussions.
   - label: |
-      I confirm this is an `OpenList` issue, not caused by other reasons (such as [network](https://docs.oplist.org/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host), dependencies, or operation).
+      I confirm this is an `OpenList` issue, not caused by other reasons (such as [network](https://doc.oplist.org/faq/howto#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host-1), dependencies, or operation).
   - label: |
       I believe this issue must be handled by `OpenList` and not by a third party.
   - label: |
@@ -72,7 +72,7 @@ body:
   attributes:
     label: Logs (optional)
     description: |
-      Please copy and paste any relevant log output or screenshots. (You may mask sensitive fields)
+      Please copy and paste any relevant log output or screenshots. (You may mask sensitive fields) [Guide](https://doc.oplist.org/faq/howto#how-to-quickly-locate-bugs)
   - type: textarea
     id: reproduction
     attributes:

View File

@@ -19,7 +19,7 @@ body:
   - label: |
       我确认我的描述清晰,语法礼貌,能帮助开发者快速定位问题,并符合社区规则。
   - label: |
-      我已确认阅读了[OpenList文档](https://docs.oplist.org)。
+      我已确认阅读了[OpenList文档](https://doc.oplist.org)。
   - label: |
       我已确认没有重复的问题或讨论。
   - label: |

View File

@@ -19,7 +19,7 @@ body:
   - label: |
       I confirm my description is clear, polite, helps developers quickly locate the issue, and complies with community rules.
   - label: |
-      I have read the [OpenList documentation](https://docs.oplist.org).
+      I have read the [OpenList documentation](https://doc.oplist.org).
   - label: |
       I confirm there are no duplicate issues or discussions.
   - label: |

View File

@@ -14,12 +14,8 @@ permissions:
 jobs:
   changelog:
-    strategy:
-      matrix:
-        platform: [ubuntu-latest]
-        go-version: ["1.21"]
     name: Beta Release Changelog
-    runs-on: ${{ matrix.platform }}
+    runs-on: ubuntu-latest
     steps:
       - name: Checkout
         uses: actions/checkout@v4
@@ -65,7 +61,7 @@ jobs:
     strategy:
       matrix:
         include:
-          - target: "!(*musl*|*windows-arm64*|*android*|*freebsd*)" # xgo
+          - target: "!(*musl*|*windows-arm64*|*windows7-*|*android*|*freebsd*)" # xgo and loongarch
             hash: "md5"
           - target: "linux-!(arm*)-musl*" #musl-not-arm
             hash: "md5-linux-musl"
@@ -73,6 +69,8 @@ jobs:
             hash: "md5-linux-musl-arm"
           - target: "windows-arm64" #win-arm64
             hash: "md5-windows-arm64"
+          - target: "windows7-*" #win7
+            hash: "md5-windows7"
           - target: "android-*" #android
             hash: "md5-android"
           - target: "freebsd-*" #freebsd
@@ -89,27 +87,29 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: "1.22"
+          go-version: "1.24.5"
       - name: Setup web
         run: bash build.sh dev web
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
       - name: Build
-        uses: OpenListTeam/cgo-actions@v1.1.2
+        uses: OpenListTeam/cgo-actions@v1.2.2
         with:
           targets: ${{ matrix.target }}
           musl-target-format: $os-$musl-$arch
+          github-token: ${{ secrets.GITHUB_TOKEN }}
           out-dir: build
           output: openlist-$target$ext
           musl-base-url: "https://github.com/OpenListTeam/musl-compilers/releases/latest/download/"
           x-flags: |
             github.com/OpenListTeam/OpenList/v4/internal/conf.BuiltAt=$built_at
-            github.com/OpenListTeam/OpenList/v4/internal/conf.GitAuthor=OpenList
+            github.com/OpenListTeam/OpenList/v4/internal/conf.GitAuthor=The OpenList Projects Contributors <noreply@openlist.team>
             github.com/OpenListTeam/OpenList/v4/internal/conf.GitCommit=$git_commit
             github.com/OpenListTeam/OpenList/v4/internal/conf.Version=$tag
-            github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=dev
+            github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=rolling
       - name: Compress
         run: |

View File

@@ -1,8 +1,6 @@
 name: Test Build
 on:
-  push:
-    branches: ["main"]
   pull_request:
     branches: ["main"]
   workflow_dispatch:
@@ -15,7 +13,6 @@ jobs:
   build:
     strategy:
       matrix:
-        platform: [ubuntu-latest]
        target:
          - darwin-amd64
          - darwin-arm64
@@ -24,8 +21,8 @@ jobs:
          - linux-amd64-musl
          - windows-arm64
          - android-arm64
-    name: Build
-    runs-on: ${{ matrix.platform }}
+    name: Build ${{ matrix.target }}
+    runs-on: ubuntu-latest
     steps:
       - name: Checkout
         uses: actions/checkout@v4
@@ -36,28 +33,31 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: "1.22"
+          go-version: "1.24.5"
       - name: Setup web
         run: bash build.sh dev web
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
      - name: Build
-        uses: OpenListTeam/cgo-actions@v1.1.2
+        uses: OpenListTeam/cgo-actions@v1.2.2
         with:
           targets: ${{ matrix.target }}
           musl-target-format: $os-$musl-$arch
+          github-token: ${{ secrets.GITHUB_TOKEN }}
           out-dir: build
           x-flags: |
             github.com/OpenListTeam/OpenList/v4/internal/conf.BuiltAt=$built_at
-            github.com/OpenListTeam/OpenList/v4/internal/conf.GitAuthor=OpenList
+            github.com/OpenListTeam/OpenList/v4/internal/conf.GitAuthor=The OpenList Projects Contributors <noreply@openlist.team>
             github.com/OpenListTeam/OpenList/v4/internal/conf.GitCommit=$git_commit
             github.com/OpenListTeam/OpenList/v4/internal/conf.Version=$tag
-            github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=dev
+            github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=rolling
+          output: openlist$ext
       - name: Upload artifact
         uses: actions/upload-artifact@v4
         with:
-          name: openlist_${{ env.SHA }}_${{ matrix.target }}
+          name: openlist_${{ steps.short-sha.outputs.sha }}_${{ matrix.target }}
           path: build/*

View File

@@ -1,4 +1,4 @@
-name: Automatic changelog
+name: Release Automatic changelog
 on:
   push:
View File

@@ -8,24 +8,34 @@ permissions:
   contents: write
 jobs:
+  # Set release to prerelease first
+  prerelease:
+    name: Set Prerelease
+    runs-on: ubuntu-latest
+    steps:
+      - name: Prerelease
+        uses: irongut/EditRelease@v1.2.0
+        with:
+          token: ${{ secrets.GITHUB_TOKEN }}
+          id: ${{ github.event.release.id }}
+          prerelease: true
+  # Main release job for all platforms
   release:
+    needs: prerelease
     strategy:
       matrix:
-        platform: [ ubuntu-latest ]
-        go-version: [ '1.21' ]
-    name: Release
-    runs-on: ${{ matrix.platform }}
+        build-type: [ 'standard', 'lite' ]
+        target-platform: [ '', 'android', 'freebsd', 'linux_musl', 'linux_musl_arm' ]
+    name: Release ${{ matrix.target-platform && format('{0} ', matrix.target-platform) || '' }}${{ matrix.build-type == 'lite' && 'Lite' || '' }}
+    runs-on: ubuntu-latest
     steps:
       - name: Free Disk Space (Ubuntu)
+        if: matrix.target-platform == ''
         uses: jlumbroso/free-disk-space@main
         with:
-          # this might remove tools that are actually needed,
-          # if set to "true" but frees about 6 GB
           tool-cache: false
-          # all of these default to true, but feel free to set to
-          # "false" if necessary for your workflow
           android: true
           dotnet: true
           haskell: true
@@ -33,17 +43,10 @@ jobs:
           docker-images: true
           swap-storage: true
-      - name: Prerelease
-        uses: irongut/EditRelease@v1.2.0
-        with:
-          token: ${{ secrets.GITHUB_TOKEN }}
-          id: ${{ github.event.release.id }}
-          prerelease: true
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: ${{ matrix.go-version }}
+          go-version: '1.24'
       - name: Checkout
         uses: actions/checkout@v4
@@ -51,6 +54,7 @@ jobs:
           fetch-depth: 0
       - name: Install dependencies
+        if: matrix.target-platform == ''
         run: |
           sudo snap install zig --classic --beta
           docker pull crazymax/xgo:latest
@@ -59,70 +63,10 @@ jobs:
       - name: Build
         run: |
-          bash build.sh release
+          bash build.sh release ${{ matrix.build-type == 'lite' && 'lite' || '' }} ${{ matrix.target-platform }}
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-      - name: Upload assets
-        uses: softprops/action-gh-release@v2
-        with:
-          files: build/compress/*
-          prerelease: false
-  release-lite:
-    strategy:
-      matrix:
-        platform: [ ubuntu-latest ]
-        go-version: [ '1.21' ]
-    name: Release Lite
-    runs-on: ${{ matrix.platform }}
-    steps:
-      - name: Free Disk Space (Ubuntu)
-        uses: jlumbroso/free-disk-space@main
-        with:
-          # this might remove tools that are actually needed,
-          # if set to "true" but frees about 6 GB
-          tool-cache: false
-          # all of these default to true, but feel free to set to
-          # "false" if necessary for your workflow
-          android: true
-          dotnet: true
-          haskell: true
-          large-packages: true
-          docker-images: true
-          swap-storage: true
-      - name: Prerelease
-        uses: irongut/EditRelease@v1.2.0
-        with:
-          token: ${{ secrets.GITHUB_TOKEN }}
-          id: ${{ github.event.release.id }}
-          prerelease: true
-      - name: Setup Go
-        uses: actions/setup-go@v5
-        with:
-          go-version: ${{ matrix.go-version }}
-      - name: Checkout
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-      - name: Install dependencies
-        run: |
-          sudo snap install zig --classic --beta
-          docker pull crazymax/xgo:latest
-          go install github.com/crazy-max/xgo@latest
-          sudo apt install upx
-      - name: Build
-        run: |
-          bash build.sh release lite
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
       - name: Upload assets
         uses: softprops/action-gh-release@v2

View File

@ -1,69 +0,0 @@
name: Release builds (Android)
on:
release:
types: [ published ]
permissions:
contents: write
jobs:
release_android:
strategy:
matrix:
platform: [ ubuntu-latest ]
go-version: [ '1.21' ]
name: Release
runs-on: ${{ matrix.platform }}
steps:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build
run: |
bash build.sh release android
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Upload assets
uses: softprops/action-gh-release@v2
with:
files: build/compress/*
release_android_lite:
strategy:
matrix:
platform: [ ubuntu-latest ]
go-version: [ '1.21' ]
name: Release
runs-on: ${{ matrix.platform }}
steps:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build
run: |
bash build.sh release lite android
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Upload assets
uses: softprops/action-gh-release@v2
with:
files: build/compress/*

View File

@@ -31,11 +31,8 @@ env:
   REGISTRY: ghcr.io
   ARTIFACT_NAME: 'binaries_docker_release'
   ARTIFACT_NAME_LITE: 'binaries_docker_release_lite'
-  RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64'
+  RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/ppc64le,linux/riscv64,linux/loong64' ### Temporarily disable Docker builds for linux/s390x architectures for unknown reasons.
   IMAGE_PUSH: ${{ github.event_name == 'push' || github.event_name == 'workflow_dispatch' }}
-  IMAGE_IS_PROD: ${{ github.ref_type == 'tag' || github.event.inputs.as_latest == 'true' }}
-  IMAGE_TAGS_BETA: |
-    type=raw,value=beta,enable={{is_default_branch}}
 permissions:
   packages: write
@@ -65,17 +62,11 @@ jobs:
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-      - name: Build go binary (beta)
-        if: env.IMAGE_IS_PROD != 'true'
-        run: bash build.sh beta docker-multiplatform
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
       - name: Build go binary (release)
-        if: env.IMAGE_IS_PROD == 'true'
         run: bash build.sh release docker-multiplatform
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
       - name: Upload artifacts
         uses: actions/upload-artifact@v4
@@ -88,7 +79,7 @@ jobs:
           !build/musl-libs/**
   build_binary_lite:
-    name: Build Binaries for Docker Release
+    name: Build Binaries for Docker Release (Lite)
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
@@ -111,17 +102,11 @@ jobs:
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-      - name: Build go binary (beta)
-        if: env.IMAGE_IS_PROD != 'true'
-        run: bash build.sh beta lite docker-multiplatform
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
       - name: Build go binary (release)
-        if: env.IMAGE_IS_PROD == 'true'
         run: bash build.sh release lite docker-multiplatform
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
       - name: Upload artifacts
         uses: actions/upload-artifact@v4
@@ -142,15 +127,19 @@ jobs:
         image: ["latest", "ffmpeg", "aria2", "aio"]
         include:
           - image: "latest"
+            base_image_tag: "base"
             build_arg: ""
             tag_favor: ""
           - image: "ffmpeg"
+            base_image_tag: "ffmpeg"
             build_arg: INSTALL_FFMPEG=true
             tag_favor: "suffix=-ffmpeg,onlatest=true"
           - image: "aria2"
+            base_image_tag: "aria2"
             build_arg: INSTALL_ARIA2=true
             tag_favor: "suffix=-aria2,onlatest=true"
           - image: "aio"
+            base_image_tag: "aio"
             build_arg: |
               INSTALL_FFMPEG=true
               INSTALL_ARIA2=true
@@ -181,7 +170,7 @@ jobs:
         if: env.IMAGE_PUSH == 'true'
         uses: docker/login-action@v3
         with:
-          username: ${{ env.DOCKERHUB_ORG_NAME }}
+          username: ${{ vars.DOCKERHUB_ORG_NAME_BACKUP || env.DOCKERHUB_ORG_NAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
       - name: Docker meta
@@ -192,13 +181,11 @@ jobs:
           ${{ env.REGISTRY }}/${{ env.GHCR_ORG_NAME }}/${{ env.IMAGE_NAME }}
           ${{ env.DOCKERHUB_ORG_NAME }}/${{ env.IMAGE_NAME_DOCKERHUB }}
         tags: >
-          ${{ env.IMAGE_IS_PROD == 'true' && (
-            github.event_name == 'workflow_dispatch'
+          ${{ github.event_name == 'workflow_dispatch'
             && format('type=raw,value={0}', github.event.inputs.manual_tag)
-            || format('type=raw,value={0}', github.ref_name)
-          ) || env.IMAGE_TAGS_BETA }}
+            || format('type=raw,value={0}', github.ref_name) }}
         flavor: |
-          latest=${{ env.IMAGE_IS_PROD }}
+          latest=${{ github.event_name == 'push' || github.event.inputs.as_latest == 'true' }}
           ${{ matrix.tag_favor }}
       - name: Build and push
@@ -208,29 +195,35 @@ jobs:
           context: .
           file: Dockerfile.ci
           push: ${{ env.IMAGE_PUSH == 'true' }}
-          build-args: ${{ matrix.build_arg }}
+          build-args: |
+            BASE_IMAGE_TAG=${{ matrix.base_image_tag }}
+            ${{ matrix.build_arg }}
           tags: ${{ steps.meta.outputs.tags }}
           labels: ${{ steps.meta.outputs.labels }}
           platforms: ${{ env.RELEASE_PLATFORMS }}
   release_docker_lite:
     needs: build_binary_lite
-    name: Release Docker image
+    name: Release Docker image (Lite)
     runs-on: ubuntu-latest
     strategy:
       matrix:
         image: ["latest", "ffmpeg", "aria2", "aio"]
         include:
           - image: "latest"
+            base_image_tag: "base"
             build_arg: ""
             tag_favor: "suffix=-lite,onlatest=true"
           - image: "ffmpeg"
+            base_image_tag: "ffmpeg"
             build_arg: INSTALL_FFMPEG=true
             tag_favor: "suffix=-lite-ffmpeg,onlatest=true"
           - image: "aria2"
+            base_image_tag: "aria2"
             build_arg: INSTALL_ARIA2=true
             tag_favor: "suffix=-lite-aria2,onlatest=true"
           - image: "aio"
+            base_image_tag: "aio"
             build_arg: |
               INSTALL_FFMPEG=true
               INSTALL_ARIA2=true
@@ -261,7 +254,7 @@ jobs:
         if: env.IMAGE_PUSH == 'true'
         uses: docker/login-action@v3
         with:
-          username: ${{ env.DOCKERHUB_ORG_NAME }}
+          username: ${{ vars.DOCKERHUB_ORG_NAME_BACKUP || env.DOCKERHUB_ORG_NAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
       - name: Docker meta
@@ -272,13 +265,11 @@ jobs:
           ${{ env.REGISTRY }}/${{ env.GHCR_ORG_NAME }}/${{ env.IMAGE_NAME }}
           ${{ env.DOCKERHUB_ORG_NAME }}/${{ env.IMAGE_NAME_DOCKERHUB }}
         tags: >
-          ${{ env.IMAGE_IS_PROD == 'true' && (
-            github.event_name == 'workflow_dispatch'
+          ${{ github.event_name == 'workflow_dispatch'
             && format('type=raw,value={0}', github.event.inputs.manual_tag)
-            || format('type=raw,value={0}', github.ref_name)
-          ) || env.IMAGE_TAGS_BETA }}
+            || format('type=raw,value={0}', github.ref_name) }}
         flavor: |
-          latest=${{ env.IMAGE_IS_PROD }}
+          latest=${{ github.event_name == 'push' || github.event.inputs.as_latest == 'true' }}
           ${{ matrix.tag_favor }}
       - name: Build and push
@@ -288,7 +279,9 @@ jobs:
           context: .
           file: Dockerfile.ci
           push: ${{ env.IMAGE_PUSH == 'true' }}
-          build-args: ${{ matrix.build_arg }}
+          build-args: |
+            BASE_IMAGE_TAG=${{ matrix.base_image_tag }}
+            ${{ matrix.build_arg }}
           tags: ${{ steps.meta.outputs.tags }}
           labels: ${{ steps.meta.outputs.labels }}
           platforms: ${{ env.RELEASE_PLATFORMS }}

View File

@ -1,69 +0,0 @@
name: Release builds (Freebsd)
on:
release:
types: [ published ]
permissions:
contents: write
jobs:
release_freebsd:
strategy:
matrix:
platform: [ ubuntu-latest ]
go-version: [ '1.21' ]
name: Release
runs-on: ${{ matrix.platform }}
steps:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build
run: |
bash build.sh release freebsd
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Upload assets
uses: softprops/action-gh-release@v2
with:
files: build/compress/*
release_freebsd_lite:
strategy:
matrix:
platform: [ ubuntu-latest ]
go-version: [ '1.21' ]
name: Release
runs-on: ${{ matrix.platform }}
steps:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build
run: |
bash build.sh release lite freebsd
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Upload assets
uses: softprops/action-gh-release@v2
with:
files: build/compress/*

View File

@ -1,69 +0,0 @@
name: Release builds (linux_musl)
on:
release:
types: [ published ]
permissions:
contents: write
jobs:
release_linux_musl:
strategy:
matrix:
platform: [ ubuntu-latest ]
go-version: [ '1.21' ]
name: Release
runs-on: ${{ matrix.platform }}
steps:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build
run: |
bash build.sh release linux_musl
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Upload assets
uses: softprops/action-gh-release@v2
with:
files: build/compress/*
release_linux_musl_lite:
strategy:
matrix:
platform: [ ubuntu-latest ]
go-version: [ '1.21' ]
name: Release
runs-on: ${{ matrix.platform }}
steps:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build
run: |
bash build.sh release lite linux_musl
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Upload assets
uses: softprops/action-gh-release@v2
with:
files: build/compress/*

View File

@ -1,70 +0,0 @@
name: Release builds (linux_musl_arm)
on:
release:
types: [ published ]
permissions:
contents: write
jobs:
release_linux_musl_arm:
strategy:
matrix:
platform: [ ubuntu-latest ]
go-version: [ '1.21' ]
name: Release
runs-on: ${{ matrix.platform }}
steps:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build
run: |
bash build.sh release linux_musl_arm
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Upload assets
uses: softprops/action-gh-release@v2
with:
files: build/compress/*
release_linux_musl_arm_lite:
strategy:
matrix:
platform: [ ubuntu-latest ]
go-version: [ '1.21' ]
name: Release
runs-on: ${{ matrix.platform }}
steps:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build
run: |
bash build.sh release lite linux_musl_arm
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Upload assets
uses: softprops/action-gh-release@v2
with:
files: build/compress/*


@ -1,4 +1,4 @@
name: Docker Beta Release name: Beta Release (Docker)
on: on:
workflow_dispatch: workflow_dispatch:
@ -20,8 +20,7 @@ env:
IMAGE_NAME_DOCKERHUB: openlist IMAGE_NAME_DOCKERHUB: openlist
REGISTRY: ghcr.io REGISTRY: ghcr.io
ARTIFACT_NAME: 'binaries_docker_release' ARTIFACT_NAME: 'binaries_docker_release'
ARTIFACT_NAME_LITE: 'binaries_docker_release_lite' RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/ppc64le,linux/riscv64,linux/loong64' ### Docker builds for the linux/s390x architecture are temporarily disabled for unknown reasons.
RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64'
IMAGE_PUSH: ${{ github.event_name == 'push' }} IMAGE_PUSH: ${{ github.event_name == 'push' }}
IMAGE_TAGS_BETA: | IMAGE_TAGS_BETA: |
type=ref,event=pr type=ref,event=pr
@ -29,7 +28,7 @@ env:
jobs: jobs:
build_binary: build_binary:
name: Build Binaries for Docker Release name: Build Binaries for Docker Release (Beta)
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout - name: Checkout
@ -56,6 +55,7 @@ jobs:
run: bash build.sh beta docker-multiplatform run: bash build.sh beta docker-multiplatform
env: env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
- name: Upload artifacts - name: Upload artifacts
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v4
@ -69,7 +69,7 @@ jobs:
release_docker: release_docker:
needs: build_binary needs: build_binary
name: Release Docker image name: Release Docker image (Beta)
runs-on: ubuntu-latest runs-on: ubuntu-latest
permissions: permissions:
packages: write packages: write
@ -78,15 +78,19 @@ jobs:
image: ["latest", "ffmpeg", "aria2", "aio"] image: ["latest", "ffmpeg", "aria2", "aio"]
include: include:
- image: "latest" - image: "latest"
base_image_tag: "base"
build_arg: "" build_arg: ""
tag_favor: "" tag_favor: ""
- image: "ffmpeg" - image: "ffmpeg"
base_image_tag: "ffmpeg"
build_arg: INSTALL_FFMPEG=true build_arg: INSTALL_FFMPEG=true
tag_favor: "suffix=-ffmpeg,onlatest=true" tag_favor: "suffix=-ffmpeg,onlatest=true"
- image: "aria2" - image: "aria2"
base_image_tag: "aria2"
build_arg: INSTALL_ARIA2=true build_arg: INSTALL_ARIA2=true
tag_favor: "suffix=-aria2,onlatest=true" tag_favor: "suffix=-aria2,onlatest=true"
- image: "aio" - image: "aio"
base_image_tag: "aio"
build_arg: | build_arg: |
INSTALL_FFMPEG=true INSTALL_FFMPEG=true
INSTALL_ARIA2=true INSTALL_ARIA2=true
@ -117,7 +121,7 @@ jobs:
if: env.IMAGE_PUSH == 'true' if: env.IMAGE_PUSH == 'true'
uses: docker/login-action@v3 uses: docker/login-action@v3
with: with:
username: ${{ env.DOCKERHUB_ORG_NAME }} username: ${{ vars.DOCKERHUB_ORG_NAME_BACKUP || env.DOCKERHUB_ORG_NAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }} password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Docker meta - name: Docker meta
@ -138,7 +142,9 @@ jobs:
context: . context: .
file: Dockerfile.ci file: Dockerfile.ci
push: ${{ env.IMAGE_PUSH == 'true' }} push: ${{ env.IMAGE_PUSH == 'true' }}
build-args: ${{ matrix.build_arg }} build-args: |
BASE_IMAGE_TAG=${{ matrix.base_image_tag }}
${{ matrix.build_arg }}
tags: ${{ steps.meta.outputs.tags }} tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }} labels: ${{ steps.meta.outputs.labels }}
platforms: ${{ env.RELEASE_PLATFORMS }} platforms: ${{ env.RELEASE_PLATFORMS }}


@ -19,7 +19,7 @@ jobs:
uses: peter-evans/repository-dispatch@v3 uses: peter-evans/repository-dispatch@v3
with: with:
token: ${{ secrets.EXTERNAL_REPO_TOKEN_LUCI_APP_OPENLIST }} token: ${{ secrets.EXTERNAL_REPO_TOKEN_LUCI_APP_OPENLIST }}
repository: ${{ vars.HOOK_REPO || 'OpenListTeam/luci-app-openlist' }} repository: ${{ vars.HOOK_REPO || 'OpenListTeam/OpenList-OpenWRT' }}
event-type: update-hashes event-type: update-hashes
client-payload: | client-payload: |
{ {


@ -1,4 +1,7 @@
FROM docker.io/library/alpine:edge AS builder ### The default image variant is base. You can enable other variants by overriding BASE_IMAGE_TAG; supported values: base (default), aria2, ffmpeg, aio
ARG BASE_IMAGE_TAG=base
FROM alpine:edge AS builder
LABEL stage=go-builder LABEL stage=go-builder
WORKDIR /app/ WORKDIR /app/
RUN apk add --no-cache bash curl jq gcc git go musl-dev RUN apk add --no-cache bash curl jq gcc git go musl-dev
@ -7,36 +10,26 @@ RUN go mod download
COPY ./ ./ COPY ./ ./
RUN bash build.sh release docker RUN bash build.sh release docker
FROM alpine:edge FROM openlistteam/openlist-base-image:${BASE_IMAGE_TAG}
LABEL MAINTAINER="OpenList"
ARG INSTALL_FFMPEG=false ARG INSTALL_FFMPEG=false
ARG INSTALL_ARIA2=false ARG INSTALL_ARIA2=false
LABEL MAINTAINER="OpenList" ARG USER=openlist
ARG UID=1001
ARG GID=1001
WORKDIR /opt/openlist/ WORKDIR /opt/openlist/
RUN apk update && \
apk upgrade --no-cache && \
apk add --no-cache bash ca-certificates su-exec tzdata; \
[ "$INSTALL_FFMPEG" = "true" ] && apk add --no-cache ffmpeg; \
[ "$INSTALL_ARIA2" = "true" ] && apk add --no-cache curl aria2 && \
mkdir -p /opt/aria2/.aria2 && \
wget https://github.com/P3TERX/aria2.conf/archive/refs/heads/master.tar.gz -O /tmp/aria-conf.tar.gz && \
tar -zxvf /tmp/aria-conf.tar.gz -C /opt/aria2/.aria2 --strip-components=1 && rm -f /tmp/aria-conf.tar.gz && \
sed -i 's|rpc-secret|#rpc-secret|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/script.conf && \
sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/script.conf && \
touch /opt/aria2/.aria2/aria2.session && \
/opt/aria2/.aria2/tracker.sh ; \
rm -rf /var/cache/apk/*
COPY --chmod=755 --from=builder /app/bin/openlist ./ COPY --chmod=755 --from=builder /app/bin/openlist ./
COPY --chmod=755 entrypoint.sh /entrypoint.sh COPY --chmod=755 entrypoint.sh /entrypoint.sh
RUN adduser -u ${UID} -g ${GID} -h /opt/openlist/data -D -s /bin/sh ${USER} \
&& chown -R ${UID}:${GID} /opt \
&& chown -R ${UID}:${GID} /entrypoint.sh
USER ${USER}
RUN /entrypoint.sh version RUN /entrypoint.sh version
ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2} ENV UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
VOLUME /opt/openlist/data/ VOLUME /opt/openlist/data/
EXPOSE 5244 5245 EXPOSE 5244 5245
CMD [ "/entrypoint.sh" ] CMD [ "/entrypoint.sh" ]


@ -1,34 +1,26 @@
FROM docker.io/library/alpine:edge ARG BASE_IMAGE_TAG=base
FROM ghcr.io/openlistteam/openlist-base-image:${BASE_IMAGE_TAG}
LABEL MAINTAINER="OpenList"
ARG TARGETPLATFORM ARG TARGETPLATFORM
ARG INSTALL_FFMPEG=false ARG INSTALL_FFMPEG=false
ARG INSTALL_ARIA2=false ARG INSTALL_ARIA2=false
LABEL MAINTAINER="OpenList" ARG USER=openlist
ARG UID=1001
ARG GID=1001
WORKDIR /opt/openlist/ WORKDIR /opt/openlist/
RUN apk update && \
apk upgrade --no-cache && \
apk add --no-cache bash ca-certificates su-exec tzdata; \
[ "$INSTALL_FFMPEG" = "true" ] && apk add --no-cache ffmpeg; \
[ "$INSTALL_ARIA2" = "true" ] && apk add --no-cache curl aria2 && \
mkdir -p /opt/aria2/.aria2 && \
wget https://github.com/P3TERX/aria2.conf/archive/refs/heads/master.tar.gz -O /tmp/aria-conf.tar.gz && \
tar -zxvf /tmp/aria-conf.tar.gz -C /opt/aria2/.aria2 --strip-components=1 && rm -f /tmp/aria-conf.tar.gz && \
sed -i 's|rpc-secret|#rpc-secret|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/script.conf && \
sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/script.conf && \
touch /opt/aria2/.aria2/aria2.session && \
/opt/aria2/.aria2/tracker.sh ; \
rm -rf /var/cache/apk/*
COPY --chmod=755 /build/${TARGETPLATFORM}/openlist ./ COPY --chmod=755 /build/${TARGETPLATFORM}/openlist ./
COPY --chmod=755 entrypoint.sh /entrypoint.sh COPY --chmod=755 entrypoint.sh /entrypoint.sh
RUN adduser -u ${UID} -g ${GID} -h /opt/openlist/data -D -s /bin/sh ${USER} \
&& chown -R ${UID}:${GID} /opt \
&& chown -R ${UID}:${GID} /entrypoint.sh
USER ${USER}
RUN /entrypoint.sh version RUN /entrypoint.sh version
ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2} ENV UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
VOLUME /opt/openlist/data/ VOLUME /opt/openlist/data/
EXPOSE 5244 5245 EXPOSE 5244 5245
CMD [ "/entrypoint.sh" ] CMD [ "/entrypoint.sh" ]


@ -20,6 +20,34 @@
- [CODE OF CONDUCT](./CODE_OF_CONDUCT.md) - [CODE OF CONDUCT](./CODE_OF_CONDUCT.md)
- [LICENSE](./LICENSE) - [LICENSE](./LICENSE)
## Disclaimer
OpenList is an open-source project independently maintained by the OpenList Team. It follows the AGPL-3.0 license and is committed to keeping its code fully open and its modifications transparent.
We have noticed that some third-party projects with names similar to this project have appeared in the community, such as OpenListApp/OpenListApp, along with some paid proprietary software using the same or similar naming. To avoid user confusion, we hereby declare:
- OpenList has no official association with any third-party derivative projects.
- All software, code, and services of this project are maintained by the OpenList Team and are freely available on GitHub.
- Project documentation and API services rely primarily on free community resources provided by Cloudflare. There are currently no paid plans or commercial deployments, and using the existing features involves no cost.
We respect the community's right to free use and derivative development, but we also strongly urge downstream projects:
- Do not use the "OpenList" name for misleading promotion or commercial gain;
- Do not distribute OpenList-based code in a closed-source manner or in violation of the AGPL license terms.
To help keep the ecosystem healthy, we recommend:
- Clearly indicate the project's source, and choose an appropriate open-source license in keeping with the open-source spirit;
- For commercial use, avoid "OpenList" or any confusingly similar name as the project name;
- If you need to use materials under OpenListTeam/Logo, you may modify and use them in compliance with the agreement.
Thank you for your support and understanding of the OpenList project.
## Features ## Features
- [x] Multiple storages - [x] Multiple storages
@ -78,8 +106,9 @@
## Document ## Document
- 📘 [Docs & Install Guide](https://docs.oplist.org) - 📘 [Global Site](https://doc.oplist.org)
- 📚 [Backup Docs Site](https://docs.openlist.team) - 📚 [Backup Site](https://doc.openlist.team)
- 🌏 [CN Site](https://doc.oplist.org.cn)
## Demo ## Demo


@ -20,6 +20,34 @@
- [行为准则](./CODE_OF_CONDUCT.md) - [行为准则](./CODE_OF_CONDUCT.md)
- [许可证](./LICENSE) - [许可证](./LICENSE)
## 免责声明
OpenList 是一个由 OpenList 团队独立维护的开源项目,遵循 AGPL-3.0 许可证,致力于保持完整的代码开放性和修改透明性。
我们注意到社区中出现了一些与本项目名称相似的第三方项目,如 OpenListApp/OpenListApp以及部分采用相同或近似命名的收费专有软件。为避免用户误解现声明如下
- OpenList 与任何第三方衍生项目无官方关联。
- 本项目的全部软件、代码与服务由 OpenList 团队维护,可在 GitHub 免费获取。
- 项目文档与 API 服务均主要依托于 Cloudflare 提供的公益资源,目前无任何收费计划或商业部署,现有功能使用不涉及任何支出。
我们尊重社区的自由使用与衍生开发权利,但也强烈呼吁下游项目:
- 不应以“OpenList”名义进行冒名宣传或获取商业利益
- 不得将基于 OpenList 的代码进行闭源分发或违反 AGPL 许可证条款。
为了更好地维护生态健康发展,我们建议:
- 明确注明项目来源,并以符合开源精神的方式选择适当的开源许可证;
- 如涉及商业用途请避免使用“OpenList”或任何会产生混淆的方式作为项目名称
- 若需使用本项目位于 OpenListTeam/Logo 下的素材,可在遵守协议的前提下进行修改后使用。
感谢您对 OpenList 项目的支持与理解。
## 功能 ## 功能
- [x] 多种存储 - [x] 多种存储
@ -78,8 +106,9 @@
## 文档 ## 文档
- 📘 [文档与安装指南](https://docs.oplist.org) - 🌏 [国内站点](https://doc.oplist.org.cn)
- 📚 [备用文档站点](https://docs.openlist.team) - 📘 [海外站点](https://doc.oplist.org)
- 📚 [备用站点](https://doc.openlist.team)
## 演示 ## 演示


@ -20,6 +20,34 @@
- [行動規範](./CODE_OF_CONDUCT.md) - [行動規範](./CODE_OF_CONDUCT.md)
- [ライセンス](./LICENSE) - [ライセンス](./LICENSE)
## 免責事項
OpenListは、OpenListチームが独立して維持するオープンソースプロジェクトであり、AGPL-3.0ライセンスに従い、完全なコードの開放性と変更の透明性を維持することに専念しています。
コミュニティ内で、OpenListApp/OpenListAppなど、本プロジェクトと類似した名称を持つサードパーティプロジェクトや、同一または類似した命名を採用する有料専有ソフトウェアが出現していることを確認しています。ユーザーの誤解を避けるため、以下のように宣言いたします
- OpenListは、いかなるサードパーティ派生プロジェクトとも公式な関連性はありません。
- 本プロジェクトのすべてのソフトウェア、コード、サービスはOpenListチームによって維持され、GitHubで無料で取得できます。
- プロジェクトドキュメントとAPIサービスは主にCloudflareが提供する公益リソースに依存しており、現在有料プランや商業展開はなく、既存機能の使用に費用は発生しません。
私たちはコミュニティの自由な使用と派生開発の権利を尊重しますが、下流プロジェクトに強く呼びかけます:
- 「OpenList」の名前で偽装宣伝や商業利益を得るべきではありません
- OpenListベースのコードをクローズドソースで配布したり、AGPLライセンス条項に違反してはいけません。
エコシステムの健全な発展をより良く維持するため、以下を推奨します:
- プロジェクトの出典を明確に示し、オープンソース精神に合致する適切なオープンソースライセンスを選択する;
- 商業用途が関わる場合は、「OpenList」や混乱を招く可能性のある名前をプロジェクト名として使用することを避ける
- OpenListTeam/Logo下の素材を使用する必要がある場合は、協定を遵守した上で修正して使用できます。
OpenListプロジェクトへのご支援とご理解をありがとうございます。
## 特徴 ## 特徴
- [x] 複数ストレージ - [x] 複数ストレージ
@ -78,8 +106,9 @@
## ドキュメント ## ドキュメント
- 📘 [ドキュメント・インストールガイド](https://docs.oplist.org) - 📘 [グローバルサイト](https://doc.oplist.org)
- 📚 [バックアップドキュメントサイト](https://docs.openlist.team) - 📚 [バックアップサイト](https://doc.openlist.team)
- 🌏 [CNサイト](https://doc.oplist.org.cn)
## デモ ## デモ


@ -20,6 +20,34 @@
- [Gedragscode](./CODE_OF_CONDUCT.md) - [Gedragscode](./CODE_OF_CONDUCT.md)
- [Licentie](./LICENSE) - [Licentie](./LICENSE)
## Disclaimer
OpenList is een open-source project dat onafhankelijk wordt onderhouden door het OpenList Team, volgend op de AGPL-3.0 licentie en toegewijd aan het behouden van volledige code openheid en transparantie van wijzigingen.
We hebben gemerkt dat er in de gemeenschap enkele derde partij projecten zijn verschenen met namen vergelijkbaar met dit project, zoals OpenListApp/OpenListApp, evenals enkele betaalde eigendomssoftware die dezelfde of soortgelijke naamgeving gebruikt. Om verwarring bij gebruikers te voorkomen, verklaren we hierbij:
- OpenList heeft geen officiële associatie met enige derde partij afgeleide projecten.
- Alle software, code en diensten van dit project worden onderhouden door het OpenList Team en zijn gratis beschikbaar op GitHub.
- Projectdocumentatie en API diensten zijn voornamelijk afhankelijk van liefdadigheidsbronnen verstrekt door Cloudflare. Er zijn momenteel geen betaalplannen of commerciële implementaties, en het gebruik van bestaande functies brengt geen kosten met zich mee.
We respecteren de rechten van de gemeenschap voor vrij gebruik en afgeleide ontwikkeling, maar we roepen downstream projecten ook ten zeerste op:
- Mogen niet de "OpenList" naam gebruiken voor namaakpromotie of commercieel gewin;
- Mogen OpenList-gebaseerde code niet distribueren op een closed-source manier of AGPL licentievoorwaarden schenden.
Om een gezonde ecosysteemontwikkeling beter te onderhouden, bevelen we aan:
- Duidelijk de projectbron aangeven en passende open-source licenties kiezen in overeenstemming met de open-source geest;
- Bij commercieel gebruik, vermijd het gebruik van "OpenList" of enige verwarrende naamgeving als projectnaam;
- Als u materialen onder OpenListTeam/Logo moet gebruiken, kunt u deze wijzigen en gebruiken onder naleving van de overeenkomst.
Dank u voor uw ondersteuning en begrip van het OpenList-project.
## Functies ## Functies
- [x] Meerdere opslagmogelijkheden - [x] Meerdere opslagmogelijkheden
@ -78,8 +106,9 @@
## Documentatie ## Documentatie
- 📘 [Documentatie & Installatiegids](https://docs.oplist.org) - 📘 [Global Site](https://doc.oplist.org)
- 📚 [Back-up documentatiesite](https://docs.openlist.team) - 📚 [Backup Site](https://doc.openlist.team)
- 🌏 [CN Site](https://doc.oplist.org.cn)
## Demo ## Demo

build.sh

@ -4,6 +4,9 @@ builtAt="$(date +'%F %T %z')"
gitAuthor="The OpenList Projects Contributors <noreply@openlist.team>" gitAuthor="The OpenList Projects Contributors <noreply@openlist.team>"
gitCommit=$(git log --pretty=format:"%h" -1) gitCommit=$(git log --pretty=format:"%h" -1)
# Set frontend repository, default to OpenListTeam/OpenList-Frontend
frontendRepo="${FRONTEND_REPO:-OpenListTeam/OpenList-Frontend}"
githubAuthArgs="" githubAuthArgs=""
if [ -n "$GITHUB_TOKEN" ]; then if [ -n "$GITHUB_TOKEN" ]; then
githubAuthArgs="--header \"Authorization: Bearer $GITHUB_TOKEN\"" githubAuthArgs="--header \"Authorization: Bearer $GITHUB_TOKEN\""
@ -17,15 +20,15 @@ fi
if [ "$1" = "dev" ]; then if [ "$1" = "dev" ]; then
version="dev" version="dev"
webVersion="dev" webVersion="rolling"
elif [ "$1" = "beta" ]; then elif [ "$1" = "beta" ]; then
version="beta" version="beta"
webVersion="dev" webVersion="rolling"
else else
git tag -d beta || true git tag -d beta || true
# Always true if there's no tag # Always true if there's no tag
version=$(git describe --abbrev=0 --tags 2>/dev/null || echo "v0.0.0") version=$(git describe --abbrev=0 --tags 2>/dev/null || echo "v0.0.0")
webVersion=$(eval "curl -fsSL --max-time 2 $githubAuthArgs \"https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases/latest\"" | grep "tag_name" | head -n 1 | awk -F ":" '{print $2}' | sed 's/\"//g;s/,//g;s/ //g') webVersion=$(eval "curl -fsSL --max-time 2 $githubAuthArgs \"https://api.github.com/repos/$frontendRepo/releases/latest\"" | grep "tag_name" | head -n 1 | awk -F ":" '{print $2}' | sed 's/\"//g;s/,//g;s/ //g')
fi fi
echo "backend version: $version" echo "backend version: $version"
@ -45,30 +48,21 @@ ldflags="\
-X 'github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=$webVersion' \ -X 'github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=$webVersion' \
" "
FetchWebDev() { FetchWebRolling() {
pre_release_tag=$(eval "curl -fsSL --max-time 2 $githubAuthArgs https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases" | jq -r 'map(select(.prerelease)) | first | .tag_name') pre_release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/$frontendRepo/releases/tags/rolling\"")
if [ -z "$pre_release_tag" ] || [ "$pre_release_tag" == "null" ]; then
# fall back to latest release
pre_release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases/latest\"")
else
pre_release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases/tags/$pre_release_tag\"")
fi
pre_release_assets=$(echo "$pre_release_json" | jq -r '.assets[].browser_download_url') pre_release_assets=$(echo "$pre_release_json" | jq -r '.assets[].browser_download_url')
if [ "$useLite" = true ]; then # There is no lite for rolling
pre_release_tar_url=$(echo "$pre_release_assets" | grep "openlist-frontend-dist-lite" | grep "\.tar\.gz$")
else
pre_release_tar_url=$(echo "$pre_release_assets" | grep "openlist-frontend-dist" | grep -v "lite" | grep "\.tar\.gz$") pre_release_tar_url=$(echo "$pre_release_assets" | grep "openlist-frontend-dist" | grep -v "lite" | grep "\.tar\.gz$")
fi
curl -fsSL "$pre_release_tar_url" -o web-dist-dev.tar.gz curl -fsSL "$pre_release_tar_url" -o dist.tar.gz
rm -rf public/dist && mkdir -p public/dist rm -rf public/dist && mkdir -p public/dist
tar -zxvf web-dist-dev.tar.gz -C public/dist tar -zxvf dist.tar.gz -C public/dist
rm -rf web-dist-dev.tar.gz rm -rf dist.tar.gz
} }
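
The rewritten FetchWebRolling drops the old pre-release probing and always pulls the frontend bundle from the release tagged rolling. For readers who want to reproduce that lookup outside the shell script, here is a minimal Go sketch; it assumes the standard GitHub releases API, with FRONTEND_REPO and GITHUB_TOKEN mirroring the script's environment variables.

    package main

    import (
        "encoding/json"
        "fmt"
        "net/http"
        "os"
        "strings"
    )

    // release mirrors only the fields of the GitHub release payload used here.
    type release struct {
        Assets []struct {
            BrowserDownloadURL string `json:"browser_download_url"`
        } `json:"assets"`
    }

    func main() {
        repo := os.Getenv("FRONTEND_REPO")
        if repo == "" {
            repo = "OpenListTeam/OpenList-Frontend" // same default as build.sh
        }
        req, _ := http.NewRequest("GET", "https://api.github.com/repos/"+repo+"/releases/tags/rolling", nil)
        if tok := os.Getenv("GITHUB_TOKEN"); tok != "" {
            req.Header.Set("Authorization", "Bearer "+tok) // avoids the unauthenticated rate limit
        }
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        var rel release
        if err := json.NewDecoder(resp.Body).Decode(&rel); err != nil {
            panic(err)
        }
        for _, a := range rel.Assets {
            u := a.BrowserDownloadURL
            // Same filter as the script: the dist tarball, excluding lite builds.
            if strings.Contains(u, "openlist-frontend-dist") && !strings.Contains(u, "lite") && strings.HasSuffix(u, ".tar.gz") {
                fmt.Println(u)
            }
        }
    }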
FetchWebRelease() { FetchWebRelease() {
release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases/latest\"") release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/$frontendRepo/releases/latest\"")
release_assets=$(echo "$release_json" | jq -r '.assets[].browser_download_url') release_assets=$(echo "$release_json" | jq -r '.assets[].browser_download_url')
if [ "$useLite" = true ]; then if [ "$useLite" = true ]; then
@ -95,6 +89,45 @@ BuildWinArm64() {
go build -o "$1" -ldflags="$ldflags" -tags=jsoniter . go build -o "$1" -ldflags="$ldflags" -tags=jsoniter .
} }
BuildWin7() {
# Setup Win7 Go compiler (patched version that supports Windows 7)
go_version=$(go version | grep -o 'go[0-9]\+\.[0-9]\+\.[0-9]\+' | sed 's/go//')
echo "Detected Go version: $go_version"
curl -fsSL --retry 3 -o go-win7.zip -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/XTLS/go-win7/releases/download/patched-${go_version}/go-for-win7-linux-amd64.zip"
rm -rf go-win7
unzip go-win7.zip -d go-win7
rm go-win7.zip
# Set permissions for all wrapper files
chmod +x ./wrapper/zcc-win7
chmod +x ./wrapper/zcxx-win7
chmod +x ./wrapper/zcc-win7-386
chmod +x ./wrapper/zcxx-win7-386
# Build for both 386 and amd64 architectures
for arch in "386" "amd64"; do
echo "building for windows7-${arch}"
export GOOS=windows
export GOARCH=${arch}
export CGO_ENABLED=1
# Use architecture-specific wrapper files
if [ "$arch" = "386" ]; then
export CC=$(pwd)/wrapper/zcc-win7-386
export CXX=$(pwd)/wrapper/zcxx-win7-386
else
export CC=$(pwd)/wrapper/zcc-win7
export CXX=$(pwd)/wrapper/zcxx-win7
fi
# Use the patched Go compiler for Win7 compatibility
$(pwd)/go-win7/bin/go build -o "${1}-${arch}.exe" -ldflags="$ldflags" -tags=jsoniter .
done
}
BuildDev() { BuildDev() {
rm -rf .git/ rm -rf .git/
mkdir -p "dist" mkdir -p "dist"
@ -121,8 +154,8 @@ BuildDev() {
xgo -targets=windows/amd64,darwin/amd64,darwin/arm64 -out "$appName" -ldflags="$ldflags" -tags=jsoniter . xgo -targets=windows/amd64,darwin/amd64,darwin/arm64 -out "$appName" -ldflags="$ldflags" -tags=jsoniter .
mv "$appName"-* dist mv "$appName"-* dist
cd dist cd dist
cp ./"$appName"-windows-amd64.exe ./"$appName"-windows-amd64-upx.exe # cp ./"$appName"-windows-amd64.exe ./"$appName"-windows-amd64-upx.exe
upx -9 ./"$appName"-windows-amd64-upx.exe # upx -9 ./"$appName"-windows-amd64-upx.exe
find . -type f -print0 | xargs -0 md5sum >md5.txt find . -type f -print0 | xargs -0 md5sum >md5.txt
cat md5.txt cat md5.txt
} }
@ -134,7 +167,7 @@ BuildDocker() {
PrepareBuildDockerMusl() { PrepareBuildDockerMusl() {
mkdir -p build/musl-libs mkdir -p build/musl-libs
BASE="https://github.com/OpenListTeam/musl-compilers/releases/latest/download/" BASE="https://github.com/OpenListTeam/musl-compilers/releases/latest/download/"
FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross i486-linux-musl-cross s390x-linux-musl-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross riscv64-linux-musl-cross powerpc64le-linux-musl-cross) FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross i486-linux-musl-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross riscv64-linux-musl-cross powerpc64le-linux-musl-cross loongarch64-linux-musl-cross) ## Disable s390x-linux-musl-cross builds
for i in "${FILES[@]}"; do for i in "${FILES[@]}"; do
url="${BASE}${i}.tgz" url="${BASE}${i}.tgz"
lib_tgz="build/${i}.tgz" lib_tgz="build/${i}.tgz"
@ -153,8 +186,8 @@ BuildDockerMultiplatform() {
docker_lflags="--extldflags '-static -fpic' $ldflags" docker_lflags="--extldflags '-static -fpic' $ldflags"
export CGO_ENABLED=1 export CGO_ENABLED=1
OS_ARCHES=(linux-amd64 linux-arm64 linux-386 linux-s390x linux-riscv64 linux-ppc64le) OS_ARCHES=(linux-amd64 linux-arm64 linux-386 linux-riscv64 linux-ppc64le linux-loong64) ## Disable linux-s390x builds
CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc i486-linux-musl-gcc s390x-linux-musl-gcc riscv64-linux-musl-gcc powerpc64le-linux-musl-gcc) CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc i486-linux-musl-gcc riscv64-linux-musl-gcc powerpc64le-linux-musl-gcc loongarch64-linux-musl-gcc) ## Disable s390x-linux-musl-gcc builds
for i in "${!OS_ARCHES[@]}"; do for i in "${!OS_ARCHES[@]}"; do
os_arch=${OS_ARCHES[$i]} os_arch=${OS_ARCHES[$i]}
cgo_cc=${CGO_ARGS[$i]} cgo_cc=${CGO_ARGS[$i]}
@ -186,12 +219,171 @@ BuildRelease() {
rm -rf .git/ rm -rf .git/
mkdir -p "build" mkdir -p "build"
BuildWinArm64 ./build/"$appName"-windows-arm64.exe BuildWinArm64 ./build/"$appName"-windows-arm64.exe
BuildWin7 ./build/"$appName"-windows7
xgo -out "$appName" -ldflags="$ldflags" -tags=jsoniter . xgo -out "$appName" -ldflags="$ldflags" -tags=jsoniter .
# why? Because some target platforms seem to have issues with upx compression # why? Because some target platforms seem to have issues with upx compression
upx -9 ./"$appName"-linux-amd64 # upx -9 ./"$appName"-linux-amd64
cp ./"$appName"-windows-amd64.exe ./"$appName"-windows-amd64-upx.exe # cp ./"$appName"-windows-amd64.exe ./"$appName"-windows-amd64-upx.exe
upx -9 ./"$appName"-windows-amd64-upx.exe # upx -9 ./"$appName"-windows-amd64-upx.exe
mv "$appName"-* build mv "$appName"-* build
# Build LoongArch with glibc (both old world abi1.0 and new world abi2.0)
# Separate from musl builds to avoid cache conflicts
BuildLoongGLIBC ./build/$appName-linux-loong64-abi1.0 abi1.0
BuildLoongGLIBC ./build/$appName-linux-loong64 abi2.0
}
BuildLoongGLIBC() {
local target_abi="$2"
local output_file="$1"
local oldWorldGoVersion="1.24.3"
if [ "$target_abi" = "abi1.0" ]; then
echo building for linux-loong64-abi1.0
else
echo building for linux-loong64-abi2.0
target_abi="abi2.0" # Default to abi2.0 if not specified
fi
# Note: No longer need global cache cleanup since ABI1.0 uses isolated cache directory
echo "Using optimized cache strategy: ABI1.0 has isolated cache, ABI2.0 uses standard cache"
if [ "$target_abi" = "abi1.0" ]; then
# Setup abi1.0 toolchain and patched Go compiler similar to cgo-action implementation
echo "Setting up Loongson old-world ABI1.0 toolchain and patched Go compiler..."
# Download and setup patched Go compiler for old-world
if ! curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/go${oldWorldGoVersion}.linux-amd64.tar.gz" \
-o go-loong64-abi1.0.tar.gz; then
echo "Error: Failed to download patched Go compiler for old-world ABI1.0"
if [ -n "$GITHUB_TOKEN" ]; then
echo "Error output from curl:"
curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/go${oldWorldGoVersion}.linux-amd64.tar.gz" \
-o go-loong64-abi1.0.tar.gz || true
fi
return 1
fi
rm -rf go-loong64-abi1.0
mkdir go-loong64-abi1.0
if ! tar -xzf go-loong64-abi1.0.tar.gz -C go-loong64-abi1.0 --strip-components=1; then
echo "Error: Failed to extract patched Go compiler"
return 1
fi
rm go-loong64-abi1.0.tar.gz
# Download and setup GCC toolchain for old-world
if ! curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/loongson-gnu-toolchain-8.3.novec-x86_64-loongarch64-linux-gnu-rc1.1.tar.xz" \
-o gcc8-loong64-abi1.0.tar.xz; then
echo "Error: Failed to download GCC toolchain for old-world ABI1.0"
if [ -n "$GITHUB_TOKEN" ]; then
echo "Error output from curl:"
curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/loongson-gnu-toolchain-8.3.novec-x86_64-loongarch64-linux-gnu-rc1.1.tar.xz" \
-o gcc8-loong64-abi1.0.tar.xz || true
fi
return 1
fi
rm -rf gcc8-loong64-abi1.0
mkdir gcc8-loong64-abi1.0
if ! tar -Jxf gcc8-loong64-abi1.0.tar.xz -C gcc8-loong64-abi1.0 --strip-components=1; then
echo "Error: Failed to extract GCC toolchain"
return 1
fi
rm gcc8-loong64-abi1.0.tar.xz
# Setup separate cache directory for ABI1.0 to avoid cache pollution
abi1_cache_dir="$(pwd)/go-loong64-abi1.0-cache"
mkdir -p "$abi1_cache_dir"
echo "Using separate cache directory for ABI1.0: $abi1_cache_dir"
# Use patched Go compiler for old-world build (critical for ABI1.0 compatibility)
echo "Building with patched Go compiler for old-world ABI1.0..."
echo "Using isolated cache directory: $abi1_cache_dir"
# Use env command to set environment variables locally without affecting global environment
if ! env GOOS=linux GOARCH=loong64 \
CC="$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-gcc" \
CXX="$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-g++" \
CGO_ENABLED=1 \
GOCACHE="$abi1_cache_dir" \
$(pwd)/go-loong64-abi1.0/bin/go build -a -o "$output_file" -ldflags="$ldflags" -tags=jsoniter .; then
echo "Error: Build failed with patched Go compiler"
echo "Attempting retry with cache cleanup..."
env GOCACHE="$abi1_cache_dir" $(pwd)/go-loong64-abi1.0/bin/go clean -cache
if ! env GOOS=linux GOARCH=loong64 \
CC="$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-gcc" \
CXX="$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-g++" \
CGO_ENABLED=1 \
GOCACHE="$abi1_cache_dir" \
$(pwd)/go-loong64-abi1.0/bin/go build -a -o "$output_file" -ldflags="$ldflags" -tags=jsoniter .; then
echo "Error: Build failed again after cache cleanup"
echo "Build environment details:"
echo "GOOS=linux"
echo "GOARCH=loong64"
echo "CC=$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-gcc"
echo "CXX=$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-g++"
echo "CGO_ENABLED=1"
echo "GOCACHE=$abi1_cache_dir"
echo "Go version: $($(pwd)/go-loong64-abi1.0/bin/go version)"
echo "GCC version: $($(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-gcc --version | head -1)"
return 1
fi
fi
else
# Setup abi2.0 toolchain for new world glibc build
echo "Setting up new-world ABI2.0 toolchain..."
if ! curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/cross-tools/releases/download/20250507/x86_64-cross-tools-loongarch64-unknown-linux-gnu-legacy.tar.xz" \
-o gcc12-loong64-abi2.0.tar.xz; then
echo "Error: Failed to download GCC toolchain for new-world ABI2.0"
if [ -n "$GITHUB_TOKEN" ]; then
echo "Error output from curl:"
curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/cross-tools/releases/download/20250507/x86_64-cross-tools-loongarch64-unknown-linux-gnu-legacy.tar.xz" \
-o gcc12-loong64-abi2.0.tar.xz || true
fi
return 1
fi
rm -rf gcc12-loong64-abi2.0
mkdir gcc12-loong64-abi2.0
if ! tar -Jxf gcc12-loong64-abi2.0.tar.xz -C gcc12-loong64-abi2.0 --strip-components=1; then
echo "Error: Failed to extract GCC toolchain"
return 1
fi
rm gcc12-loong64-abi2.0.tar.xz
export GOOS=linux
export GOARCH=loong64
export CC=$(pwd)/gcc12-loong64-abi2.0/bin/loongarch64-unknown-linux-gnu-gcc
export CXX=$(pwd)/gcc12-loong64-abi2.0/bin/loongarch64-unknown-linux-gnu-g++
export CGO_ENABLED=1
# Use standard Go compiler for new-world build
echo "Building with standard Go compiler for new-world ABI2.0..."
if ! go build -a -o "$output_file" -ldflags="$ldflags" -tags=jsoniter .; then
echo "Error: Build failed with standard Go compiler"
echo "Attempting retry with cache cleanup..."
go clean -cache
if ! go build -a -o "$output_file" -ldflags="$ldflags" -tags=jsoniter .; then
echo "Error: Build failed again after cache cleanup"
echo "Build environment details:"
echo "GOOS=$GOOS"
echo "GOARCH=$GOARCH"
echo "CC=$CC"
echo "CXX=$CXX"
echo "CGO_ENABLED=$CGO_ENABLED"
echo "Go version: $(go version)"
echo "GCC version: $($CC --version | head -1)"
return 1
fi
fi
fi
} }
BuildReleaseLinuxMusl() { BuildReleaseLinuxMusl() {
@ -249,6 +441,7 @@ BuildReleaseLinuxMuslArm() {
done done
} }
BuildReleaseAndroid() { BuildReleaseAndroid() {
rm -rf .git/ rm -rf .git/
mkdir -p "build" mkdir -p "build"
@ -278,6 +471,7 @@ BuildReleaseFreeBSD() {
freebsd_version=$(eval "curl -fsSL --max-time 2 $githubAuthArgs \"https://api.github.com/repos/freebsd/freebsd-src/tags\"" | \ freebsd_version=$(eval "curl -fsSL --max-time 2 $githubAuthArgs \"https://api.github.com/repos/freebsd/freebsd-src/tags\"" | \
jq -r '.[].name' | \ jq -r '.[].name' | \
grep '^release/14\.' | \ grep '^release/14\.' | \
grep -v -- '-p[0-9]*$' | \
sort -V | \ sort -V | \
tail -1 | \ tail -1 | \
sed 's/release\///' | \ sed 's/release\///' | \
@ -343,7 +537,7 @@ MakeRelease() {
tar -czvf compress/"$i$liteSuffix".tar.gz "$appName" tar -czvf compress/"$i$liteSuffix".tar.gz "$appName"
rm -f "$appName" rm -f "$appName"
done done
for i in $(find . -type f -name "$appName-windows-*"); do for i in $(find . -type f \( -name "$appName-windows-*" -o -name "$appName-windows7-*" \)); do
cp "$i" "$appName".exe cp "$i" "$appName".exe
zip compress/$(echo $i | sed 's/\.[^.]*$//')$liteSuffix.zip "$appName".exe zip compress/$(echo $i | sed 's/\.[^.]*$//')$liteSuffix.zip "$appName".exe
rm -f "$appName".exe rm -f "$appName".exe
@ -390,7 +584,7 @@ for arg in "$@"; do
done done
if [ "$buildType" = "dev" ]; then if [ "$buildType" = "dev" ]; then
FetchWebDev FetchWebRolling
if [ "$dockerType" = "docker" ]; then if [ "$dockerType" = "docker" ]; then
BuildDocker BuildDocker
elif [ "$dockerType" = "docker-multiplatform" ]; then elif [ "$dockerType" = "docker-multiplatform" ]; then
@ -402,7 +596,7 @@ if [ "$buildType" = "dev" ]; then
fi fi
elif [ "$buildType" = "release" -o "$buildType" = "beta" ]; then elif [ "$buildType" = "release" -o "$buildType" = "beta" ]; then
if [ "$buildType" = "beta" ]; then if [ "$buildType" = "beta" ]; then
FetchWebDev FetchWebRolling
else else
FetchWebRelease FetchWebRelease
fi fi
@ -483,4 +677,5 @@ else
echo -e " $0 release" echo -e " $0 release"
echo -e " $0 release lite" echo -e " $0 release lite"
echo -e " $0 release docker lite" echo -e " $0 release docker lite"
echo -e " $0 release linux_musl"
fi fi


@ -4,6 +4,8 @@ Copyright © 2022 NAME HERE <EMAIL ADDRESS>
package cmd package cmd
import ( import (
"fmt"
"github.com/OpenListTeam/OpenList/v4/internal/conf" "github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/op" "github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/setting" "github.com/OpenListTeam/OpenList/v4/internal/setting"
@ -24,10 +26,11 @@ var AdminCmd = &cobra.Command{
if err != nil { if err != nil {
utils.Log.Errorf("failed get admin user: %+v", err) utils.Log.Errorf("failed get admin user: %+v", err)
} else { } else {
utils.Log.Infof("Admin user's username: %s", admin.Username) utils.Log.Infof("get admin user from CLI")
utils.Log.Infof("The password can only be output at the first startup, and then stored as a hash value, which cannot be reversed") fmt.Println("Admin user's username:", admin.Username)
utils.Log.Infof("You can reset the password with a random string by running [openlist admin random]") fmt.Println("The password can only be output at the first startup, and then stored as a hash value, which cannot be reversed")
utils.Log.Infof("You can also set a new password by running [openlist admin set NEW_PASSWORD]") fmt.Println("You can reset the password with a random string by running [openlist admin random]")
fmt.Println("You can also set a new password by running [openlist admin set NEW_PASSWORD]")
} }
}, },
} }
@ -36,6 +39,7 @@ var RandomPasswordCmd = &cobra.Command{
Use: "random", Use: "random",
Short: "Reset admin user's password to a random string", Short: "Reset admin user's password to a random string",
Run: func(cmd *cobra.Command, args []string) { Run: func(cmd *cobra.Command, args []string) {
utils.Log.Infof("reset admin user's password to a random string from CLI")
newPwd := random.String(8) newPwd := random.String(8)
setAdminPassword(newPwd) setAdminPassword(newPwd)
}, },
@ -44,12 +48,12 @@ var RandomPasswordCmd = &cobra.Command{
var SetPasswordCmd = &cobra.Command{ var SetPasswordCmd = &cobra.Command{
Use: "set", Use: "set",
Short: "Set admin user's password", Short: "Set admin user's password",
Run: func(cmd *cobra.Command, args []string) { RunE: func(cmd *cobra.Command, args []string) error {
if len(args) == 0 { if len(args) == 0 {
utils.Log.Errorf("Please enter the new password") return fmt.Errorf("Please enter the new password")
return
} }
setAdminPassword(args[0]) setAdminPassword(args[0])
return nil
}, },
} }
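
The switch from Run to RunE above is worth noting: with RunE, a missing argument becomes an error that cobra prints itself and turns into a non-zero exit code, rather than a log line followed by a successful exit. A self-contained sketch of the pattern (illustrative names, not the project's actual file):

    package main

    import (
        "fmt"
        "os"

        "github.com/spf13/cobra"
    )

    func main() {
        setCmd := &cobra.Command{
            Use:   "set",
            Short: "Set admin user's password",
            RunE: func(cmd *cobra.Command, args []string) error {
                if len(args) == 0 {
                    // Returned errors are printed by cobra and surface as a failure.
                    return fmt.Errorf("please enter the new password")
                }
                fmt.Println("password set to:", args[0]) // placeholder for setAdminPassword
                return nil
            },
        }
        root := &cobra.Command{Use: "openlist"}
        root.AddCommand(setCmd)
        if err := root.Execute(); err != nil {
            os.Exit(1) // cobra has already printed the error
        }
    }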
@ -60,7 +64,8 @@ var ShowTokenCmd = &cobra.Command{
Init() Init()
defer Release() defer Release()
token := setting.GetStr(conf.Token) token := setting.GetStr(conf.Token)
utils.Log.Infof("Admin token: %s", token) utils.Log.Infof("show admin token from CLI")
fmt.Println("Admin token:", token)
}, },
} }
@ -77,9 +82,10 @@ func setAdminPassword(pwd string) {
utils.Log.Errorf("failed update admin user: %+v", err) utils.Log.Errorf("failed update admin user: %+v", err)
return return
} }
utils.Log.Infof("admin user has been updated:") utils.Log.Infof("admin user has been update from CLI")
utils.Log.Infof("username: %s", admin.Username) fmt.Println("admin user has been updated:")
utils.Log.Infof("password: %s", pwd) fmt.Println("username:", admin.Username)
fmt.Println("password:", pwd)
DelAdminCacheOnline() DelAdminCacheOnline()
} }


@ -4,6 +4,8 @@ Copyright © 2022 NAME HERE <EMAIL ADDRESS>
package cmd package cmd
import ( import (
"fmt"
"github.com/OpenListTeam/OpenList/v4/internal/op" "github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/spf13/cobra" "github.com/spf13/cobra"
@ -24,7 +26,8 @@ var Cancel2FACmd = &cobra.Command{
if err != nil { if err != nil {
utils.Log.Errorf("failed to cancel 2FA: %+v", err) utils.Log.Errorf("failed to cancel 2FA: %+v", err)
} else { } else {
utils.Log.Info("2FA canceled") utils.Log.Infof("2FA is canceled from CLI")
fmt.Println("2FA canceled")
DelAdminCacheOnline() DelAdminCacheOnline()
} }
} }


@ -16,7 +16,7 @@ var RootCmd = &cobra.Command{
Short: "A file list program that supports multiple storage.", Short: "A file list program that supports multiple storage.",
Long: `A file list program that supports multiple storage, Long: `A file list program that supports multiple storage,
built with love by OpenListTeam. built with love by OpenListTeam.
Complete documentation is available at https://docs.openlist.team/`, Complete documentation is available at https://doc.oplist.org/`,
} }
func Execute() { func Execute() {


@ -19,6 +19,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/fs" "github.com/OpenListTeam/OpenList/v4/internal/fs"
"github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/OpenList/v4/server" "github.com/OpenListTeam/OpenList/v4/server"
"github.com/OpenListTeam/OpenList/v4/server/middlewares"
"github.com/OpenListTeam/sftpd-openlist" "github.com/OpenListTeam/sftpd-openlist"
ftpserver "github.com/fclairamb/ftpserverlib" ftpserver "github.com/fclairamb/ftpserverlib"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
@ -47,7 +48,15 @@ the address is defined in config file`,
gin.SetMode(gin.ReleaseMode) gin.SetMode(gin.ReleaseMode)
} }
r := gin.New() r := gin.New()
r.Use(gin.LoggerWithWriter(log.StandardLogger().Out), gin.RecoveryWithWriter(log.StandardLogger().Out))
// gin log
if conf.Conf.Log.Filter.Enable {
r.Use(middlewares.FilteredLogger())
} else {
r.Use(gin.LoggerWithWriter(log.StandardLogger().Out))
}
r.Use(gin.RecoveryWithWriter(log.StandardLogger().Out))
server.Init(r) server.Init(r)
var httpHandler http.Handler = r var httpHandler http.Handler = r
if conf.Conf.Scheme.EnableH2c { if conf.Conf.Scheme.EnableH2c {
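
middlewares.FilteredLogger itself is not part of this diff, so its exact behavior is an assumption here; the general technique of a filtered gin access log can be sketched with gin's stock LoggerWithConfig, which skips chosen paths while recovery stays unconditional:

    package main

    import (
        "github.com/gin-gonic/gin"
        log "github.com/sirupsen/logrus"
    )

    func main() {
        gin.SetMode(gin.ReleaseMode)
        r := gin.New()
        // Log to the same writer as the application logger, but skip
        // noisy endpoints; the paths here are purely illustrative.
        r.Use(gin.LoggerWithConfig(gin.LoggerConfig{
            Output:    log.StandardLogger().Out,
            SkipPaths: []string{"/ping", "/favicon.ico"},
        }))
        r.Use(gin.RecoveryWithWriter(log.StandardLogger().Out))
        r.GET("/ping", func(c *gin.Context) { c.String(200, "pong") })
        _ = r.Run(":5244")
    }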
@ -56,6 +65,7 @@ the address is defined in config file`,
var httpSrv, httpsSrv, unixSrv *http.Server var httpSrv, httpsSrv, unixSrv *http.Server
if conf.Conf.Scheme.HttpPort != -1 { if conf.Conf.Scheme.HttpPort != -1 {
httpBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpPort) httpBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpPort)
fmt.Printf("start HTTP server @ %s\n", httpBase)
utils.Log.Infof("start HTTP server @ %s", httpBase) utils.Log.Infof("start HTTP server @ %s", httpBase)
httpSrv = &http.Server{Addr: httpBase, Handler: httpHandler} httpSrv = &http.Server{Addr: httpBase, Handler: httpHandler}
go func() { go func() {
@ -67,6 +77,7 @@ the address is defined in config file`,
} }
if conf.Conf.Scheme.HttpsPort != -1 { if conf.Conf.Scheme.HttpsPort != -1 {
httpsBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpsPort) httpsBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpsPort)
fmt.Printf("start HTTPS server @ %s\n", httpsBase)
utils.Log.Infof("start HTTPS server @ %s", httpsBase) utils.Log.Infof("start HTTPS server @ %s", httpsBase)
httpsSrv = &http.Server{Addr: httpsBase, Handler: r} httpsSrv = &http.Server{Addr: httpsBase, Handler: r}
go func() { go func() {
@ -77,6 +88,7 @@ the address is defined in config file`,
}() }()
} }
if conf.Conf.Scheme.UnixFile != "" { if conf.Conf.Scheme.UnixFile != "" {
fmt.Printf("start unix server @ %s\n", conf.Conf.Scheme.UnixFile)
utils.Log.Infof("start unix server @ %s", conf.Conf.Scheme.UnixFile) utils.Log.Infof("start unix server @ %s", conf.Conf.Scheme.UnixFile)
unixSrv = &http.Server{Handler: httpHandler} unixSrv = &http.Server{Handler: httpHandler}
go func() { go func() {
@ -105,6 +117,7 @@ the address is defined in config file`,
s3r.Use(gin.LoggerWithWriter(log.StandardLogger().Out), gin.RecoveryWithWriter(log.StandardLogger().Out)) s3r.Use(gin.LoggerWithWriter(log.StandardLogger().Out), gin.RecoveryWithWriter(log.StandardLogger().Out))
server.InitS3(s3r) server.InitS3(s3r)
s3Base := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.S3.Port) s3Base := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.S3.Port)
fmt.Printf("start S3 server @ %s\n", s3Base)
utils.Log.Infof("start S3 server @ %s", s3Base) utils.Log.Infof("start S3 server @ %s", s3Base)
go func() { go func() {
var err error var err error
@ -129,6 +142,7 @@ the address is defined in config file`,
if err != nil { if err != nil {
utils.Log.Fatalf("failed to start ftp driver: %s", err.Error()) utils.Log.Fatalf("failed to start ftp driver: %s", err.Error())
} else { } else {
fmt.Printf("start ftp server on %s\n", conf.Conf.FTP.Listen)
utils.Log.Infof("start ftp server on %s", conf.Conf.FTP.Listen) utils.Log.Infof("start ftp server on %s", conf.Conf.FTP.Listen)
go func() { go func() {
ftpServer = ftpserver.NewFtpServer(ftpDriver) ftpServer = ftpserver.NewFtpServer(ftpDriver)
@ -147,6 +161,7 @@ the address is defined in config file`,
if err != nil { if err != nil {
utils.Log.Fatalf("failed to start sftp driver: %s", err.Error()) utils.Log.Fatalf("failed to start sftp driver: %s", err.Error())
} else { } else {
fmt.Printf("start sftp server on %s", conf.Conf.SFTP.Listen)
utils.Log.Infof("start sftp server on %s", conf.Conf.SFTP.Listen) utils.Log.Infof("start sftp server on %s", conf.Conf.SFTP.Listen)
go func() { go func() {
sftpServer = sftpd.NewSftpServer(sftpDriver) sftpServer = sftpd.NewSftpServer(sftpDriver)


@ -4,6 +4,7 @@ Copyright © 2023 NAME HERE <EMAIL ADDRESS>
package cmd package cmd
import ( import (
"fmt"
"os" "os"
"strconv" "strconv"
@ -22,28 +23,61 @@ var storageCmd = &cobra.Command{
} }
var disableStorageCmd = &cobra.Command{ var disableStorageCmd = &cobra.Command{
Use: "disable", Use: "disable [mount path]",
Short: "Disable a storage", Short: "Disable a storage by mount path",
Run: func(cmd *cobra.Command, args []string) { RunE: func(cmd *cobra.Command, args []string) error {
if len(args) < 1 { if len(args) < 1 {
utils.Log.Errorf("mount path is required") return fmt.Errorf("mount path is required")
return
} }
mountPath := args[0] mountPath := args[0]
Init() Init()
defer Release() defer Release()
storage, err := db.GetStorageByMountPath(mountPath) storage, err := db.GetStorageByMountPath(mountPath)
if err != nil { if err != nil {
utils.Log.Errorf("failed to query storage: %+v", err) return fmt.Errorf("failed to query storage: %+v", err)
} else { }
storage.Disabled = true storage.Disabled = true
err = db.UpdateStorage(storage) err = db.UpdateStorage(storage)
if err != nil { if err != nil {
utils.Log.Errorf("failed to update storage: %+v", err) return fmt.Errorf("failed to update storage: %+v", err)
} else { }
utils.Log.Infof("Storage with mount path [%s] have been disabled", mountPath) utils.Log.Infof("Storage with mount path [%s] has been disabled from CLI", mountPath)
fmt.Printf("Storage with mount path [%s] has been disabled\n", mountPath)
return nil
},
}
var deleteStorageCmd = &cobra.Command{
Use: "delete [id]",
Short: "Delete a storage by id",
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) < 1 {
return fmt.Errorf("id is required")
}
id, err := strconv.Atoi(args[0])
if err != nil {
return fmt.Errorf("id must be a number")
}
if force, _ := cmd.Flags().GetBool("force"); !force { // prompt for confirmation unless --force is given
fmt.Printf("Are you sure you want to delete storage with id [%d]? [y/N]: ", id)
var confirm string
fmt.Scanln(&confirm)
if confirm != "y" && confirm != "Y" {
fmt.Println("Delete operation cancelled.")
return nil
} }
} }
Init()
defer Release()
err = db.DeleteStorageById(uint(id))
if err != nil {
return fmt.Errorf("failed to delete storage by id: %+v", err)
}
utils.Log.Infof("Storage with id [%d] have been deleted from CLI", id)
fmt.Printf("Storage with id [%d] have been deleted\n", id)
return nil
}, },
} }
@ -88,14 +122,14 @@ var storageTableHeight int
var listStorageCmd = &cobra.Command{ var listStorageCmd = &cobra.Command{
Use: "list", Use: "list",
Short: "List all storages", Short: "List all storages",
Run: func(cmd *cobra.Command, args []string) { RunE: func(cmd *cobra.Command, args []string) error {
Init() Init()
defer Release() defer Release()
storages, _, err := db.GetStorages(1, -1) storages, _, err := db.GetStorages(1, -1)
if err != nil { if err != nil {
utils.Log.Errorf("failed to query storages: %+v", err) return fmt.Errorf("failed to query storages: %+v", err)
} else { } else {
utils.Log.Infof("Found %d storages", len(storages)) fmt.Printf("Found %d storages\n", len(storages))
columns := []table.Column{ columns := []table.Column{
{Title: "ID", Width: 4}, {Title: "ID", Width: 4},
{Title: "Driver", Width: 16}, {Title: "Driver", Width: 16},
@ -138,10 +172,11 @@ var listStorageCmd = &cobra.Command{
m := model{t} m := model{t}
if _, err := tea.NewProgram(m).Run(); err != nil { if _, err := tea.NewProgram(m).Run(); err != nil {
utils.Log.Errorf("failed to run program: %+v", err) fmt.Printf("failed to run program: %+v\n", err)
os.Exit(1) os.Exit(1)
} }
} }
return nil
}, },
} }
@ -151,6 +186,8 @@ func init() {
storageCmd.AddCommand(disableStorageCmd) storageCmd.AddCommand(disableStorageCmd)
storageCmd.AddCommand(listStorageCmd) storageCmd.AddCommand(listStorageCmd)
storageCmd.PersistentFlags().IntVarP(&storageTableHeight, "height", "H", 10, "Table height") storageCmd.PersistentFlags().IntVarP(&storageTableHeight, "height", "H", 10, "Table height")
storageCmd.AddCommand(deleteStorageCmd)
deleteStorageCmd.Flags().BoolP("force", "f", false, "Force delete without confirmation")
// Here you will define your flags and configuration settings. // Here you will define your flags and configuration settings.
// Cobra supports Persistent Flags which will work for this command // Cobra supports Persistent Flags which will work for this command


@ -186,7 +186,7 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
preHash = strings.ToUpper(preHash) preHash = strings.ToUpper(preHash)
fullHash := stream.GetHash().GetHash(utils.SHA1) fullHash := stream.GetHash().GetHash(utils.SHA1)
if len(fullHash) != utils.SHA1.Width { if len(fullHash) != utils.SHA1.Width {
_, fullHash, err = streamPkg.CacheFullInTempFileAndHash(stream, utils.SHA1) _, fullHash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA1)
if err != nil { if err != nil {
return nil, err return nil, err
} }


@ -18,7 +18,6 @@ var config = driver.Config{
Name: "115 Cloud", Name: "115 Cloud",
DefaultRoot: "0", DefaultRoot: "0",
// OnlyProxy: true, // OnlyProxy: true,
// OnlyLocal: true,
// NoOverwriteUpload: true, // NoOverwriteUpload: true,
} }


@ -321,7 +321,7 @@ func (d *Pan115) UploadByMultipart(ctx context.Context, params *driver115.Upload
err error err error
) )
tmpF, err := s.CacheFullInTempFile() tmpF, err := s.CacheFullAndWriter(&up, nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }


@ -8,6 +8,7 @@ import (
"strings" "strings"
"time" "time"
sdk "github.com/OpenListTeam/115-sdk-go"
"github.com/OpenListTeam/OpenList/v4/cmd/flags" "github.com/OpenListTeam/OpenList/v4/cmd/flags"
"github.com/OpenListTeam/OpenList/v4/drivers/base" "github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver" "github.com/OpenListTeam/OpenList/v4/internal/driver"
@ -16,7 +17,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/stream" "github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range" "github.com/OpenListTeam/OpenList/v4/pkg/http_range"
"github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/OpenListTeam/OpenList/v4/pkg/utils"
sdk "github.com/OpenListTeam/115-sdk-go"
"golang.org/x/time/rate" "golang.org/x/time/rate"
) )
@ -131,6 +131,23 @@ func (d *Open115) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
}, nil }, nil
} }
func (d *Open115) GetObjInfo(ctx context.Context, path string) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
resp, err := d.client.GetFolderInfoByPath(ctx, path)
if err != nil {
return nil, err
}
return &Obj{
Fid: resp.FileID,
Fn: resp.FileName,
Fc: resp.FileCategory,
Sha1: resp.Sha1,
Pc: resp.PickCode,
}, nil
}
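
The new GetObjInfo calls d.WaitLimit(ctx) before touching the API, the same guard every other method in this driver uses. The diff does not show WaitLimit's body; a common way to build such a guard is a shared token bucket from golang.org/x/time/rate, sketched below with illustrative names rather than the driver's actual fields:

    package main

    import (
        "context"
        "fmt"
        "time"

        "golang.org/x/time/rate"
    )

    // apiGuard sketches the WaitLimit pattern: every driver call blocks here
    // first, so all requests share one token bucket.
    type apiGuard struct {
        limiter *rate.Limiter
    }

    func (g *apiGuard) WaitLimit(ctx context.Context) error {
        if g.limiter != nil {
            return g.limiter.Wait(ctx) // blocks until a token is available or ctx is done
        }
        return nil // no limiter configured: pass through
    }

    func main() {
        g := &apiGuard{limiter: rate.NewLimiter(rate.Every(time.Second), 1)} // 1 request/second
        for i := 0; i < 3; i++ {
            if err := g.WaitLimit(context.Background()); err != nil {
                panic(err)
            }
            fmt.Println("request", i, time.Now().Format("15:04:05"))
        }
    }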
func (d *Open115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) { func (d *Open115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil { if err := d.WaitLimit(ctx); err != nil {
return nil, err return nil, err
@ -222,7 +239,7 @@ func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
} }
sha1 := file.GetHash().GetHash(utils.SHA1) sha1 := file.GetHash().GetHash(utils.SHA1)
if len(sha1) != utils.SHA1.Width { if len(sha1) != utils.SHA1.Width {
_, sha1, err = stream.CacheFullInTempFileAndHash(file, utils.SHA1) _, sha1, err = stream.CacheFullAndHash(file, &up, utils.SHA1)
if err != nil { if err != nil {
return err return err
} }
@ -252,6 +269,7 @@ func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
return err return err
} }
if resp.Status == 2 { if resp.Status == 2 {
up(100)
return nil return nil
} }
// 2. two way verify // 2. two way verify
@ -286,6 +304,7 @@ func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
return err return err
} }
if resp.Status == 2 { if resp.Status == 2 {
up(100)
return nil return nil
} }
} }
@ -302,6 +321,22 @@ func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
return nil return nil
} }
func (d *Open115) OfflineDownload(ctx context.Context, uris []string, dstDir model.Obj) ([]string, error) {
return d.client.AddOfflineTaskURIs(ctx, uris, dstDir.GetID())
}
func (d *Open115) DeleteOfflineTask(ctx context.Context, infoHash string, deleteFiles bool) error {
return d.client.DeleteOfflineTask(ctx, infoHash, deleteFiles)
}
func (d *Open115) OfflineList(ctx context.Context) (*sdk.OfflineTaskListResp, error) {
resp, err := d.client.OfflineTaskList(ctx, 1)
if err != nil {
return nil, err
}
return resp, nil
}
// func (d *Open115) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) { // func (d *Open115) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
// // TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional // // TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
// return nil, errs.NotImplement // return nil, errs.NotImplement


@ -11,23 +11,14 @@ type Addition struct {
// define other // define other
OrderBy string `json:"order_by" type:"select" options:"file_name,file_size,user_utime,file_type"` OrderBy string `json:"order_by" type:"select" options:"file_name,file_size,user_utime,file_type"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc"` OrderDirection string `json:"order_direction" type:"select" options:"asc,desc"`
LimitRate float64 `json:"limit_rate,string" type:"float" default:"1" help:"limit all api request rate ([limit]r/1s)"` LimitRate float64 `json:"limit_rate" type:"float" default:"1" help:"limit all api request rate ([limit]r/1s)"`
AccessToken string `json:"access_token" required:"true"` AccessToken string `json:"access_token" required:"true"`
RefreshToken string `json:"refresh_token" required:"true"` RefreshToken string `json:"refresh_token" required:"true"`
} }
var config = driver.Config{ var config = driver.Config{
Name: "115 Open", Name: "115 Open",
LocalSort: false,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: false,
NeedMs: false,
DefaultRoot: "0", DefaultRoot: "0",
CheckStatus: false,
Alert: "",
NoOverwriteUpload: false,
} }
func init() { func init() {


@ -6,12 +6,13 @@ import (
"io" "io"
"time" "time"
sdk "github.com/OpenListTeam/115-sdk-go"
"github.com/OpenListTeam/OpenList/v4/internal/driver" "github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/internal/model"
streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/aliyun/aliyun-oss-go-sdk/oss" "github.com/aliyun/aliyun-oss-go-sdk/oss"
"github.com/avast/retry-go" "github.com/avast/retry-go"
sdk "github.com/OpenListTeam/115-sdk-go"
) )
 func calPartSize(fileSize int64) int64 {

@ -69,9 +70,6 @@ func (d *Open115) singleUpload(ctx context.Context, tempF model.File, tokenResp
 // }

 func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress, tokenResp *sdk.UploadGetTokenResp, initResp *sdk.UploadInitResp) error {
-    fileSize := stream.GetSize()
-    chunkSize := calPartSize(fileSize)
     ossClient, err := oss.New(tokenResp.Endpoint, tokenResp.AccessKeyId, tokenResp.AccessKeySecret, oss.SecurityToken(tokenResp.SecurityToken))
     if err != nil {
         return err

@ -86,6 +84,13 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
         return err
     }
+    fileSize := stream.GetSize()
+    chunkSize := calPartSize(fileSize)
+    ss, err := streamPkg.NewStreamSectionReader(stream, int(chunkSize), &up)
+    if err != nil {
+        return err
+    }
     partNum := (stream.GetSize() + chunkSize - 1) / chunkSize
     parts := make([]oss.UploadPart, partNum)
     offset := int64(0)

@ -98,10 +103,13 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
         if i == partNum {
             partSize = fileSize - (i-1)*chunkSize
         }
-        rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
-        err = retry.Do(func() error {
-            _ = rd.Reset()
+        rd, err := ss.GetSectionReader(offset, partSize)
+        if err != nil {
+            return err
+        }
         rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
+        err = retry.Do(func() error {
+            rd.Seek(0, io.SeekStart)
             part, err := bucket.UploadPart(imur, rateLimitedRd, partSize, int(i))
             if err != nil {
                 return err

@ -112,6 +120,7 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
             retry.Attempts(3),
             retry.DelayType(retry.BackOffDelay),
             retry.Delay(time.Second))
+        ss.FreeSectionReader(rd)
         if err != nil {
             return err
         }

@ -121,7 +130,7 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
         } else {
             offset += partSize
         }
-        up(float64(offset) / float64(fileSize))
+        up(float64(offset) * 100 / float64(fileSize))
     }

     // callbackRespBytes := make([]byte, 1024)
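The pattern this diff introduces — acquire one section reader per chunk, wrap it with a rate limiter, rewind to the start before every retry attempt, and free the section once retry.Do returns — generalizes to any chunked uploader. A minimal standalone sketch of the same lifecycle, using only the standard library (uploadChunk is a hypothetical stand-in for bucket.UploadPart):

    package main

    import (
        "bytes"
        "fmt"
        "io"
    )

    // uploadChunk is a hypothetical stand-in for the real part-upload call.
    func uploadChunk(r io.Reader, n int64) error {
        _, err := io.CopyN(io.Discard, r, n)
        return err
    }

    func main() {
        data := bytes.NewReader([]byte("0123456789abcdef"))
        const chunk = int64(4)
        size := int64(data.Len())
        for off := int64(0); off < size; off += chunk {
            n := min(chunk, size-off)
            sec := io.NewSectionReader(data, off, n) // one reusable reader per chunk
            for attempt := 0; attempt < 3; attempt++ {
                sec.Seek(0, io.SeekStart) // rewind before every (re)try
                if err := uploadChunk(sec, n); err == nil {
                    break
                }
            }
        }
        fmt.Println("done")
    }

The Seek-before-retry step matters because a failed attempt may have consumed part of the section; without it, a retry would upload a truncated chunk.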

View File

@ -19,11 +19,6 @@ type Addition struct {

 var config = driver.Config{
     Name:        "115 Share",
     DefaultRoot: "0",
-    // OnlyProxy:      true,
-    // OnlyLocal:      true,
-    CheckStatus:       false,
-    Alert:             "",
-    NoOverwriteUpload: true,
     NoUpload:    true,
 }

View File

@ -64,14 +64,6 @@ func (d *Pan123) List(ctx context.Context, dir model.Obj, args model.ListArgs) (

 func (d *Pan123) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
     if f, ok := file.(File); ok {
-        //var resp DownResp
-        var headers map[string]string
-        if !utils.IsLocalIPAddr(args.IP) {
-            headers = map[string]string{
-                //"X-Real-IP": "1.1.1.1",
-                "X-Forwarded-For": args.IP,
-            }
-        }
         data := base.Json{
             "driveId": 0,
             "etag":    f.Etag,

@ -83,25 +75,27 @@ func (d *Pan123) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
         }
         resp, err := d.Request(DownloadInfo, http.MethodPost, func(req *resty.Request) {
-            req.SetBody(data).SetHeaders(headers)
+            req.SetBody(data)
         }, nil)
         if err != nil {
             return nil, err
         }
         downloadUrl := utils.Json.Get(resp, "data", "DownloadUrl").ToString()
-        u, err := url.Parse(downloadUrl)
+        ou, err := url.Parse(downloadUrl)
         if err != nil {
             return nil, err
         }
-        nu := u.Query().Get("params")
+        u_ := ou.String()
+        nu := ou.Query().Get("params")
         if nu != "" {
             du, _ := base64.StdEncoding.DecodeString(nu)
-            u, err = url.Parse(string(du))
+            u, err := url.Parse(string(du))
             if err != nil {
                 return nil, err
             }
+            u_ = u.String()
         }
-        u_ := u.String()
         log.Debug("download url: ", u_)
         res, err := base.NoRedirectClient.R().SetHeader("Referer", "https://www.123pan.com/").Get(u_)
         if err != nil {

@ -118,7 +112,7 @@ func (d *Pan123) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
             link.URL = utils.Json.Get(res.Body(), "data", "redirect_url").ToString()
         }
         link.Header = http.Header{
-            "Referer": []string{"https://www.123pan.com/"},
+            "Referer": []string{fmt.Sprintf("%s://%s/", ou.Scheme, ou.Host)},
         }
         return &link, nil
     } else {

@ -188,7 +182,7 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, file model.FileStrea
     etag := file.GetHash().GetHash(utils.MD5)
     var err error
     if len(etag) < utils.MD5.Width {
-        _, etag, err = stream.CacheFullInTempFileAndHash(file, utils.MD5)
+        _, etag, err = stream.CacheFullAndHash(file, &up, utils.MD5)
         if err != nil {
             return err
         }
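The `params` handling above decodes a nested download URL that the API ships base64-encoded inside the outer URL's query string, falling back to the outer URL when the parameter is absent; the Referer is then derived from the outer host instead of being hard-coded. A self-contained sketch of the same dance (the example URLs are made up):

    package main

    import (
        "encoding/base64"
        "fmt"
        "net/url"
    )

    func main() {
        inner := "https://dl.example.com/real/file?sig=abc" // hypothetical nested URL
        outer := "https://cdn.example.com/redirect?params=" +
            url.QueryEscape(base64.StdEncoding.EncodeToString([]byte(inner)))

        ou, _ := url.Parse(outer)
        target := ou.String() // default: use the outer URL as-is
        if p := ou.Query().Get("params"); p != "" {
            if du, err := base64.StdEncoding.DecodeString(p); err == nil {
                if u, err := url.Parse(string(du)); err == nil {
                    target = u.String() // prefer the decoded inner URL
                }
            }
        }
        // Referer is derived from the *outer* host, mirroring the driver change above.
        fmt.Printf("GET %s\nReferer: %s://%s/\n", target, ou.Scheme, ou.Host)
    }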

View File

@ -12,6 +12,7 @@ type Addition struct {
     //OrderBy        string `json:"order_by" type:"select" options:"file_id,file_name,size,update_at" default:"file_name"`
     //OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
     AccessToken  string
+    UploadThread int `json:"UploadThread" type:"number" default:"3" help:"the threads of upload"`
 }

 var config = driver.Config{

@ -22,6 +23,11 @@ var config = driver.Config{
 func init() {
     op.RegisterDriver(func() driver.Driver {
-        return &Pan123{}
+        // New default options must be set when the driver is registered
+        // so they take effect for users who already have the driver configured.
+        return &Pan123{
+            Addition: Addition{
+                UploadThread: 3,
+            },
+        }
     })
 }
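Setting the default inside the RegisterDriver factory means even storages configured before the option existed pick it up, since every load passes through the factory before stored config is overlaid. A toy illustration of the pattern (the types are stand-ins, not the real driver API):

    package main

    import "fmt"

    type Addition struct{ UploadThread int }
    type Driver struct{ Addition }

    var registry []func() *Driver

    func RegisterDriver(f func() *Driver) { registry = append(registry, f) }

    func main() {
        RegisterDriver(func() *Driver {
            // Defaults set here are the starting point before any stored
            // per-user config is applied, so existing users get them too.
            return &Driver{Addition{UploadThread: 3}}
        })
        d := registry[0]()
        fmt.Println(d.UploadThread) // 3
    }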

View File

@ -6,11 +6,16 @@ import (
     "io"
     "net/http"
     "strconv"
+    "time"

     "github.com/OpenListTeam/OpenList/v4/drivers/base"
     "github.com/OpenListTeam/OpenList/v4/internal/driver"
     "github.com/OpenListTeam/OpenList/v4/internal/model"
+    "github.com/OpenListTeam/OpenList/v4/internal/stream"
+    "github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
+    "github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
     "github.com/OpenListTeam/OpenList/v4/pkg/utils"
+    "github.com/avast/retry-go"
     "github.com/go-resty/resty/v2"
 )
@ -69,18 +74,21 @@ func (d *Pan123) completeS3(ctx context.Context, upReq *UploadResp, file model.F
 }

 func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, up driver.UpdateProgress) error {
-    tmpF, err := file.CacheFullInTempFile()
+    // fetch s3 pre signed urls
+    size := file.GetSize()
+    chunkSize := int64(16 * utils.MB)
+    chunkCount := 1
+    if size > chunkSize {
+        chunkCount = int((size + chunkSize - 1) / chunkSize)
+    }
+    ss, err := stream.NewStreamSectionReader(file, int(chunkSize), &up)
     if err != nil {
         return err
     }
-    // fetch s3 pre signed urls
-    size := file.GetSize()
-    chunkSize := min(size, 16*utils.MB)
-    chunkCount := int(size / chunkSize)
     lastChunkSize := size % chunkSize
-    if lastChunkSize > 0 {
-        chunkCount++
-    } else {
+    if lastChunkSize == 0 {
         lastChunkSize = chunkSize
     }
     // only 1 batch is allowed

@ -90,46 +98,57 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
         batchSize = 10
         getS3UploadUrl = d.getS3PreSignedUrls
     }
+    thread := min(int(chunkCount), d.UploadThread)
+    threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
+        retry.Attempts(3),
+        retry.Delay(time.Second),
+        retry.DelayType(retry.BackOffDelay))
     for i := 1; i <= chunkCount; i += batchSize {
-        if utils.IsCanceled(ctx) {
-            return ctx.Err()
+        if utils.IsCanceled(uploadCtx) {
+            break
         }
         start := i
         end := min(i+batchSize, chunkCount+1)
-        s3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, start, end)
+        s3PreSignedUrls, err := getS3UploadUrl(uploadCtx, upReq, start, end)
         if err != nil {
             return err
         }
         // upload each chunk
-        for j := start; j < end; j++ {
-            if utils.IsCanceled(ctx) {
-                return ctx.Err()
+        for cur := start; cur < end; cur++ {
+            if utils.IsCanceled(uploadCtx) {
+                break
             }
+            offset := int64(cur-1) * chunkSize
             curSize := chunkSize
-            if j == chunkCount {
+            if cur == chunkCount {
                 curSize = lastChunkSize
             }
-            err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.NewSectionReader(tmpF, chunkSize*int64(j-1), curSize), curSize, false, getS3UploadUrl)
-            if err != nil {
-                return err
-            }
-            up(float64(j) * 100 / float64(chunkCount))
-        }
-    }
-    // complete s3 upload
-    return d.completeS3(ctx, upReq, file, chunkCount > 1)
-}
-
-func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSignedUrls *S3PreSignedURLs, cur, end int, reader *io.SectionReader, curSize int64, retry bool, getS3UploadUrl func(ctx context.Context, upReq *UploadResp, start int, end int) (*S3PreSignedURLs, error)) error {
-    uploadUrl := s3PreSignedUrls.Data.PreSignedUrls[strconv.Itoa(cur)]
-    if uploadUrl == "" {
-        return fmt.Errorf("upload url is empty, s3PreSignedUrls: %+v", s3PreSignedUrls)
-    }
-    req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, reader))
-    if err != nil {
-        return err
-    }
-    req = req.WithContext(ctx)
-    req.ContentLength = curSize
-    //req.Header.Set("Content-Length", strconv.FormatInt(curSize, 10))
-    res, err := base.HttpClient.Do(req)
-    if err != nil {
-        return err
-    }
-    defer res.Body.Close()
-    if res.StatusCode == http.StatusForbidden {
-        if retry {
-            return fmt.Errorf("upload s3 chunk %d failed, status code: %d", cur, res.StatusCode)
-        }
-        // refresh s3 pre signed urls
-        newS3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, cur, end)
-        if err != nil {
-            return err
-        }
-        s3PreSignedUrls.Data.PreSignedUrls = newS3PreSignedUrls.Data.PreSignedUrls
-        // retry
-        reader.Seek(0, io.SeekStart)
-        return d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, cur, end, reader, curSize, true, getS3UploadUrl)
-    }
-    if res.StatusCode != http.StatusOK {
-        body, err := io.ReadAll(res.Body)
-        if err != nil {
-            return err
-        }
-        return fmt.Errorf("upload s3 chunk %d failed, status code: %d, body: %s", cur, res.StatusCode, body)
-    }
-    return nil
-}
+            var reader *stream.SectionReader
+            var rateLimitedRd io.Reader
+            threadG.GoWithLifecycle(errgroup.Lifecycle{
+                Before: func(ctx context.Context) error {
+                    if reader == nil {
+                        var err error
+                        reader, err = ss.GetSectionReader(offset, curSize)
+                        if err != nil {
+                            return err
+                        }
+                        rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
+                    }
+                    return nil
+                },
+                Do: func(ctx context.Context) error {
+                    reader.Seek(0, io.SeekStart)
+                    uploadUrl := s3PreSignedUrls.Data.PreSignedUrls[strconv.Itoa(cur)]
+                    if uploadUrl == "" {
+                        return fmt.Errorf("upload url is empty, s3PreSignedUrls: %+v", s3PreSignedUrls)
+                    }
+                    req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, rateLimitedRd)
+                    if err != nil {
+                        return err
+                    }
+                    req.ContentLength = curSize
+                    //req.Header.Set("Content-Length", strconv.FormatInt(curSize, 10))
+                    res, err := base.HttpClient.Do(req)
+                    if err != nil {
+                        return err
+                    }
+                    defer res.Body.Close()
+                    if res.StatusCode == http.StatusForbidden {
+                        _, err = singleflight.AnyGroup.Do(fmt.Sprintf("Pan123.newUpload_%p", threadG), func() (any, error) {
+                            newS3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, cur, end)
+                            if err != nil {
+                                return nil, err
+                            }
+                            s3PreSignedUrls.Data.PreSignedUrls = newS3PreSignedUrls.Data.PreSignedUrls
+                            return nil, nil
+                        })
+                        if err != nil {
+                            return err
+                        }
+                        return fmt.Errorf("upload s3 chunk %d failed, status code: %d", cur, res.StatusCode)
+                    }
+                    if res.StatusCode != http.StatusOK {
+                        body, err := io.ReadAll(res.Body)
+                        if err != nil {
+                            return err
+                        }
+                        return fmt.Errorf("upload s3 chunk %d failed, status code: %d, body: %s", cur, res.StatusCode, body)
+                    }
+                    progress := 10.0 + 85.0*float64(threadG.Success())/float64(chunkCount)
+                    up(progress)
+                    return nil
+                },
+                After: func(err error) {
+                    ss.FreeSectionReader(reader)
+                },
+            })
+        }
+    }
+    if err := threadG.Wait(); err != nil {
+        return err
+    }
+    defer up(100)
+    // complete s3 upload
+    return d.completeS3(ctx, upReq, file, chunkCount > 1)
 }
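The Before/Do/After lifecycle used above separates buffer acquisition, the retryable body, and cleanup: Before runs once per task, Do may run several times under retry, and After always frees the section. A minimal mock of that contract (a hypothetical Lifecycle type sketched from the usage here, not the real pkg/errgroup API):

    package main

    import (
        "errors"
        "fmt"
    )

    // Lifecycle mirrors the shape used above: Before acquires resources once,
    // Do is the retryable unit, After releases resources regardless of outcome.
    type Lifecycle struct {
        Before func() error
        Do     func() error
        After  func(err error)
    }

    func runWithRetry(lc Lifecycle, attempts int) (err error) {
        if err = lc.Before(); err != nil {
            return err
        }
        defer func() { lc.After(err) }()
        for i := 0; i < attempts; i++ {
            if err = lc.Do(); err == nil {
                return nil
            }
        }
        return err
    }

    func main() {
        tries := 0
        err := runWithRetry(Lifecycle{
            Before: func() error { fmt.Println("acquire section"); return nil },
            Do: func() error {
                tries++
                if tries < 2 {
                    return errors.New("transient")
                }
                return nil
            },
            After: func(err error) { fmt.Println("free section, err =", err) },
        }, 3)
        fmt.Println("result:", err, "tries:", tries)
    }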

View File

@ -2,7 +2,9 @@ package _123_open

 import (
     "context"
+    "fmt"
     "strconv"
+    "time"

     "github.com/OpenListTeam/OpenList/v4/internal/driver"
     "github.com/OpenListTeam/OpenList/v4/internal/errs"

@ -95,6 +97,22 @@ func (d *Open123) Rename(ctx context.Context, srcObj model.Obj, newName string)
 }

 func (d *Open123) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
+    // Try to implement copy via upload + MD5 instant upload (rapid transfer)
+    // 1. create the file
+    // parentFileID: id of the parent directory; 0 when uploading to the root
+    parentFileId, err := strconv.ParseInt(dstDir.GetID(), 10, 64)
+    if err != nil {
+        return fmt.Errorf("parse parentFileID error: %v", err)
+    }
+    etag := srcObj.(File).Etag
+    createResp, err := d.create(parentFileId, srcObj.GetName(), etag, srcObj.GetSize(), 2, false)
+    if err != nil {
+        return err
+    }
+    // instant upload?
+    if createResp.Data.Reuse {
+        return nil
+    }
     return errs.NotSupport
 }
@ -104,26 +122,64 @@ func (d *Open123) Remove(ctx context.Context, obj model.Obj) error {
     return d.trash(fileId)
 }

-func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
+func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
+    // 1. create the file
+    // parentFileID: id of the parent directory; 0 when uploading to the root
     parentFileId, err := strconv.ParseInt(dstDir.GetID(), 10, 64)
-    etag := file.GetHash().GetHash(utils.MD5)
-    if len(etag) < utils.MD5.Width {
-        _, etag, err = stream.CacheFullInTempFileAndHash(file, utils.MD5)
-        if err != nil {
-            return err
-        }
+    if err != nil {
+        return nil, fmt.Errorf("parse parentFileID error: %v", err)
+    }
+    // etag: the file's md5
+    etag := file.GetHash().GetHash(utils.MD5)
+    if len(etag) < utils.MD5.Width {
+        _, etag, err = stream.CacheFullAndHash(file, &up, utils.MD5)
+        if err != nil {
+            return nil, err
+        }
     }
     createResp, err := d.create(parentFileId, file.GetName(), etag, file.GetSize(), 2, false)
     if err != nil {
-        return err
+        return nil, err
     }
+    // instant upload?
     if createResp.Data.Reuse {
-        return nil
+        // A correct FileID is returned only on a successful instant upload; otherwise it is 0
+        if createResp.Data.FileID != 0 {
+            return File{
+                FileName: file.GetName(),
+                Size:     file.GetSize(),
+                FileId:   createResp.Data.FileID,
+                Type:     2,
+                Etag:     etag,
+            }, nil
+        }
     }
-    up(10)
-    return d.Upload(ctx, file, createResp, up)
+    up(10)
+    // 2. upload slices
+    err = d.Upload(ctx, file, createResp, up)
+    if err != nil {
+        return nil, err
+    }
+    // 3. finish the upload
+    for range 60 {
+        uploadCompleteResp, err := d.complete(createResp.Data.PreuploadID)
+        // Error code 20103 is returned for unknown reasons; the docs do not explain it
+        if err == nil && uploadCompleteResp.Data.Completed && uploadCompleteResp.Data.FileID != 0 {
+            up(100)
+            return File{
+                FileName: file.GetName(),
+                Size:     file.GetSize(),
+                FileId:   uploadCompleteResp.Data.FileID,
+                Type:     2,
+                Etag:     etag,
+            }, nil
+        }
+        // If the API reports completed == false, keep polling this endpoint
+        // at 1-second intervals for the final upload result.
+        time.Sleep(time.Second)
+    }
+    return nil, fmt.Errorf("upload complete timeout")
 }

 var _ driver.Driver = (*Open123)(nil)
+var _ driver.PutResult = (*Open123)(nil)
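The completion loop above is bounded polling: at most 60 one-second probes before giving up with a timeout error. The same shape reduced to its skeleton (checkDone is a hypothetical stand-in for d.complete):

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // checkDone is a hypothetical stand-in for the remote "upload_complete" probe.
    func checkDone(calls *int) (bool, error) {
        *calls++
        return *calls >= 3, nil // pretend the server finishes on the 3rd poll
    }

    func waitUploadComplete() error {
        calls := 0
        for range 60 { // bound the wait: 60 polls x 1s = one-minute budget
            done, err := checkDone(&calls)
            if err == nil && done {
                return nil
            }
            time.Sleep(time.Second)
        }
        return errors.New("upload complete timeout")
    }

    func main() {
        fmt.Println(waitUploadComplete())
    }

Bounding the loop matters: the old implementation polled the async endpoint in an unbounded `for` loop, so a server that never reported completion would hang the upload task forever.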

View File

@ -73,7 +73,9 @@ func (f File) GetName() string {
 }

 func (f File) CreateTime() time.Time {
-    parsedTime, err := time.Parse("2006-01-02 15:04:05", f.CreateAt)
+    // The returned timestamp carries no timezone info; assume UTC+8
+    loc := time.FixedZone("UTC+8", 8*60*60)
+    parsedTime, err := time.ParseInLocation("2006-01-02 15:04:05", f.CreateAt, loc)
     if err != nil {
         return time.Now()
     }

@ -81,7 +83,9 @@ func (f File) CreateTime() time.Time {
 }

 func (f File) ModTime() time.Time {
-    parsedTime, err := time.Parse("2006-01-02 15:04:05", f.UpdateAt)
+    // The returned timestamp carries no timezone info; assume UTC+8
+    loc := time.FixedZone("UTC+8", 8*60*60)
+    parsedTime, err := time.ParseInLocation("2006-01-02 15:04:05", f.UpdateAt, loc)
     if err != nil {
         return time.Now()
     }
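time.Parse treats a zone-less timestamp as UTC, so a "2025-08-09 12:00:00" coming from a UTC+8 API would be read eight hours late; ParseInLocation pins the zone explicitly. A quick demonstration:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        const layout = "2006-01-02 15:04:05"
        const stamp = "2025-08-09 12:00:00"

        utc, _ := time.Parse(layout, stamp) // interpreted as 12:00 UTC
        loc := time.FixedZone("UTC+8", 8*60*60)
        cst, _ := time.ParseInLocation(layout, stamp, loc) // interpreted as 12:00 UTC+8

        fmt.Println(utc.Unix() - cst.Unix()) // 28800: the two readings differ by 8 hours
    }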
@ -154,6 +158,7 @@ type DownloadInfoResp struct {
     } `json:"data"`
 }

+// response of create file V2
 type UploadCreateResp struct {
     BaseResp
     Data struct {

@ -161,45 +166,15 @@ type UploadCreateResp struct {
         PreuploadID string `json:"preuploadID"`
         Reuse       bool   `json:"reuse"`
         SliceSize   int64  `json:"sliceSize"`
+        Servers     []string `json:"servers"`
     } `json:"data"`
 }

-type UploadUrlResp struct {
-    BaseResp
-    Data struct {
-        PresignedURL string `json:"presignedURL"`
-    }
-}
-
+// response of upload complete V2
 type UploadCompleteResp struct {
     BaseResp
     Data struct {
-        Async     bool  `json:"async"`
         Completed bool  `json:"completed"`
         FileID    int64 `json:"fileID"`
     } `json:"data"`
 }
-
-type UploadAsyncResp struct {
-    BaseResp
-    Data struct {
-        Completed bool  `json:"completed"`
-        FileID    int64 `json:"fileID"`
-    } `json:"data"`
-}
-
-type UploadResp struct {
-    BaseResp
-    Data struct {
-        AccessKeyId     string `json:"AccessKeyId"`
-        Bucket          string `json:"Bucket"`
-        Key             string `json:"Key"`
-        SecretAccessKey string `json:"SecretAccessKey"`
-        SessionToken    string `json:"SessionToken"`
-        FileId          int64  `json:"FileId"`
-        Reuse           bool   `json:"Reuse"`
-        EndPoint        string `json:"EndPoint"`
-        StorageNode     string `json:"StorageNode"`
-        UploadId        string `json:"UploadId"`
-    } `json:"data"`
-}

View File

@ -1,21 +1,28 @@
 package _123_open

 import (
+    "bytes"
     "context"
+    "encoding/json"
+    "fmt"
+    "io"
+    "mime/multipart"
     "net/http"
+    "strconv"
     "strings"
     "time"

     "github.com/OpenListTeam/OpenList/v4/drivers/base"
     "github.com/OpenListTeam/OpenList/v4/internal/driver"
     "github.com/OpenListTeam/OpenList/v4/internal/model"
+    "github.com/OpenListTeam/OpenList/v4/internal/stream"
     "github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
-    "github.com/OpenListTeam/OpenList/v4/pkg/http_range"
     "github.com/OpenListTeam/OpenList/v4/pkg/utils"
     "github.com/avast/retry-go"
     "github.com/go-resty/resty/v2"
 )

+// create file V2
 func (d *Open123) create(parentFileID int64, filename string, etag string, size int64, duplicate int, containDir bool) (*UploadCreateResp, error) {
     var resp UploadCreateResp
     _, err := d.Request(UploadCreate, http.MethodPost, func(req *resty.Request) {

@ -34,21 +41,137 @@ func (d *Open123) create(parentFileID int64, filename string, etag string, size
     return &resp, nil
 }

-func (d *Open123) url(preuploadID string, sliceNo int64) (string, error) {
-    // get upload url
-    var resp UploadUrlResp
-    _, err := d.Request(UploadUrl, http.MethodPost, func(req *resty.Request) {
-        req.SetBody(base.Json{
-            "preuploadId": preuploadID,
-            "sliceNo":     sliceNo,
-        })
-    }, &resp)
-    if err != nil {
-        return "", err
-    }
-    return resp.Data.PresignedURL, nil
-}
+// upload slice V2
+func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createResp *UploadCreateResp, up driver.UpdateProgress) error {
+    uploadDomain := createResp.Data.Servers[0]
+    size := file.GetSize()
+    chunkSize := createResp.Data.SliceSize
+
+    ss, err := stream.NewStreamSectionReader(file, int(chunkSize), &up)
+    if err != nil {
+        return err
+    }
+
+    uploadNums := (size + chunkSize - 1) / chunkSize
+    thread := min(int(uploadNums), d.UploadThread)
+    threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
+        retry.Attempts(3),
+        retry.Delay(time.Second),
+        retry.DelayType(retry.BackOffDelay))
+
+    for partIndex := range uploadNums {
+        if utils.IsCanceled(uploadCtx) {
+            break
+        }
+        partIndex := partIndex
+        partNumber := partIndex + 1 // slice numbers start at 1
+        offset := partIndex * chunkSize
+        size := min(chunkSize, size-offset)
+        var reader *stream.SectionReader
+        var rateLimitedRd io.Reader
+        sliceMD5 := ""
+        threadG.GoWithLifecycle(errgroup.Lifecycle{
+            Before: func(ctx context.Context) error {
+                if reader == nil {
+                    var err error
+                    // one reader per slice
+                    reader, err = ss.GetSectionReader(offset, size)
+                    if err != nil {
+                        return err
+                    }
+                    // compute the MD5 of the current slice
+                    sliceMD5, err = utils.HashReader(utils.MD5, reader)
+                    if err != nil {
+                        return err
+                    }
+                    rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
+                }
+                return nil
+            },
+            Do: func(ctx context.Context) error {
+                // Reset the slice reader; HashReader (or a previous failed
+                // attempt) has already read to the slice's EOF
+                reader.Seek(0, io.SeekStart)
+                // build the multipart form
+                var b bytes.Buffer
+                w := multipart.NewWriter(&b)
+                // add the form fields
+                err = w.WriteField("preuploadID", createResp.Data.PreuploadID)
+                if err != nil {
+                    return err
+                }
+                err = w.WriteField("sliceNo", strconv.FormatInt(partNumber, 10))
+                if err != nil {
+                    return err
+                }
+                err = w.WriteField("sliceMD5", sliceMD5)
+                if err != nil {
+                    return err
+                }
+                // write the file content
+                fw, err := w.CreateFormFile("slice", fmt.Sprintf("%s.part%d", file.GetName(), partNumber))
+                if err != nil {
+                    return err
+                }
+                _, err = utils.CopyWithBuffer(fw, rateLimitedRd)
+                if err != nil {
+                    return err
+                }
+                err = w.Close()
+                if err != nil {
+                    return err
+                }
+                // create the request and set headers
+                req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadDomain+"/upload/v2/file/slice", &b)
+                if err != nil {
+                    return err
+                }
+                req.Header.Add("Authorization", "Bearer "+d.AccessToken)
+                req.Header.Add("Content-Type", w.FormDataContentType())
+                req.Header.Add("Platform", "open_platform")
+                res, err := base.HttpClient.Do(req)
+                if err != nil {
+                    return err
+                }
+                defer res.Body.Close()
+                if res.StatusCode != 200 {
+                    return fmt.Errorf("slice %d upload failed, status code: %d", partNumber, res.StatusCode)
+                }
+                var resp BaseResp
+                respBody, err := io.ReadAll(res.Body)
+                if err != nil {
+                    return err
+                }
+                err = json.Unmarshal(respBody, &resp)
+                if err != nil {
+                    return err
+                }
+                if resp.Code != 0 {
+                    return fmt.Errorf("slice %d upload failed: %s", partNumber, resp.Message)
+                }
+                progress := 10.0 + 85.0*float64(threadG.Success())/float64(uploadNums)
+                up(progress)
+                return nil
+            },
+            After: func(err error) {
+                ss.FreeSectionReader(reader)
+            },
+        })
+    }
+    if err := threadG.Wait(); err != nil {
+        return err
+    }
+    return nil
+}

+// upload complete V2
 func (d *Open123) complete(preuploadID string) (*UploadCompleteResp, error) {
     var resp UploadCompleteResp
     _, err := d.Request(UploadComplete, http.MethodPost, func(req *resty.Request) {

@ -61,91 +184,3 @@ func (d *Open123) complete(preuploadID string) (*UploadCompleteResp, error) {
     }
     return &resp, nil
 }
-
-func (d *Open123) async(preuploadID string) (*UploadAsyncResp, error) {
-    var resp UploadAsyncResp
-    _, err := d.Request(UploadAsync, http.MethodPost, func(req *resty.Request) {
-        req.SetBody(base.Json{
-            "preuploadID": preuploadID,
-        })
-    }, &resp)
-    if err != nil {
-        return nil, err
-    }
-    return &resp, nil
-}
-
-func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createResp *UploadCreateResp, up driver.UpdateProgress) error {
-    size := file.GetSize()
-    chunkSize := createResp.Data.SliceSize
-    uploadNums := (size + chunkSize - 1) / chunkSize
-    threadG, uploadCtx := errgroup.NewGroupWithContext(ctx, d.UploadThread,
-        retry.Attempts(3),
-        retry.Delay(time.Second),
-        retry.DelayType(retry.BackOffDelay))
-    for partIndex := int64(0); partIndex < uploadNums; partIndex++ {
-        if utils.IsCanceled(uploadCtx) {
-            return ctx.Err()
-        }
-        partIndex := partIndex
-        partNumber := partIndex + 1 // slice numbers start at 1
-        offset := partIndex * chunkSize
-        size := min(chunkSize, size-offset)
-        limitedReader, err := file.RangeRead(http_range.Range{
-            Start:  offset,
-            Length: size})
-        if err != nil {
-            return err
-        }
-        limitedReader = driver.NewLimitedUploadStream(ctx, limitedReader)
-        threadG.Go(func(ctx context.Context) error {
-            uploadPartUrl, err := d.url(createResp.Data.PreuploadID, partNumber)
-            if err != nil {
-                return err
-            }
-            req, err := http.NewRequestWithContext(ctx, "PUT", uploadPartUrl, limitedReader)
-            if err != nil {
-                return err
-            }
-            req = req.WithContext(ctx)
-            req.ContentLength = size
-            res, err := base.HttpClient.Do(req)
-            if err != nil {
-                return err
-            }
-            _ = res.Body.Close()
-            progress := 10.0 + 85.0*float64(threadG.Success())/float64(uploadNums)
-            up(progress)
-            return nil
-        })
-    }
-    if err := threadG.Wait(); err != nil {
-        return err
-    }
-    uploadCompleteResp, err := d.complete(createResp.Data.PreuploadID)
-    if err != nil {
-        return err
-    }
-    if !uploadCompleteResp.Data.Async || uploadCompleteResp.Data.Completed {
-        return nil
-    }
-    for {
-        uploadAsyncResp, err := d.async(createResp.Data.PreuploadID)
-        if err != nil {
-            return err
-        }
-        if uploadAsyncResp.Data.Completed {
-            break
-        }
-    }
-    up(100)
-    return nil
-}
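The v2 slice endpoint above takes a multipart form (preuploadID, sliceNo, sliceMD5, plus the slice bytes) rather than a presigned PUT. A compact, self-contained sketch of building such a request with the standard library (the field names and path follow the diff; the token and domain are placeholders):

    package main

    import (
        "bytes"
        "fmt"
        "mime/multipart"
        "net/http"
    )

    func buildSliceRequest(domain, preuploadID, sliceMD5 string, sliceNo int64, payload []byte) (*http.Request, error) {
        var b bytes.Buffer
        w := multipart.NewWriter(&b)
        // plain text fields first
        for k, v := range map[string]string{
            "preuploadID": preuploadID,
            "sliceNo":     fmt.Sprintf("%d", sliceNo),
            "sliceMD5":    sliceMD5,
        } {
            if err := w.WriteField(k, v); err != nil {
                return nil, err
            }
        }
        // then the slice body as a file part
        fw, err := w.CreateFormFile("slice", fmt.Sprintf("upload.part%d", sliceNo))
        if err != nil {
            return nil, err
        }
        if _, err := fw.Write(payload); err != nil {
            return nil, err
        }
        if err := w.Close(); err != nil { // Close writes the terminating boundary
            return nil, err
        }
        req, err := http.NewRequest(http.MethodPost, domain+"/upload/v2/file/slice", &b)
        if err != nil {
            return nil, err
        }
        req.Header.Set("Content-Type", w.FormDataContentType()) // includes the boundary
        req.Header.Set("Authorization", "Bearer <token>")       // placeholder
        return req, nil
    }

    func main() {
        req, _ := buildSliceRequest("https://upload.example.com", "pre-id", "d41d8cd9...", 1, []byte("chunk"))
        fmt.Println(req.Method, req.URL)
    }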

View File

@ -19,16 +19,14 @@ var ( // The QPS limit differs per endpoint; modularized as below for easier maintenance
     AccessToken    = InitApiInfo(Api+"/api/v1/access_token", 1)
     RefreshToken   = InitApiInfo(Api+"/api/v1/oauth2/access_token", 1)
     UserInfo       = InitApiInfo(Api+"/api/v1/user/info", 1)
-    FileList       = InitApiInfo(Api+"/api/v2/file/list", 4)
+    FileList       = InitApiInfo(Api+"/api/v2/file/list", 3)
     DownloadInfo   = InitApiInfo(Api+"/api/v1/file/download_info", 0)
     Mkdir          = InitApiInfo(Api+"/upload/v1/file/mkdir", 2)
     Move           = InitApiInfo(Api+"/api/v1/file/move", 1)
     Rename         = InitApiInfo(Api+"/api/v1/file/name", 1)
     Trash          = InitApiInfo(Api+"/api/v1/file/trash", 2)
-    UploadCreate   = InitApiInfo(Api+"/upload/v1/file/create", 2)
-    UploadUrl      = InitApiInfo(Api+"/upload/v1/file/get_upload_url", 0)
-    UploadComplete = InitApiInfo(Api+"/upload/v1/file/upload_complete", 0)
-    UploadAsync    = InitApiInfo(Api+"/upload/v1/file/upload_async_result", 1)
+    UploadCreate   = InitApiInfo(Api+"/upload/v2/file/create", 2)
+    UploadComplete = InitApiInfo(Api+"/upload/v2/file/upload_complete", 0)
 )

 func (d *Open123) Request(apiInfo *ApiInfo, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {

View File

@ -70,14 +70,6 @@ func (d *Pan123Share) List(ctx context.Context, dir model.Obj, args model.ListAr

 func (d *Pan123Share) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
     // TODO return link of file, required
     if f, ok := file.(File); ok {
-        //var resp DownResp
-        var headers map[string]string
-        if !utils.IsLocalIPAddr(args.IP) {
-            headers = map[string]string{
-                //"X-Real-IP": "1.1.1.1",
-                "X-Forwarded-For": args.IP,
-            }
-        }
         data := base.Json{
             "shareKey": d.ShareKey,
             "SharePwd": d.SharePwd,

@ -87,25 +79,27 @@ func (d *Pan123Share) Link(ctx context.Context, file model.Obj, args model.LinkA
             "size":     f.Size,
         }
         resp, err := d.request(DownloadInfo, http.MethodPost, func(req *resty.Request) {
-            req.SetBody(data).SetHeaders(headers)
+            req.SetBody(data)
         }, nil)
         if err != nil {
             return nil, err
         }
         downloadUrl := utils.Json.Get(resp, "data", "DownloadURL").ToString()
-        u, err := url.Parse(downloadUrl)
+        ou, err := url.Parse(downloadUrl)
         if err != nil {
             return nil, err
         }
-        nu := u.Query().Get("params")
+        u_ := ou.String()
+        nu := ou.Query().Get("params")
         if nu != "" {
             du, _ := base64.StdEncoding.DecodeString(nu)
-            u, err = url.Parse(string(du))
+            u, err := url.Parse(string(du))
             if err != nil {
                 return nil, err
             }
+            u_ = u.String()
         }
-        u_ := u.String()
         log.Debug("download url: ", u_)
         res, err := base.NoRedirectClient.R().SetHeader("Referer", "https://www.123pan.com/").Get(u_)
         if err != nil {

@ -122,7 +116,7 @@ func (d *Pan123Share) Link(ctx context.Context, file model.Obj, args model.LinkA
             link.URL = utils.Json.Get(res.Body(), "data", "redirect_url").ToString()
         }
         link.Header = http.Header{
-            "Referer": []string{"https://www.123pan.com/"},
+            "Referer": []string{fmt.Sprintf("%s://%s/", ou.Scheme, ou.Host)},
         }
         return &link, nil
     }

View File

@ -17,15 +17,8 @@ type Addition struct {

 var config = driver.Config{
     Name:        "123PanShare",
     LocalSort:   true,
-    OnlyLocal:         false,
-    OnlyProxy:         false,
-    NoCache:           false,
     NoUpload:    true,
-    NeedMs:            false,
     DefaultRoot: "0",
-    CheckStatus:       false,
-    Alert:             "",
-    NoOverwriteUpload: false,
 }

 func init() {

View File

@ -522,19 +522,17 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
     var err error
     fullHash := stream.GetHash().GetHash(utils.SHA256)
     if len(fullHash) != utils.SHA256.Width {
-        _, fullHash, err = streamPkg.CacheFullInTempFileAndHash(stream, utils.SHA256)
+        _, fullHash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA256)
         if err != nil {
             return err
         }
     }
     size := stream.GetSize()
-    var partSize = d.getPartSize(size)
-    part := size / partSize
-    if size%partSize > 0 {
-        part++
-    } else if part == 0 {
-        part = 1
+    partSize := d.getPartSize(size)
+    part := int64(1)
+    if size > partSize {
+        part = (size + partSize - 1) / partSize
     }
     partInfos := make([]PartInfo, 0, part)
     for i := int64(0); i < part; i++ {

@ -636,11 +634,10 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
         // Update Progress
         r := io.TeeReader(limitReader, p)
-        req, err := http.NewRequest("PUT", uploadPartInfo.UploadUrl, r)
+        req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadPartInfo.UploadUrl, r)
         if err != nil {
             return err
         }
-        req = req.WithContext(ctx)
         req.Header.Set("Content-Type", "application/octet-stream")
         req.Header.Set("Content-Length", fmt.Sprint(partSize))
         req.Header.Set("Origin", "https://yun.139.com")

@ -786,12 +783,10 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
     size := stream.GetSize()
     // Progress
     p := driver.NewProgress(size, up)
-    var partSize = d.getPartSize(size)
-    part := size / partSize
-    if size%partSize > 0 {
-        part++
-    } else if part == 0 {
-        part = 1
+    partSize := d.getPartSize(size)
+    part := int64(1)
+    if size > partSize {
+        part = (size + partSize - 1) / partSize
     }
     rateLimited := driver.NewLimitedUploadStream(ctx, stream)
     for i := int64(0); i < part; i++ {

@ -805,12 +800,10 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
         limitReader := io.LimitReader(rateLimited, byteSize)
         // Update Progress
         r := io.TeeReader(limitReader, p)
-        req, err := http.NewRequest("POST", resp.Data.UploadResult.RedirectionURL, r)
+        req, err := http.NewRequestWithContext(ctx, http.MethodPost, resp.Data.UploadResult.RedirectionURL, r)
         if err != nil {
             return err
         }
-        req = req.WithContext(ctx)
         req.Header.Set("Content-Type", "text/plain;name="+unicode(stream.GetName()))
         req.Header.Set("contentSize", strconv.FormatInt(size, 10))
         req.Header.Set("range", fmt.Sprintf("bytes=%d-%d", start, start+byteSize-1))

View File

@ -365,11 +365,10 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
     log.Debugf("uploadData: %+v", uploadData)
     requestURL := uploadData.RequestURL
     uploadHeaders := strings.Split(decodeURIComponent(uploadData.RequestHeader), "&")
-    req, err := http.NewRequest(http.MethodPut, requestURL, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
+    req, err := http.NewRequestWithContext(ctx, http.MethodPut, requestURL, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
     if err != nil {
         return err
     }
-    req = req.WithContext(ctx)
     for _, v := range uploadHeaders {
         i := strings.Index(v, "=")
         req.Header.Set(v[0:i], v[i+1:])

View File

@ -5,17 +5,19 @@ import (
     "encoding/base64"
     "encoding/xml"
     "fmt"
-    "github.com/skip2/go-qrcode"
     "io"
     "net/http"
     "strconv"
     "strings"
     "time"

+    "github.com/skip2/go-qrcode"
+
     "github.com/OpenListTeam/OpenList/v4/drivers/base"
     "github.com/OpenListTeam/OpenList/v4/internal/driver"
     "github.com/OpenListTeam/OpenList/v4/internal/model"
     "github.com/OpenListTeam/OpenList/v4/internal/op"
+    "github.com/OpenListTeam/OpenList/v4/internal/stream"
     "github.com/OpenListTeam/OpenList/v4/pkg/utils"
     "github.com/go-resty/resty/v2"

@ -311,11 +313,14 @@ func (y *Cloud189TV) RapidUpload(ctx context.Context, dstDir model.Obj, stream m

 // Legacy upload; the family cloud does not support overwrite
 func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
-    tempFile, err := file.CacheFullInTempFile()
-    if err != nil {
-        return nil, err
-    }
-    fileMd5, err := utils.HashFile(utils.MD5, tempFile)
+    fileMd5 := file.GetHash().GetHash(utils.MD5)
+    var tempFile = file.GetFile()
+    var err error
+    if len(fileMd5) != utils.MD5.Width {
+        tempFile, fileMd5, err = stream.CacheFullAndHash(file, &up, utils.MD5)
+    } else if tempFile == nil {
+        tempFile, err = file.CacheFullAndWriter(&up, nil)
+    }
     if err != nil {
         return nil, err
     }

@ -345,7 +350,7 @@ func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model
         header["Edrive-UploadFileId"] = fmt.Sprint(status.UploadFileId)
     }

-    _, err := y.put(ctx, status.FileUploadUrl, header, true, io.NopCloser(tempFile), isFamily)
+    _, err := y.put(ctx, status.FileUploadUrl, header, true, tempFile, isFamily)
     if err, ok := err.(*RespErr); ok && err.Code != "InputStreamReadError" {
         return nil, err
     }

View File

@ -7,6 +7,7 @@ import (
     "encoding/hex"
     "encoding/xml"
     "fmt"
+    "hash"
     "io"
     "net/http"
     "net/http/cookiejar"

@ -472,7 +473,7 @@ func (y *Cloud189PC) refreshSession() (err error) {

 // Files of size 0 cannot be uploaded
 func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
     size := file.GetSize()
-    sliceSize := partSize(size)
+    sliceSize := min(size, partSize(size))

     params := Params{
         "parentFolderId": dstDir.GetID(),

@ -500,43 +501,71 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
         return nil, err
     }

+    ss, err := stream.NewStreamSectionReader(file, int(sliceSize), &up)
+    if err != nil {
+        return nil, err
+    }
-    threadG, upCtx := errgroup.NewGroupWithContext(ctx, y.uploadThread,
+    threadG, upCtx := errgroup.NewOrderedGroupWithContext(ctx, y.uploadThread,
         retry.Attempts(3),
         retry.Delay(time.Second),
         retry.DelayType(retry.BackOffDelay))

-    count := int(size / sliceSize)
+    count := 1
+    if size > sliceSize {
+        count = int((size + sliceSize - 1) / sliceSize)
+    }
     lastPartSize := size % sliceSize
-    if lastPartSize > 0 {
-        count++
-    } else {
+    if lastPartSize == 0 {
         lastPartSize = sliceSize
     }
-    fileMd5 := utils.MD5.NewFunc()
-    silceMd5 := utils.MD5.NewFunc()
     silceMd5Hexs := make([]string, 0, count)
-    teeReader := io.TeeReader(file, io.MultiWriter(fileMd5, silceMd5))
-    byteSize := sliceSize
+    silceMd5 := utils.MD5.NewFunc()
+    var writers io.Writer = silceMd5
+    fileMd5Hex := file.GetHash().GetHash(utils.MD5)
+    var fileMd5 hash.Hash
+    if len(fileMd5Hex) != utils.MD5.Width {
+        fileMd5 = utils.MD5.NewFunc()
+        writers = io.MultiWriter(silceMd5, fileMd5)
+    }
     for i := 1; i <= count; i++ {
         if utils.IsCanceled(upCtx) {
             break
         }
+        offset := int64((i)-1) * sliceSize
+        size := sliceSize
         if i == count {
-            byteSize = lastPartSize
+            size = lastPartSize
         }
-        byteData := make([]byte, byteSize)
-        // read the chunk
-        silceMd5.Reset()
-        if _, err := io.ReadFull(teeReader, byteData); err != io.EOF && err != nil {
-            return nil, err
-        }
-        // compute the chunk md5, hex- and base64-encode it
-        md5Bytes := silceMd5.Sum(nil)
-        silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Bytes)))
-        partInfo := fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes))
-        threadG.Go(func(ctx context.Context) error {
+        partInfo := ""
+        var reader *stream.SectionReader
+        var rateLimitedRd io.Reader
+        threadG.GoWithLifecycle(errgroup.Lifecycle{
+            Before: func(ctx context.Context) error {
+                if reader == nil {
+                    var err error
+                    reader, err = ss.GetSectionReader(offset, size)
+                    if err != nil {
+                        return err
+                    }
+                    silceMd5.Reset()
+                    w, err := utils.CopyWithBuffer(writers, reader)
+                    if w != size {
+                        return fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", size, w, err)
+                    }
+                    // compute the chunk md5, hex- and base64-encode it
+                    md5Bytes := silceMd5.Sum(nil)
+                    silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Bytes)))
+                    partInfo = fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes))
+                    rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
+                }
+                return nil
+            },
+            Do: func(ctx context.Context) error {
+                reader.Seek(0, io.SeekStart)
                 uploadUrls, err := y.GetMultiUploadUrls(ctx, isFamily, initMultiUpload.Data.UploadFileID, partInfo)
                 if err != nil {
                     return err

@ -545,19 +574,26 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
                 // step.4 upload the slice
                 uploadUrl := uploadUrls[0]
-                _, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false,
-                    driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)), isFamily)
+                _, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, rateLimitedRd, isFamily)
                 if err != nil {
                     return err
                 }
                 up(float64(threadG.Success()) * 100 / float64(count))
                 return nil
-        })
+            },
+            After: func(err error) {
+                ss.FreeSectionReader(reader)
+            },
+        })
     }
     if err = threadG.Wait(); err != nil {
         return nil, err
     }
-    fileMd5Hex := strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
+    if fileMd5 != nil {
+        fileMd5Hex = strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
+    }
     sliceMd5Hex := fileMd5Hex
     if file.GetSize() > sliceSize {
         sliceMd5Hex = strings.ToUpper(utils.GetMD5EncodeStr(strings.Join(silceMd5Hexs, "\n")))

@ -620,11 +656,12 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
         cache = tmpF
     }
     sliceSize := partSize(size)
-    count := int(size / sliceSize)
+    count := 1
+    if size > sliceSize {
+        count = int((size + sliceSize - 1) / sliceSize)
+    }
     lastSliceSize := size % sliceSize
-    if lastSliceSize > 0 {
-        count++
-    } else {
+    if lastSliceSize == 0 {
         lastSliceSize = sliceSize
     }

@ -738,7 +775,8 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
     }
     // step.4 upload the slice
-    _, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, io.NewSectionReader(cache, offset, byteSize), isFamily)
+    rateLimitedRd := driver.NewLimitedUploadStream(ctx, io.NewSectionReader(cache, offset, byteSize))
+    _, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, rateLimitedRd, isFamily)
     if err != nil {
         return err
     }

@ -820,7 +858,7 @@ func (y *Cloud189PC) GetMultiUploadUrls(ctx context.Context, isFamily bool, uplo

 // Legacy upload; the family cloud does not support overwrite
 func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
-    tempFile, fileMd5, err := stream.CacheFullInTempFileAndHash(file, utils.MD5)
+    tempFile, fileMd5, err := stream.CacheFullAndHash(file, &up, utils.MD5)
     if err != nil {
         return nil, err
     }
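The `writers` indirection above hashes every slice, but only feeds a whole-file MD5 when the stream does not already carry one, saving a full extra digest pass. A standalone sketch of the idea, with stand-in data:

    package main

    import (
        "crypto/md5"
        "encoding/hex"
        "fmt"
        "hash"
        "io"
        "strings"
    )

    func main() {
        knownMD5 := "" // pretend the stream's metadata had no MD5
        sliceMd5 := md5.New()

        var writers io.Writer = sliceMd5
        var fileMd5 hash.Hash
        if len(knownMD5) != 2*md5.Size { // no valid precomputed hex digest
            fileMd5 = md5.New()
            writers = io.MultiWriter(sliceMd5, fileMd5) // hash file and slice in one pass
        }

        for _, chunk := range []string{"hello ", "world"} {
            sliceMd5.Reset() // per-slice digest restarts; fileMd5 keeps accumulating
            io.Copy(writers, strings.NewReader(chunk))
            fmt.Println("slice:", hex.EncodeToString(sliceMd5.Sum(nil)))
        }
        if fileMd5 != nil {
            fmt.Println("file: ", hex.EncodeToString(fileMd5.Sum(nil)))
        }
    }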

View File

@ -3,7 +3,9 @@ package alias
 import (
     "context"
     "errors"
+    "fmt"
     "io"
+    "net/url"
     stdpath "path"
     "strings"

@ -11,8 +13,11 @@ import (
     "github.com/OpenListTeam/OpenList/v4/internal/errs"
     "github.com/OpenListTeam/OpenList/v4/internal/fs"
     "github.com/OpenListTeam/OpenList/v4/internal/model"
+    "github.com/OpenListTeam/OpenList/v4/internal/op"
+    "github.com/OpenListTeam/OpenList/v4/internal/sign"
     "github.com/OpenListTeam/OpenList/v4/internal/stream"
     "github.com/OpenListTeam/OpenList/v4/pkg/utils"
+    "github.com/OpenListTeam/OpenList/v4/server/common"
 )

 type Alias struct {

@ -75,10 +80,18 @@ func (d *Alias) Get(ctx context.Context, path string) (model.Obj, error) {
         return nil, errs.ObjectNotFound
     }
     for _, dst := range dsts {
-        obj, err := d.get(ctx, path, dst, sub)
-        if err == nil {
-            return obj, nil
+        obj, err := fs.Get(ctx, stdpath.Join(dst, sub), &fs.GetArgs{NoLog: true})
+        if err != nil {
+            continue
         }
+        return &model.Object{
+            Path:     path,
+            Name:     obj.GetName(),
+            Size:     obj.GetSize(),
+            Modified: obj.ModTime(),
+            IsFolder: obj.IsDir(),
+            HashInfo: obj.GetHash(),
+        }, nil
     }
     return nil, errs.ObjectNotFound
 }

@ -96,7 +109,27 @@ func (d *Alias) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
     var objs []model.Obj
     fsArgs := &fs.ListArgs{NoLog: true, Refresh: args.Refresh}
     for _, dst := range dsts {
-        tmp, err := d.list(ctx, dst, sub, fsArgs)
+        tmp, err := fs.List(ctx, stdpath.Join(dst, sub), fsArgs)
+        if err == nil {
+            tmp, err = utils.SliceConvert(tmp, func(obj model.Obj) (model.Obj, error) {
+                thumb, ok := model.GetThumb(obj)
+                objRes := model.Object{
+                    Name:     obj.GetName(),
+                    Size:     obj.GetSize(),
+                    Modified: obj.ModTime(),
+                    IsFolder: obj.IsDir(),
+                }
+                if !ok {
+                    return &objRes, nil
+                }
+                return &model.ObjThumb{
+                    Object: objRes,
+                    Thumbnail: model.Thumbnail{
+                        Thumbnail: thumb,
+                    },
+                }, nil
+            })
+        }
         if err == nil {
             objs = append(objs, tmp...)
         }

@ -110,22 +143,45 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
     if !ok {
         return nil, errs.ObjectNotFound
     }
+    // proxy || ftp,s3
+    if common.GetApiUrl(ctx) == "" {
+        args.Redirect = false
+    }
     for _, dst := range dsts {
-        link, err := d.link(ctx, dst, sub, args)
-        if err == nil {
-            link.Expiration = nil // drop unnecessary caching; op.Link inside d.link already caches
-            if !args.Redirect && len(link.URL) > 0 {
-                // Normally, concurrent chunked downloads only work for drivers that return a URL.
-                // Nesting alias inside alias lets drivers that return no URL (crypt, mega, ...) support concurrency.
-                if d.DownloadConcurrency > 0 {
-                    link.Concurrency = d.DownloadConcurrency
-                }
-                if d.DownloadPartSize > 0 {
-                    link.PartSize = d.DownloadPartSize * utils.KB
-                }
-            }
-            return link, nil
+        reqPath := stdpath.Join(dst, sub)
+        link, fi, err := d.link(ctx, reqPath, args)
+        if err != nil {
+            continue
         }
+        if link == nil {
+            // redirected and must go through the proxy
+            return &model.Link{
+                URL: fmt.Sprintf("%s/p%s?sign=%s",
+                    common.GetApiUrl(ctx),
+                    utils.EncodePath(reqPath, true),
+                    sign.Sign(reqPath)),
+            }, nil
+        }
+        resultLink := *link
+        resultLink.SyncClosers = utils.NewSyncClosers(link)
+        if args.Redirect {
+            return &resultLink, nil
+        }
+        if resultLink.ContentLength == 0 {
+            resultLink.ContentLength = fi.GetSize()
+        }
+        if resultLink.MFile != nil {
+            return &resultLink, nil
+        }
+        if d.DownloadConcurrency > 0 {
+            resultLink.Concurrency = d.DownloadConcurrency
+        }
+        if d.DownloadPartSize > 0 {
+            resultLink.PartSize = d.DownloadPartSize * utils.KB
+        }
+        return &resultLink, nil
     }
     return nil, errs.ObjectNotFound
 }

@ -167,7 +223,8 @@ func (d *Alias) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
     }
     if len(srcPath) == len(dstPath) {
         for i := range srcPath {
-            err = errors.Join(err, fs.Move(ctx, *srcPath[i], *dstPath[i]))
+            _, e := fs.Move(ctx, *srcPath[i], *dstPath[i])
+            err = errors.Join(err, e)
         }
         return err
     } else {

@ -251,20 +308,29 @@ func (d *Alias) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer,
     reqPath, err := d.getReqPath(ctx, dstDir, true)
     if err == nil {
         if len(reqPath) == 1 {
-            return fs.PutDirectly(ctx, *reqPath[0], s)
-        } else {
-            defer s.Close()
-            file, err := s.CacheFullInTempFile()
+            storage, reqActualPath, err := op.GetStorageAndActualPath(*reqPath[0])
             if err != nil {
                 return err
             }
-            for _, path := range reqPath {
+            return op.Put(ctx, storage, reqActualPath, &stream.FileStream{
+                Obj:      s,
+                Mimetype: s.GetMimetype(),
+                Reader:   s,
+            }, up)
+        } else {
+            file, err := s.CacheFullAndWriter(nil, nil)
+            if err != nil {
+                return err
+            }
+            count := float64(len(reqPath) + 1)
+            up(100 / count)
+            for i, path := range reqPath {
                 err = errors.Join(err, fs.PutDirectly(ctx, *path, &stream.FileStream{
-                    Obj:          s,
-                    Mimetype:     s.GetMimetype(),
-                    WebPutAsTask: s.NeedStore(),
-                    Reader:       file,
+                    Obj:      s,
+                    Mimetype: s.GetMimetype(),
+                    Reader:   file,
                 }))
+                up(float64(i+2) / float64(count) * 100)
                 _, e := file.Seek(0, io.SeekStart)
                 if e != nil {
                     return errors.Join(err, e)

@ -336,18 +402,24 @@ func (d *Alias) Extract(ctx context.Context, obj model.Obj, args model.ArchiveIn
         return nil, errs.ObjectNotFound
     }
     for _, dst := range dsts {
-        link, err := d.extract(ctx, dst, sub, args)
-        if err == nil {
-            if !args.Redirect && len(link.URL) > 0 {
-                if d.DownloadConcurrency > 0 {
-                    link.Concurrency = d.DownloadConcurrency
-                }
-                if d.DownloadPartSize > 0 {
-                    link.PartSize = d.DownloadPartSize * utils.KB
-                }
-            }
-            return link, nil
+        reqPath := stdpath.Join(dst, sub)
+        link, err := d.extract(ctx, reqPath, args)
+        if err != nil {
+            continue
         }
+        if link == nil {
+            return &model.Link{
+                URL: fmt.Sprintf("%s/ap%s?inner=%s&pass=%s&sign=%s",
+                    common.GetApiUrl(ctx),
+                    utils.EncodePath(reqPath, true),
+                    utils.EncodePath(args.InnerPath, true),
+                    url.QueryEscape(args.Password),
+                    sign.SignArchive(reqPath)),
+            }, nil
+        }
+        resultLink := *link
+        resultLink.SyncClosers = utils.NewSyncClosers(link)
+        return &resultLink, nil
     }
     return nil, errs.NotImplement
 }
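Returning `resultLink := *link` with its own SyncClosers hands each caller an independent copy whose lifetime it controls, while the cached original stays reusable for later requests. The shape of that pattern, reduced to a toy (the Link type here is illustrative, not the real model.Link):

    package main

    import "fmt"

    // Link is an illustrative stand-in for a cached, closable link.
    type Link struct {
        URL     string
        Closers []func() // stand-in for SyncClosers
    }

    func main() {
        cached := &Link{URL: "https://example.com/file"} // lives in a cache

        // Hand out a shallow copy that owns its own closer list, so closing
        // the caller's copy cannot tear down state shared with the cache.
        result := *cached
        result.Closers = []func(){func() { fmt.Println("caller-side cleanup") }}

        for _, c := range result.Closers {
            c()
        }
        fmt.Println(cached.URL == result.URL, len(cached.Closers)) // true 0
    }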

View File

@ -2,8 +2,6 @@ package alias

 import (
     "context"
-    "fmt"
-    "net/url"
     stdpath "path"
     "strings"

@ -12,8 +10,6 @@ import (
     "github.com/OpenListTeam/OpenList/v4/internal/fs"
     "github.com/OpenListTeam/OpenList/v4/internal/model"
     "github.com/OpenListTeam/OpenList/v4/internal/op"
-    "github.com/OpenListTeam/OpenList/v4/internal/sign"
-    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
     "github.com/OpenListTeam/OpenList/v4/server/common"
 )

@ -54,79 +50,22 @@ func (d *Alias) getRootAndPath(path string) (string, string) {
     return parts[0], parts[1]
 }

-func (d *Alias) get(ctx context.Context, path string, dst, sub string) (model.Obj, error) {
-    obj, err := fs.Get(ctx, stdpath.Join(dst, sub), &fs.GetArgs{NoLog: true})
-    if err != nil {
-        return nil, err
-    }
-    return &model.Object{
-        Path:     path,
-        Name:     obj.GetName(),
-        Size:     obj.GetSize(),
-        Modified: obj.ModTime(),
-        IsFolder: obj.IsDir(),
-        HashInfo: obj.GetHash(),
-    }, nil
-}
-
-func (d *Alias) list(ctx context.Context, dst, sub string, args *fs.ListArgs) ([]model.Obj, error) {
-    objs, err := fs.List(ctx, stdpath.Join(dst, sub), args)
-    // the obj must implement the model.SetPath interface
-    // return objs, err
-    if err != nil {
-        return nil, err
-    }
-    return utils.SliceConvert(objs, func(obj model.Obj) (model.Obj, error) {
-        thumb, ok := model.GetThumb(obj)
-        objRes := model.Object{
-            Name:     obj.GetName(),
-            Size:     obj.GetSize(),
-            Modified: obj.ModTime(),
-            IsFolder: obj.IsDir(),
-        }
-        if !ok {
-            return &objRes, nil
-        }
-        return &model.ObjThumb{
-            Object: objRes,
-            Thumbnail: model.Thumbnail{
-                Thumbnail: thumb,
-            },
-        }, nil
-    })
-}
-
-func (d *Alias) link(ctx context.Context, dst, sub string, args model.LinkArgs) (*model.Link, error) {
-    reqPath := stdpath.Join(dst, sub)
-    // see the crypt driver
+func (d *Alias) link(ctx context.Context, reqPath string, args model.LinkArgs) (*model.Link, model.Obj, error) {
     storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
     if err != nil {
-        return nil, err
+        return nil, nil, err
     }
-    useRawLink := len(common.GetApiUrl(ctx)) == 0 // ftp, s3
-    if !useRawLink {
-        _, ok := storage.(*Alias)
-        useRawLink = !ok && !args.Redirect
+    if !args.Redirect {
+        return op.Link(ctx, storage, reqActualPath, args)
     }
-    if useRawLink {
-        link, _, err := op.Link(ctx, storage, reqActualPath, args)
-        return link, err
-    }
-    _, err = fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
+    obj, err := fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
     if err != nil {
-        return nil, err
+        return nil, nil, err
     }
-    if common.ShouldProxy(storage, stdpath.Base(sub)) {
-        link := &model.Link{
-            URL: fmt.Sprintf("%s/p%s?sign=%s",
-                common.GetApiUrl(ctx),
-                utils.EncodePath(reqPath, true),
-                sign.Sign(reqPath)),
-        }
-        return link, nil
+    if common.ShouldProxy(storage, stdpath.Base(reqPath)) {
+        return nil, obj, nil
     }
-    link, _, err := op.Link(ctx, storage, reqActualPath, args)
-    return link, err
+    return op.Link(ctx, storage, reqActualPath, args)
 }

 func (d *Alias) getReqPath(ctx context.Context, obj model.Obj, isParent bool) ([]*string, error) {

@ -197,8 +136,7 @@ func (d *Alias) listArchive(ctx context.Context, dst, sub string, args model.Arc
     return nil, errs.NotImplement
 }

-func (d *Alias) extract(ctx context.Context, dst, sub string, args model.ArchiveInnerArgs) (*model.Link, error) {
-    reqPath := stdpath.Join(dst, sub)
+func (d *Alias) extract(ctx context.Context, reqPath string, args model.ArchiveInnerArgs) (*model.Link, error) {
     storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
     if err != nil {
         return nil, err

@ -206,20 +144,12 @@ func (d *Alias) extract(ctx context.Context, dst, sub string, args model.Archive
     if _, ok := storage.(driver.ArchiveReader); !ok {
         return nil, errs.NotImplement
     }
-    if args.Redirect && common.ShouldProxy(storage, stdpath.Base(sub)) {
-        _, err = fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
+    if args.Redirect && common.ShouldProxy(storage, stdpath.Base(reqPath)) {
+        _, err := fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
         if err != nil {
             return nil, err
         }
-        link := &model.Link{
-            URL: fmt.Sprintf("%s/ap%s?inner=%s&pass=%s&sign=%s",
-                common.GetApiUrl(ctx),
-                utils.EncodePath(reqPath, true),
-                utils.EncodePath(args.InnerPath, true),
-                url.QueryEscape(args.Password),
-                sign.SignArchive(reqPath)),
-        }
-        return link, nil
+        return nil, nil
     }
     link, _, err := op.DriverExtract(ctx, storage, reqActualPath, args)
     return link, err

View File

@ -165,7 +165,7 @@ func (d *AliDrive) Remove(ctx context.Context, obj model.Obj) error {
 }

 func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.FileStreamer, up driver.UpdateProgress) error {
-    file := stream.FileStream{
+    file := &stream.FileStream{
         Obj:      streamer,
         Reader:   streamer,
         Mimetype: streamer.GetMimetype(),

@ -209,7 +209,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
             io.Closer
         }{
             Reader: io.MultiReader(buf, file),
-            Closer: &file,
+            Closer: file,
         }
     }
 } else {

@ -297,11 +297,10 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
         if d.InternalUpload {
             url = partInfo.InternalUploadUrl
         }
-        req, err := http.NewRequest("PUT", url, io.LimitReader(rateLimited, DEFAULT))
+        req, err := http.NewRequestWithContext(ctx, http.MethodPut, url, io.LimitReader(rateLimited, DEFAULT))
         if err != nil {
             return err
         }
-        req = req.WithContext(ctx)
         res, err := base.HttpClient.Do(req)
         if err != nil {
             return err

View File

@ -3,7 +3,6 @@ package aliyundrive_open
 import (
     "context"
     "errors"
-    "fmt"
     "net/http"
     "path/filepath"
     "time"

@ -13,7 +12,6 @@ import (
     "github.com/OpenListTeam/OpenList/v4/internal/errs"
     "github.com/OpenListTeam/OpenList/v4/internal/model"
     "github.com/OpenListTeam/OpenList/v4/pkg/utils"
-    "github.com/OpenListTeam/rateg"
     "github.com/go-resty/resty/v2"
     log "github.com/sirupsen/logrus"
 )

@ -24,8 +22,7 @@ type AliyundriveOpen struct {
     DriveId string

-    limitList func(ctx context.Context, data base.Json) (*Files, error)
-    limitLink func(ctx context.Context, file model.Obj) (*model.Link, error)
+    limiter *limiter
     ref *AliyundriveOpen
 }

@ -38,25 +35,23 @@ func (d *AliyundriveOpen) GetAddition() driver.Additional {
 }

 func (d *AliyundriveOpen) Init(ctx context.Context) error {
+    d.limiter = getLimiterForUser(globalLimiterUserID) // First create a globally shared limiter to limit the initial requests.
     if d.LIVPDownloadFormat == "" {
         d.LIVPDownloadFormat = "jpeg"
     }
     if d.DriveType == "" {
         d.DriveType = "default"
     }
-    res, err := d.request("/adrive/v1.0/user/getDriveInfo", http.MethodPost, nil)
+    res, err := d.request(ctx, limiterOther, "/adrive/v1.0/user/getDriveInfo", http.MethodPost, nil)
     if err != nil {
+        d.limiter.free()
+        d.limiter = nil
         return err
     }
     d.DriveId = utils.Json.Get(res, d.DriveType+"_drive_id").ToString()
-    d.limitList = rateg.LimitFnCtx(d.list, rateg.LimitFnOption{
-        Limit:  4,
-        Bucket: 1,
-    })
-    d.limitLink = rateg.LimitFnCtx(d.link, rateg.LimitFnOption{
-        Limit:  1,
-        Bucket: 1,
-    })
+    userid := utils.Json.Get(res, "user_id").ToString()
+    d.limiter.free()
+    d.limiter = getLimiterForUser(userid) // Allocate a corresponding limiter for each user.
     return nil
 }

@ -70,6 +65,8 @@ func (d *AliyundriveOpen) InitReference(storage driver.Driver) error {
 }

 func (d *AliyundriveOpen) Drop(ctx context.Context) error {
+    d.limiter.free()
+    d.limiter = nil
     d.ref = nil
     return nil
 }
@ -87,9 +84,6 @@ func (d *AliyundriveOpen) GetRoot(ctx context.Context) (model.Obj, error) {
} }
func (d *AliyundriveOpen) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { func (d *AliyundriveOpen) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
if d.limitList == nil {
return nil, fmt.Errorf("driver not init")
}
files, err := d.getFiles(ctx, dir.GetID()) files, err := d.getFiles(ctx, dir.GetID())
if err != nil { if err != nil {
return nil, err return nil, err
@ -107,8 +101,8 @@ func (d *AliyundriveOpen) List(ctx context.Context, dir model.Obj, args model.Li
return objs, err return objs, err
} }
func (d *AliyundriveOpen) link(ctx context.Context, file model.Obj) (*model.Link, error) { func (d *AliyundriveOpen) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
res, err := d.request("/adrive/v1.0/openFile/getDownloadUrl", http.MethodPost, func(req *resty.Request) { res, err := d.request(ctx, limiterLink, "/adrive/v1.0/openFile/getDownloadUrl", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{ req.SetBody(base.Json{
"drive_id": d.DriveId, "drive_id": d.DriveId,
"file_id": file.GetID(), "file_id": file.GetID(),
@ -132,17 +126,10 @@ func (d *AliyundriveOpen) link(ctx context.Context, file model.Obj) (*model.Link
}, nil }, nil
} }
func (d *AliyundriveOpen) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if d.limitLink == nil {
return nil, fmt.Errorf("driver not init")
}
return d.limitLink(ctx, file)
}
func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) { func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
nowTime, _ := getNowTime() nowTime, _ := getNowTime()
newDir := File{CreatedAt: nowTime, UpdatedAt: nowTime} newDir := File{CreatedAt: nowTime, UpdatedAt: nowTime}
_, err := d.request("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) { _, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{ req.SetBody(base.Json{
"drive_id": d.DriveId, "drive_id": d.DriveId,
"parent_file_id": parentDir.GetID(), "parent_file_id": parentDir.GetID(),
@ -168,7 +155,7 @@ func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirN
func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
var resp MoveOrCopyResp var resp MoveOrCopyResp
_, err := d.request("/adrive/v1.0/openFile/move", http.MethodPost, func(req *resty.Request) { _, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/move", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{ req.SetBody(base.Json{
"drive_id": d.DriveId, "drive_id": d.DriveId,
"file_id": srcObj.GetID(), "file_id": srcObj.GetID(),
@ -198,7 +185,7 @@ func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (m
func (d *AliyundriveOpen) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) { func (d *AliyundriveOpen) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
var newFile File var newFile File
_, err := d.request("/adrive/v1.0/openFile/update", http.MethodPost, func(req *resty.Request) { _, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/update", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{ req.SetBody(base.Json{
"drive_id": d.DriveId, "drive_id": d.DriveId,
"file_id": srcObj.GetID(), "file_id": srcObj.GetID(),
@ -230,7 +217,7 @@ func (d *AliyundriveOpen) Rename(ctx context.Context, srcObj model.Obj, newName
func (d *AliyundriveOpen) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { func (d *AliyundriveOpen) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
var resp MoveOrCopyResp var resp MoveOrCopyResp
_, err := d.request("/adrive/v1.0/openFile/copy", http.MethodPost, func(req *resty.Request) { _, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/copy", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{ req.SetBody(base.Json{
"drive_id": d.DriveId, "drive_id": d.DriveId,
"file_id": srcObj.GetID(), "file_id": srcObj.GetID(),
@ -256,7 +243,7 @@ func (d *AliyundriveOpen) Remove(ctx context.Context, obj model.Obj) error {
if d.RemoveWay == "delete" { if d.RemoveWay == "delete" {
uri = "/adrive/v1.0/openFile/delete" uri = "/adrive/v1.0/openFile/delete"
} }
_, err := d.request(uri, http.MethodPost, func(req *resty.Request) { _, err := d.request(ctx, limiterOther, uri, http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{ req.SetBody(base.Json{
"drive_id": d.DriveId, "drive_id": d.DriveId,
"file_id": obj.GetID(), "file_id": obj.GetID(),
@ -295,7 +282,7 @@ func (d *AliyundriveOpen) Other(ctx context.Context, args model.OtherArgs) (inte
default: default:
return nil, errs.NotSupport return nil, errs.NotSupport
} }
_, err := d.request(uri, http.MethodPost, func(req *resty.Request) { _, err := d.request(ctx, limiterOther, uri, http.MethodPost, func(req *resty.Request) {
req.SetBody(data).SetResult(&resp) req.SetBody(data).SetResult(&resp)
}) })
if err != nil { if err != nil {

View File

@ -0,0 +1,96 @@
package aliyundrive_open
import (
"context"
"fmt"
"sync"
"golang.org/x/time/rate"
)
// See the documentation: https://www.yuque.com/aliyundrive/zpfszx/mqocg38hlxzc5vcd
// See issue https://github.com/OpenListTeam/OpenList/issues/724
// The limit applies per user per app, so the limiter should be global.
type limiterType int
const (
limiterList limiterType = iota
limiterLink
limiterOther
)
const (
listRateLimit = 3.9 // 4 per second in the document, but we use 3.9 per second to be safe
linkRateLimit = 0.9 // 1 per second in the document, but we use 0.9 per second to be safe
otherRateLimit = 14.9 // 15 per second in the document, but we use 14.9 per second to be safe
globalLimiterUserID = "" // Global limiter user ID, used to limit the initial requests.
)
type limiter struct {
usedBy int
list *rate.Limiter
link *rate.Limiter
other *rate.Limiter
}
var limiters = make(map[string]*limiter)
var limitersLock = &sync.Mutex{}
func getLimiterForUser(userid string) *limiter {
limitersLock.Lock()
defer limitersLock.Unlock()
defer func() {
// Clean up limiters that are no longer used.
for id, lim := range limiters {
if lim.usedBy <= 0 && id != globalLimiterUserID { // Do not delete the global limiter.
delete(limiters, id)
}
}
}()
if lim, ok := limiters[userid]; ok {
lim.usedBy++
return lim
}
lim := &limiter{
usedBy: 1,
list: rate.NewLimiter(rate.Limit(listRateLimit), 1),
link: rate.NewLimiter(rate.Limit(linkRateLimit), 1),
other: rate.NewLimiter(rate.Limit(otherRateLimit), 1),
}
limiters[userid] = lim
return lim
}
func (l *limiter) wait(ctx context.Context, typ limiterType) error {
if l == nil {
return fmt.Errorf("driver not init")
}
switch typ {
case limiterList:
return l.list.Wait(ctx)
case limiterLink:
return l.link.Wait(ctx)
case limiterOther:
return l.other.Wait(ctx)
default:
return fmt.Errorf("unknown limiter type")
}
}
func (l *limiter) free() {
if l == nil {
return
}
limitersLock.Lock()
defer limitersLock.Unlock()
l.usedBy--
}
func (d *AliyundriveOpen) wait(ctx context.Context, typ limiterType) error {
if d == nil {
return fmt.Errorf("driver not init")
}
if d.ref != nil {
return d.ref.wait(ctx, typ) // If this is a reference driver, wait on the reference driver.
}
return d.limiter.wait(ctx, typ)
}
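
To make the sharing behaviour above concrete, here is a trimmed, self-contained restatement of the reference-counted map: two storages initialised for the same user id get the same *limiter, usedBy tracks ownership, and free() only decrements so the sweep in getLimiterForUser can reclaim idle entries. The user id and the single link limiter below are illustrative:

package main

import (
	"fmt"
	"sync"

	"golang.org/x/time/rate"
)

type limiter struct {
	usedBy int
	link   *rate.Limiter
}

var (
	limiters     = map[string]*limiter{}
	limitersLock sync.Mutex
)

func getLimiterForUser(id string) *limiter {
	limitersLock.Lock()
	defer limitersLock.Unlock()
	if lim, ok := limiters[id]; ok {
		lim.usedBy++ // a second mount of the same account shares the limiter
		return lim
	}
	lim := &limiter{usedBy: 1, link: rate.NewLimiter(rate.Limit(0.9), 1)}
	limiters[id] = lim
	return lim
}

func main() {
	a := getLimiterForUser("user-a") // first storage for this account
	b := getLimiterForUser("user-a") // reference-counted, not re-created
	fmt.Println(a == b, a.usedBy)    // true 2
}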

View File

@ -12,6 +12,7 @@ type Addition struct {
OrderBy string `json:"order_by" type:"select" options:"name,size,updated_at,created_at"` OrderBy string `json:"order_by" type:"select" options:"name,size,updated_at,created_at"`
OrderDirection string `json:"order_direction" type:"select" options:"ASC,DESC"` OrderDirection string `json:"order_direction" type:"select" options:"ASC,DESC"`
UseOnlineAPI bool `json:"use_online_api" default:"true"` UseOnlineAPI bool `json:"use_online_api" default:"true"`
AlipanType string `json:"alipan_type" required:"true" type:"select" default:"default" options:"default,alipanTV"`
APIAddress string `json:"api_url_address" default:"https://api.oplist.org/alicloud/renewapi"` APIAddress string `json:"api_url_address" default:"https://api.oplist.org/alicloud/renewapi"`
ClientID string `json:"client_id" help:"Keep it empty if you don't have one"` ClientID string `json:"client_id" help:"Keep it empty if you don't have one"`
ClientSecret string `json:"client_secret" help:"Keep it empty if you don't have one"` ClientSecret string `json:"client_secret" help:"Keep it empty if you don't have one"`
@ -24,12 +25,6 @@ type Addition struct {
var config = driver.Config{ var config = driver.Config{
Name: "AliyundriveOpen", Name: "AliyundriveOpen",
LocalSort: false,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: false,
NeedMs: false,
DefaultRoot: "root", DefaultRoot: "root",
NoOverwriteUpload: true, NoOverwriteUpload: true,
} }

View File

@ -50,10 +50,10 @@ func calPartSize(fileSize int64) int64 {
return partSize return partSize
} }
func (d *AliyundriveOpen) getUploadUrl(count int, fileId, uploadId string) ([]PartInfo, error) { func (d *AliyundriveOpen) getUploadUrl(ctx context.Context, count int, fileId, uploadId string) ([]PartInfo, error) {
partInfoList := makePartInfos(count) partInfoList := makePartInfos(count)
var resp CreateResp var resp CreateResp
_, err := d.request("/adrive/v1.0/openFile/getUploadUrl", http.MethodPost, func(req *resty.Request) { _, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/getUploadUrl", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{ req.SetBody(base.Json{
"drive_id": d.DriveId, "drive_id": d.DriveId,
"file_id": fileId, "file_id": fileId,
@ -69,7 +69,7 @@ func (d *AliyundriveOpen) uploadPart(ctx context.Context, r io.Reader, partInfo
if d.InternalUpload { if d.InternalUpload {
uploadUrl = strings.ReplaceAll(uploadUrl, "https://cn-beijing-data.aliyundrive.net/", "http://ccp-bj29-bj-1592982087.oss-cn-beijing-internal.aliyuncs.com/") uploadUrl = strings.ReplaceAll(uploadUrl, "https://cn-beijing-data.aliyundrive.net/", "http://ccp-bj29-bj-1592982087.oss-cn-beijing-internal.aliyuncs.com/")
} }
req, err := http.NewRequestWithContext(ctx, "PUT", uploadUrl, r) req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, r)
if err != nil { if err != nil {
return err return err
} }
@ -84,10 +84,10 @@ func (d *AliyundriveOpen) uploadPart(ctx context.Context, r io.Reader, partInfo
return nil return nil
} }
func (d *AliyundriveOpen) completeUpload(fileId, uploadId string) (model.Obj, error) { func (d *AliyundriveOpen) completeUpload(ctx context.Context, fileId, uploadId string) (model.Obj, error) {
// 3. complete // 3. complete
var newFile File var newFile File
_, err := d.request("/adrive/v1.0/openFile/complete", http.MethodPost, func(req *resty.Request) { _, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/complete", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{ req.SetBody(base.Json{
"drive_id": d.DriveId, "drive_id": d.DriveId,
"file_id": fileId, "file_id": fileId,
@ -137,11 +137,8 @@ func (d *AliyundriveOpen) calProofCode(stream model.FileStreamer) (string, error
} }
buf := make([]byte, length) buf := make([]byte, length)
n, err := io.ReadFull(reader, buf) n, err := io.ReadFull(reader, buf)
if err == io.ErrUnexpectedEOF { if n != int(length) {
return "", fmt.Errorf("can't read data, expected=%d, got=%d", len(buf), n) return "", fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", length, n, err)
}
if err != nil {
return "", err
} }
return base64.StdEncoding.EncodeToString(buf), nil return base64.StdEncoding.EncodeToString(buf), nil
} }
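
The rewritten check above relies on io.ReadFull's contract: it returns io.ErrUnexpectedEOF (or io.EOF when nothing was read) whenever n < len(buf), so comparing n against the expected length catches every short read in one branch, and %w keeps the underlying error wrapped. A small illustration:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	buf := make([]byte, 8)
	// Only 3 of the expected 8 bytes are available.
	n, err := io.ReadFull(strings.NewReader("abc"), buf)
	if n != len(buf) {
		// err is io.ErrUnexpectedEOF here; %w keeps it inspectable with errors.Is.
		fmt.Println(fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", len(buf), n, err))
	}
}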
@ -183,7 +180,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
createData["pre_hash"] = hash createData["pre_hash"] = hash
} }
var createResp CreateResp var createResp CreateResp
_, err, e := d.requestReturnErrResp("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) { _, err, e := d.requestReturnErrResp(ctx, limiterOther, "/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
req.SetBody(createData).SetResult(&createResp) req.SetBody(createData).SetResult(&createResp)
}) })
if err != nil { if err != nil {
@ -194,7 +191,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
hash := stream.GetHash().GetHash(utils.SHA1) hash := stream.GetHash().GetHash(utils.SHA1)
if len(hash) != utils.SHA1.Width { if len(hash) != utils.SHA1.Width {
_, hash, err = streamPkg.CacheFullInTempFileAndHash(stream, utils.SHA1) _, hash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA1)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -208,7 +205,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
if err != nil { if err != nil {
return nil, fmt.Errorf("cal proof code error: %s", err.Error()) return nil, fmt.Errorf("cal proof code error: %s", err.Error())
} }
_, err = d.request("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) { _, err = d.request(ctx, limiterOther, "/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
req.SetBody(createData).SetResult(&createResp) req.SetBody(createData).SetResult(&createResp)
}) })
if err != nil { if err != nil {
@ -219,17 +216,20 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
if !createResp.RapidUpload { if !createResp.RapidUpload {
// 2. normal upload // 2. normal upload
log.Debugf("[aliyundive_open] normal upload") log.Debugf("[aliyundive_open] normal upload")
ss, err := streamPkg.NewStreamSectionReader(stream, int(partSize), &up)
if err != nil {
return nil, err
}
preTime := time.Now() preTime := time.Now()
var offset, length int64 = 0, partSize var offset, length int64 = 0, partSize
//var length
for i := 0; i < len(createResp.PartInfoList); i++ { for i := 0; i < len(createResp.PartInfoList); i++ {
if utils.IsCanceled(ctx) { if utils.IsCanceled(ctx) {
return nil, ctx.Err() return nil, ctx.Err()
} }
// refresh upload url if 50 minutes passed // refresh upload url if 50 minutes passed
if time.Since(preTime) > 50*time.Minute { if time.Since(preTime) > 50*time.Minute {
createResp.PartInfoList, err = d.getUploadUrl(count, createResp.FileId, createResp.UploadId) createResp.PartInfoList, err = d.getUploadUrl(ctx, count, createResp.FileId, createResp.UploadId)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -238,22 +238,19 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
if remain := stream.GetSize() - offset; length > remain { if remain := stream.GetSize() - offset; length > remain {
length = remain length = remain
} }
rd := utils.NewMultiReadable(io.LimitReader(stream, partSize)) rd, err := ss.GetSectionReader(offset, length)
if rapidUpload {
srd, err := stream.RangeRead(http_range.Range{Start: offset, Length: length})
if err != nil { if err != nil {
return nil, err return nil, err
} }
rd = utils.NewMultiReadable(srd)
}
err = retry.Do(func() error {
_ = rd.Reset()
rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd) rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
err = retry.Do(func() error {
rd.Seek(0, io.SeekStart)
return d.uploadPart(ctx, rateLimitedRd, createResp.PartInfoList[i]) return d.uploadPart(ctx, rateLimitedRd, createResp.PartInfoList[i])
}, },
retry.Attempts(3), retry.Attempts(3),
retry.DelayType(retry.BackOffDelay), retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second)) retry.Delay(time.Second))
ss.FreeSectionReader(rd)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -266,5 +263,5 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
log.Debugf("[aliyundrive_open] create file success, resp: %+v", createResp) log.Debugf("[aliyundrive_open] create file success, resp: %+v", createResp)
// 3. complete // 3. complete
return d.completeUpload(createResp.FileId, createResp.UploadId) return d.completeUpload(ctx, createResp.FileId, createResp.UploadId)
} }
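
The reworked loop above depends on two properties of the new section reader: each part is exposed as a seekable window, so a failed attempt rewinds with Seek(0, io.SeekStart) instead of re-buffering, and FreeSectionReader recycles the buffer once the part succeeds. A minimal sketch of that retry shape, with io.NewSectionReader standing in for the stream package's reader and a dummy uploadPart:

package main

import (
	"fmt"
	"io"
	"strings"
	"time"

	"github.com/avast/retry-go"
)

// uploadPart stands in for the real HTTP PUT of one chunk.
func uploadPart(r io.Reader) error {
	_, err := io.Copy(io.Discard, r)
	return err
}

func main() {
	data := strings.NewReader("0123456789")
	const partSize int64 = 4
	for off := int64(0); off < data.Size(); off += partSize {
		length := data.Size() - off
		if length > partSize {
			length = partSize
		}
		rd := io.NewSectionReader(data, off, length) // one reusable window per part
		err := retry.Do(func() error {
			rd.Seek(0, io.SeekStart) // rewind before every attempt
			return uploadPart(rd)
		},
			retry.Attempts(3),
			retry.DelayType(retry.BackOffDelay),
			retry.Delay(time.Second))
		if err != nil {
			fmt.Println("part failed:", err)
			return
		}
	}
	fmt.Println("all parts uploaded")
}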

View File

@ -19,7 +19,7 @@ import (
// do others that not defined in Driver interface // do others that not defined in Driver interface
func (d *AliyundriveOpen) _refreshToken() (string, string, error) { func (d *AliyundriveOpen) _refreshToken(ctx context.Context) (string, string, error) {
if d.UseOnlineAPI && d.APIAddress != "" { if d.UseOnlineAPI && d.APIAddress != "" {
u := d.APIAddress u := d.APIAddress
var resp struct { var resp struct {
@ -27,13 +27,23 @@ func (d *AliyundriveOpen) _refreshToken() (string, string, error) {
AccessToken string `json:"access_token"` AccessToken string `json:"access_token"`
ErrorMessage string `json:"text"` ErrorMessage string `json:"text"`
} }
_, err := base.RestyClient.R().
// Set driver_txt according to the AlipanType option
driverTxt := "alicloud_qr"
if d.AlipanType == "alipanTV" {
driverTxt = "alicloud_tv"
}
err := d.wait(ctx, limiterOther)
if err != nil {
return "", "", err
}
_, err = base.RestyClient.R().
SetHeader("User-Agent", "Mozilla/5.0 (Macintosh; Apple macOS 15_5) AppleWebKit/537.36 (KHTML, like Gecko) Safari/537.36 Chrome/138.0.0.0 Openlist/425.6.30"). SetHeader("User-Agent", "Mozilla/5.0 (Macintosh; Apple macOS 15_5) AppleWebKit/537.36 (KHTML, like Gecko) Safari/537.36 Chrome/138.0.0.0 Openlist/425.6.30").
SetResult(&resp). SetResult(&resp).
SetQueryParams(map[string]string{ SetQueryParams(map[string]string{
"refresh_ui": d.RefreshToken, "refresh_ui": d.RefreshToken,
"server_use": "true", "server_use": "true",
"driver_txt": "alicloud_qr", "driver_txt": driverTxt,
}). }).
Get(u) Get(u)
if err != nil { if err != nil {
@ -47,11 +57,14 @@ func (d *AliyundriveOpen) _refreshToken() (string, string, error) {
} }
return resp.RefreshToken, resp.AccessToken, nil return resp.RefreshToken, resp.AccessToken, nil
} }
// Local refresh logic: client_id and client_secret are required // Local refresh logic: client_id and client_secret are required
if d.ClientID == "" || d.ClientSecret == "" { if d.ClientID == "" || d.ClientSecret == "" {
return "", "", fmt.Errorf("empty ClientID or ClientSecret") return "", "", fmt.Errorf("empty ClientID or ClientSecret")
} }
err := d.wait(ctx, limiterOther)
if err != nil {
return "", "", err
}
url := API_URL + "/oauth/access_token" url := API_URL + "/oauth/access_token"
//var resp base.TokenResp //var resp base.TokenResp
var e ErrResp var e ErrResp
@ -103,18 +116,18 @@ func getSub(token string) (string, error) {
return utils.Json.Get(bs, "sub").ToString(), nil return utils.Json.Get(bs, "sub").ToString(), nil
} }
func (d *AliyundriveOpen) refreshToken() error { func (d *AliyundriveOpen) refreshToken(ctx context.Context) error {
if d.ref != nil { if d.ref != nil {
return d.ref.refreshToken() return d.ref.refreshToken(ctx)
} }
refresh, access, err := d._refreshToken() refresh, access, err := d._refreshToken(ctx)
for i := 0; i < 3; i++ { for i := 0; i < 3; i++ {
if err == nil { if err == nil {
break break
} else { } else {
log.Errorf("[ali_open] failed to refresh token: %s", err) log.Errorf("[ali_open] failed to refresh token: %s", err)
} }
refresh, access, err = d._refreshToken() refresh, access, err = d._refreshToken(ctx)
} }
if err != nil { if err != nil {
return err return err
@ -125,12 +138,12 @@ func (d *AliyundriveOpen) refreshToken() error {
return nil return nil
} }
func (d *AliyundriveOpen) request(uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error) { func (d *AliyundriveOpen) request(ctx context.Context, limitTy limiterType, uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error) {
b, err, _ := d.requestReturnErrResp(uri, method, callback, retry...) b, err, _ := d.requestReturnErrResp(ctx, limitTy, uri, method, callback, retry...)
return b, err return b, err
} }
func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error, *ErrResp) { func (d *AliyundriveOpen) requestReturnErrResp(ctx context.Context, limitTy limiterType, uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error, *ErrResp) {
req := base.RestyClient.R() req := base.RestyClient.R()
// TODO check whether access_token is expired // TODO check whether access_token is expired
req.SetHeader("Authorization", "Bearer "+d.getAccessToken()) req.SetHeader("Authorization", "Bearer "+d.getAccessToken())
@ -142,6 +155,10 @@ func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base
} }
var e ErrResp var e ErrResp
req.SetError(&e) req.SetError(&e)
err := d.wait(ctx, limitTy)
if err != nil {
return nil, err, nil
}
res, err := req.Execute(method, API_URL+uri) res, err := req.Execute(method, API_URL+uri)
if err != nil { if err != nil {
if res != nil { if res != nil {
@ -152,11 +169,11 @@ func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base
isRetry := len(retry) > 0 && retry[0] isRetry := len(retry) > 0 && retry[0]
if e.Code != "" { if e.Code != "" {
if !isRetry && (utils.SliceContains([]string{"AccessTokenInvalid", "AccessTokenExpired", "I400JD"}, e.Code) || d.getAccessToken() == "") { if !isRetry && (utils.SliceContains([]string{"AccessTokenInvalid", "AccessTokenExpired", "I400JD"}, e.Code) || d.getAccessToken() == "") {
err = d.refreshToken() err = d.refreshToken(ctx)
if err != nil { if err != nil {
return nil, err, nil return nil, err, nil
} }
return d.requestReturnErrResp(uri, method, callback, true) return d.requestReturnErrResp(ctx, limitTy, uri, method, callback, true)
} }
return nil, fmt.Errorf("%s:%s", e.Code, e.Message), &e return nil, fmt.Errorf("%s:%s", e.Code, e.Message), &e
} }
@ -165,7 +182,7 @@ func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base
func (d *AliyundriveOpen) list(ctx context.Context, data base.Json) (*Files, error) { func (d *AliyundriveOpen) list(ctx context.Context, data base.Json) (*Files, error) {
var resp Files var resp Files
_, err := d.request("/adrive/v1.0/openFile/list", http.MethodPost, func(req *resty.Request) { _, err := d.request(ctx, limiterList, "/adrive/v1.0/openFile/list", http.MethodPost, func(req *resty.Request) {
req.SetBody(data).SetResult(&resp) req.SetBody(data).SetResult(&resp)
}) })
if err != nil { if err != nil {
@ -194,7 +211,7 @@ func (d *AliyundriveOpen) getFiles(ctx context.Context, fileId string) ([]File,
//"video_thumbnail_width": 480, //"video_thumbnail_width": 480,
//"image_thumbnail_width": 480, //"image_thumbnail_width": 480,
} }
resp, err := d.limitList(ctx, data) resp, err := d.list(ctx, data)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -2,7 +2,6 @@ package aliyundrive_share
import ( import (
"context" "context"
"fmt"
"net/http" "net/http"
"time" "time"
@ -12,7 +11,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/cron" "github.com/OpenListTeam/OpenList/v4/pkg/cron"
"github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/rateg"
"github.com/go-resty/resty/v2" "github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
@ -25,8 +23,7 @@ type AliyundriveShare struct {
DriveId string DriveId string
cron *cron.Cron cron *cron.Cron
limitList func(ctx context.Context, dir model.Obj) ([]model.Obj, error) limiter *limiter
limitLink func(ctx context.Context, file model.Obj) (*model.Link, error)
} }
func (d *AliyundriveShare) Config() driver.Config { func (d *AliyundriveShare) Config() driver.Config {
@ -38,29 +35,26 @@ func (d *AliyundriveShare) GetAddition() driver.Additional {
} }
func (d *AliyundriveShare) Init(ctx context.Context) error { func (d *AliyundriveShare) Init(ctx context.Context) error {
err := d.refreshToken() d.limiter = getLimiter()
err := d.refreshToken(ctx)
if err != nil { if err != nil {
d.limiter.free()
d.limiter = nil
return err return err
} }
err = d.getShareToken() err = d.getShareToken(ctx)
if err != nil { if err != nil {
d.limiter.free()
d.limiter = nil
return err return err
} }
d.cron = cron.NewCron(time.Hour * 2) d.cron = cron.NewCron(time.Hour * 2)
d.cron.Do(func() { d.cron.Do(func() {
err := d.refreshToken() err := d.refreshToken(ctx)
if err != nil { if err != nil {
log.Errorf("%+v", err) log.Errorf("%+v", err)
} }
}) })
d.limitList = rateg.LimitFnCtx(d.list, rateg.LimitFnOption{
Limit: 4,
Bucket: 1,
})
d.limitLink = rateg.LimitFnCtx(d.link, rateg.LimitFnOption{
Limit: 1,
Bucket: 1,
})
return nil return nil
} }
@ -68,19 +62,14 @@ func (d *AliyundriveShare) Drop(ctx context.Context) error {
if d.cron != nil { if d.cron != nil {
d.cron.Stop() d.cron.Stop()
} }
d.limiter.free()
d.limiter = nil
d.DriveId = "" d.DriveId = ""
return nil return nil
} }
func (d *AliyundriveShare) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { func (d *AliyundriveShare) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
if d.limitList == nil { files, err := d.getFiles(ctx, dir.GetID())
return nil, fmt.Errorf("driver not init")
}
return d.limitList(ctx, dir)
}
func (d *AliyundriveShare) list(ctx context.Context, dir model.Obj) ([]model.Obj, error) {
files, err := d.getFiles(dir.GetID())
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -90,13 +79,6 @@ func (d *AliyundriveShare) list(ctx context.Context, dir model.Obj) ([]model.Obj
} }
func (d *AliyundriveShare) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { func (d *AliyundriveShare) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if d.limitLink == nil {
return nil, fmt.Errorf("driver not init")
}
return d.limitLink(ctx, file)
}
func (d *AliyundriveShare) link(ctx context.Context, file model.Obj) (*model.Link, error) {
data := base.Json{ data := base.Json{
"drive_id": d.DriveId, "drive_id": d.DriveId,
"file_id": file.GetID(), "file_id": file.GetID(),
@ -105,7 +87,7 @@ func (d *AliyundriveShare) link(ctx context.Context, file model.Obj) (*model.Lin
"share_id": d.ShareId, "share_id": d.ShareId,
} }
var resp ShareLinkResp var resp ShareLinkResp
_, err := d.request("https://api.alipan.com/v2/file/get_share_link_download_url", http.MethodPost, func(req *resty.Request) { _, err := d.request(ctx, limiterLink, "https://api.alipan.com/v2/file/get_share_link_download_url", http.MethodPost, func(req *resty.Request) {
req.SetHeader(CanaryHeaderKey, CanaryHeaderValue).SetBody(data).SetResult(&resp) req.SetHeader(CanaryHeaderKey, CanaryHeaderValue).SetBody(data).SetResult(&resp)
}) })
if err != nil { if err != nil {
@ -135,7 +117,7 @@ func (d *AliyundriveShare) Other(ctx context.Context, args model.OtherArgs) (int
default: default:
return nil, errs.NotSupport return nil, errs.NotSupport
} }
_, err := d.request(url, http.MethodPost, func(req *resty.Request) { _, err := d.request(ctx, limiterOther, url, http.MethodPost, func(req *resty.Request) {
req.SetBody(data).SetResult(&resp) req.SetBody(data).SetResult(&resp)
}) })
if err != nil { if err != nil {

View File

@ -0,0 +1,67 @@
package aliyundrive_share
import (
"context"
"fmt"
"golang.org/x/time/rate"
)
// See issue https://github.com/OpenListTeam/OpenList/issues/724
// There seems to be no per-user limit.
type limiterType int
const (
limiterList limiterType = iota
limiterLink
limiterOther
)
const (
listRateLimit = 3.9 // 4 per second in the document, but we use 3.9 per second to be safe
linkRateLimit = 0.9 // 1 per second in the document, but we use 0.9 per second to be safe
otherRateLimit = 14.9 // 15 per second in the document, but we use 14.9 per second to be safe
)
type limiter struct {
list *rate.Limiter
link *rate.Limiter
other *rate.Limiter
}
func getLimiter() *limiter {
return &limiter{
list: rate.NewLimiter(rate.Limit(listRateLimit), 1),
link: rate.NewLimiter(rate.Limit(linkRateLimit), 1),
other: rate.NewLimiter(rate.Limit(otherRateLimit), 1),
}
}
func (l *limiter) wait(ctx context.Context, typ limiterType) error {
if l == nil {
return fmt.Errorf("driver not init")
}
switch typ {
case limiterList:
return l.list.Wait(ctx)
case limiterLink:
return l.link.Wait(ctx)
case limiterOther:
return l.other.Wait(ctx)
default:
return fmt.Errorf("unknown limiter type")
}
}
func (l *limiter) free() {
}
func (d *AliyundriveShare) wait(ctx context.Context, typ limiterType) error {
if d == nil {
return fmt.Errorf("driver not init")
}
//if d.ref != nil {
// return d.ref.wait(ctx, typ) // If this is a reference driver, wait on the reference driver.
//}
return d.limiter.wait(ctx, typ)
}
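
Unlike the aliyundrive_open variant, this limiter is per driver instance, so free() is a no-op and the reference-driver branch stays commented out. What both variants get from rate.Limiter.Wait is context awareness: a cancelled or expiring request stops queueing instead of blocking forever. A small illustration (the 500ms timeout is arbitrary):

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	link := rate.NewLimiter(rate.Limit(0.9), 1) // ~1 request/s, burst 1
	ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
	defer cancel()

	fmt.Println(link.Wait(ctx)) // token available immediately: <nil>
	fmt.Println(link.Wait(ctx)) // next token is ~1.1s away, beyond the deadline:
	// rate: Wait(n=1) would exceed context deadline
}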

View File

@ -1,6 +1,7 @@
package aliyundrive_share package aliyundrive_share
import ( import (
"context"
"errors" "errors"
"fmt" "fmt"
@ -15,11 +16,15 @@ const (
CanaryHeaderValue = "client=web,app=share,version=v2.3.1" CanaryHeaderValue = "client=web,app=share,version=v2.3.1"
) )
func (d *AliyundriveShare) refreshToken() error { func (d *AliyundriveShare) refreshToken(ctx context.Context) error {
err := d.wait(ctx, limiterOther)
if err != nil {
return err
}
url := "https://auth.alipan.com/v2/account/token" url := "https://auth.alipan.com/v2/account/token"
var resp base.TokenResp var resp base.TokenResp
var e ErrorResp var e ErrorResp
_, err := base.RestyClient.R(). _, err = base.RestyClient.R().
SetBody(base.Json{"refresh_token": d.RefreshToken, "grant_type": "refresh_token"}). SetBody(base.Json{"refresh_token": d.RefreshToken, "grant_type": "refresh_token"}).
SetResult(&resp). SetResult(&resp).
SetError(&e). SetError(&e).
@ -36,7 +41,11 @@ func (d *AliyundriveShare) refreshToken() error {
} }
// do others that not defined in Driver interface // do others that not defined in Driver interface
func (d *AliyundriveShare) getShareToken() error { func (d *AliyundriveShare) getShareToken(ctx context.Context) error {
err := d.wait(ctx, limiterOther)
if err != nil {
return err
}
data := base.Json{ data := base.Json{
"share_id": d.ShareId, "share_id": d.ShareId,
} }
@ -45,7 +54,7 @@ func (d *AliyundriveShare) getShareToken() error {
} }
var e ErrorResp var e ErrorResp
var resp ShareTokenResp var resp ShareTokenResp
_, err := base.RestyClient.R(). _, err = base.RestyClient.R().
SetResult(&resp).SetError(&e).SetBody(data). SetResult(&resp).SetError(&e).SetBody(data).
Post("https://api.alipan.com/v2/share_link/get_share_token") Post("https://api.alipan.com/v2/share_link/get_share_token")
if err != nil { if err != nil {
@ -58,7 +67,7 @@ func (d *AliyundriveShare) getShareToken() error {
return nil return nil
} }
func (d *AliyundriveShare) request(url, method string, callback base.ReqCallback) ([]byte, error) { func (d *AliyundriveShare) request(ctx context.Context, limitTy limiterType, url, method string, callback base.ReqCallback) ([]byte, error) {
var e ErrorResp var e ErrorResp
req := base.RestyClient.R(). req := base.RestyClient.R().
SetError(&e). SetError(&e).
@ -71,6 +80,10 @@ func (d *AliyundriveShare) request(url, method string, callback base.ReqCallback
} else { } else {
req.SetBody("{}") req.SetBody("{}")
} }
err := d.wait(ctx, limitTy)
if err != nil {
return nil, err
}
resp, err := req.Execute(method, url) resp, err := req.Execute(method, url)
if err != nil { if err != nil {
return nil, err return nil, err
@ -78,14 +91,14 @@ func (d *AliyundriveShare) request(url, method string, callback base.ReqCallback
if e.Code != "" { if e.Code != "" {
if e.Code == "AccessTokenInvalid" || e.Code == "ShareLinkTokenInvalid" { if e.Code == "AccessTokenInvalid" || e.Code == "ShareLinkTokenInvalid" {
if e.Code == "AccessTokenInvalid" { if e.Code == "AccessTokenInvalid" {
err = d.refreshToken() err = d.refreshToken(ctx)
} else { } else {
err = d.getShareToken() err = d.getShareToken(ctx)
} }
if err != nil { if err != nil {
return nil, err return nil, err
} }
return d.request(url, method, callback) return d.request(ctx, limitTy, url, method, callback)
} else { } else {
return nil, errors.New(e.Code + ": " + e.Message) return nil, errors.New(e.Code + ": " + e.Message)
} }
@ -93,7 +106,7 @@ func (d *AliyundriveShare) request(url, method string, callback base.ReqCallback
return resp.Body(), nil return resp.Body(), nil
} }
func (d *AliyundriveShare) getFiles(fileId string) ([]File, error) { func (d *AliyundriveShare) getFiles(ctx context.Context, fileId string) ([]File, error) {
files := make([]File, 0) files := make([]File, 0)
data := base.Json{ data := base.Json{
"image_thumbnail_process": "image/resize,w_160/format,jpeg", "image_thumbnail_process": "image/resize,w_160/format,jpeg",
@ -110,6 +123,10 @@ func (d *AliyundriveShare) getFiles(fileId string) ([]File, error) {
if data["marker"] == "first" { if data["marker"] == "first" {
data["marker"] = "" data["marker"] = ""
} }
err := d.wait(ctx, limiterList)
if err != nil {
return nil, err
}
var e ErrorResp var e ErrorResp
var resp ListResp var resp ListResp
res, err := base.RestyClient.R(). res, err := base.RestyClient.R().
@ -123,11 +140,11 @@ func (d *AliyundriveShare) getFiles(fileId string) ([]File, error) {
log.Debugf("aliyundrive share get files: %s", res.String()) log.Debugf("aliyundrive share get files: %s", res.String())
if e.Code != "" { if e.Code != "" {
if e.Code == "AccessTokenInvalid" || e.Code == "ShareLinkTokenInvalid" { if e.Code == "AccessTokenInvalid" || e.Code == "ShareLinkTokenInvalid" {
err = d.getShareToken() err = d.getShareToken(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return d.getFiles(fileId) return d.getFiles(ctx, fileId)
} }
return nil, errors.New(e.Message) return nil, errors.New(e.Message)
} }

View File

@ -203,11 +203,12 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
streamSize := stream.GetSize() streamSize := stream.GetSize()
sliceSize := d.getSliceSize(streamSize) sliceSize := d.getSliceSize(streamSize)
count := int(streamSize / sliceSize) count := 1
if streamSize > sliceSize {
count = int((streamSize + sliceSize - 1) / sliceSize)
}
lastBlockSize := streamSize % sliceSize lastBlockSize := streamSize % sliceSize
if lastBlockSize > 0 { if lastBlockSize == 0 {
count++
} else {
lastBlockSize = sliceSize lastBlockSize = sliceSize
} }
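
This hunk and the BaiduPhoto one below apply the same fix: the divide-then-maybe-increment count is replaced by the ceiling-division idiom, seeded with 1 so a zero-byte stream still yields one slice (the old code produced a count of 0 for empty files), and the lastBlockSize branch is inverted to match. Spelled out with an illustrative sliceSize of 4:

package main

import "fmt"

func sliceCount(streamSize, sliceSize int64) (count int, lastBlockSize int64) {
	count = 1 // even an empty stream uploads one slice
	if streamSize > sliceSize {
		count = int((streamSize + sliceSize - 1) / sliceSize) // ceil(streamSize/sliceSize)
	}
	lastBlockSize = streamSize % sliceSize
	if lastBlockSize == 0 {
		lastBlockSize = sliceSize // exact multiple: the final slice is full-sized
	}
	return count, lastBlockSize
}

func main() {
	for _, size := range []int64{0, 3, 4, 9} {
		c, last := sliceCount(size, 4)
		fmt.Printf("size=%d -> count=%d lastBlockSize=%d\n", size, c, last)
	}
	// size=0 -> count=1 lastBlockSize=4
	// size=3 -> count=1 lastBlockSize=3
	// size=4 -> count=1 lastBlockSize=4
	// size=9 -> count=3 lastBlockSize=1
}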

View File

@ -262,11 +262,12 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
// Compute the required values // Compute the required values
streamSize := stream.GetSize() streamSize := stream.GetSize()
count := int(streamSize / DEFAULT) count := 1
if streamSize > DEFAULT {
count = int((streamSize + DEFAULT - 1) / DEFAULT)
}
lastBlockSize := streamSize % DEFAULT lastBlockSize := streamSize % DEFAULT
if lastBlockSize > 0 { if lastBlockSize == 0 {
count++
} else {
lastBlockSize = DEFAULT lastBlockSize = DEFAULT
} }

View File

@ -255,7 +255,7 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStr
}, },
UpdateProgress: up, UpdateProgress: up,
}) })
req, err := http.NewRequestWithContext(ctx, "POST", "https://pan-yz.chaoxing.com/upload", r) req, err := http.NewRequestWithContext(ctx, http.MethodPost, "https://pan-yz.chaoxing.com/upload", r)
if err != nil { if err != nil {
return err return err
} }

View File

@ -32,7 +32,6 @@ func init() {
config: driver.Config{ config: driver.Config{
Name: "ChaoXingGroupDrive", Name: "ChaoXingGroupDrive",
OnlyProxy: true, OnlyProxy: true,
OnlyLocal: false,
DefaultRoot: "-1", DefaultRoot: "-1",
NoOverwriteUpload: true, NoOverwriteUpload: true,
}, },

View File

@ -167,7 +167,7 @@ func (d *ChaoXing) Login() (string, error) {
return "", err return "", err
} }
// Create the request // Create the request
req, err := http.NewRequest("POST", "https://passport2.chaoxing.com/fanyalogin", body) req, err := http.NewRequest(http.MethodPost, "https://passport2.chaoxing.com/fanyalogin", body)
if err != nil { if err != nil {
return "", err return "", err
} }

View File

@ -20,6 +20,7 @@ type Addition struct {
var config = driver.Config{ var config = driver.Config{
Name: "Cloudreve", Name: "Cloudreve",
DefaultRoot: "/", DefaultRoot: "/",
LocalSort: true,
} }
func init() { func init() {

View File

@ -18,8 +18,10 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/driver" "github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/setting" "github.com/OpenListTeam/OpenList/v4/internal/setting"
streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/cookie" "github.com/OpenListTeam/OpenList/v4/pkg/cookie"
"github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2" "github.com/go-resty/resty/v2"
jsoniter "github.com/json-iterator/go" jsoniter "github.com/json-iterator/go"
) )
@ -235,13 +237,16 @@ func (d *Cloudreve) upLocal(ctx context.Context, stream model.FileStreamer, u Up
} }
func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error { func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
DEFAULT := int64(u.ChunkSize)
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
if err != nil {
return err
}
uploadUrl := u.UploadURLs[0] uploadUrl := u.UploadURLs[0]
credential := u.Credential credential := u.Credential
var finish int64 = 0 var finish int64 = 0
var chunk int = 0 var chunk int = 0
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < stream.GetSize() { for finish < stream.GetSize() {
if utils.IsCanceled(ctx) { if utils.IsCanceled(ctx) {
return ctx.Err() return ctx.Err()
@ -249,30 +254,28 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
left := stream.GetSize() - finish left := stream.GetSize() - finish
byteSize := min(left, DEFAULT) byteSize := min(left, DEFAULT)
utils.Log.Debugf("[Cloudreve-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()) utils.Log.Debugf("[Cloudreve-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
byteData := make([]byte, byteSize) rd, err := ss.GetSectionReader(finish, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
if err != nil { if err != nil {
return err return err
} }
req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk), err = retry.Do(
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData))) func() error {
rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadUrl+"?chunk="+strconv.Itoa(chunk),
driver.NewLimitedUploadStream(ctx, rd))
if err != nil { if err != nil {
return err return err
} }
req = req.WithContext(ctx)
req.ContentLength = byteSize req.ContentLength = byteSize
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Authorization", fmt.Sprint(credential)) req.Header.Set("Authorization", fmt.Sprint(credential))
req.Header.Set("User-Agent", d.getUA()) req.Header.Set("User-Agent", d.getUA())
err = func() error {
res, err := base.HttpClient.Do(req) res, err := base.HttpClient.Do(req)
if err != nil { if err != nil {
return err return err
} }
defer res.Body.Close() defer res.Body.Close()
if res.StatusCode != 200 { if res.StatusCode != 200 {
return errors.New(res.Status) return fmt.Errorf("server error: %d", res.StatusCode)
} }
body, err := io.ReadAll(res.Body) body, err := io.ReadAll(res.Body)
if err != nil { if err != nil {
@ -287,31 +290,31 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
return errors.New(up.Msg) return errors.New(up.Msg)
} }
return nil return nil
}() },
if err == nil { retry.Attempts(3),
retryCount = 0 retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.FreeSectionReader(rd)
if err != nil {
return err
}
finish += byteSize finish += byteSize
up(float64(finish) * 100 / float64(stream.GetSize())) up(float64(finish) * 100 / float64(stream.GetSize()))
chunk++ chunk++
} else {
retryCount++
if retryCount > maxRetries {
return fmt.Errorf("upload failed after %d retries due to server errors, error: %s", maxRetries, err)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("[Cloudreve-Remote] server errors while uploading, retrying after %v...", backoff)
time.Sleep(backoff)
}
} }
return nil return nil
} }
func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error { func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
DEFAULT := int64(u.ChunkSize)
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
if err != nil {
return err
}
uploadUrl := u.UploadURLs[0] uploadUrl := u.UploadURLs[0]
var finish int64 = 0 var finish int64 = 0
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < stream.GetSize() { for finish < stream.GetSize() {
if utils.IsCanceled(ctx) { if utils.IsCanceled(ctx) {
return ctx.Err() return ctx.Err()
@ -319,47 +322,46 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
left := stream.GetSize() - finish left := stream.GetSize() - finish
byteSize := min(left, DEFAULT) byteSize := min(left, DEFAULT)
utils.Log.Debugf("[Cloudreve-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()) utils.Log.Debugf("[Cloudreve-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
byteData := make([]byte, byteSize) rd, err := ss.GetSectionReader(finish, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
if err != nil { if err != nil {
return err return err
} }
req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData))) err = retry.Do(
func() error {
rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, rd))
if err != nil { if err != nil {
return err return err
} }
req = req.WithContext(ctx)
req.ContentLength = byteSize req.ContentLength = byteSize
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())) req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()))
req.Header.Set("User-Agent", d.getUA()) req.Header.Set("User-Agent", d.getUA())
res, err := base.HttpClient.Do(req) res, err := base.HttpClient.Do(req)
if err != nil { if err != nil {
return err return err
} }
defer res.Body.Close()
// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession // https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
switch { switch {
case res.StatusCode >= 500 && res.StatusCode <= 504: case res.StatusCode >= 500 && res.StatusCode <= 504:
retryCount++ return fmt.Errorf("server error: %d", res.StatusCode)
if retryCount > maxRetries {
res.Body.Close()
return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("[Cloudreve-OneDrive] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
time.Sleep(backoff)
case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200: case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200:
data, _ := io.ReadAll(res.Body) data, _ := io.ReadAll(res.Body)
res.Body.Close()
return errors.New(string(data)) return errors.New(string(data))
default: default:
res.Body.Close() return nil
retryCount = 0 }
}, retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.FreeSectionReader(rd)
if err != nil {
return err
}
finish += byteSize finish += byteSize
up(float64(finish) * 100 / float64(stream.GetSize())) up(float64(finish) * 100 / float64(stream.GetSize()))
} }
}
// After a successful upload, send the callback request // After a successful upload, send the callback request
return d.request(http.MethodPost, "/callback/onedrive/finish/"+u.SessionID, func(req *resty.Request) { return d.request(http.MethodPost, "/callback/onedrive/finish/"+u.SessionID, func(req *resty.Request) {
req.SetBody("{}") req.SetBody("{}")
@ -367,12 +369,15 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
} }
func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error { func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
DEFAULT := int64(u.ChunkSize)
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
if err != nil {
return err
}
var finish int64 = 0 var finish int64 = 0
var chunk int = 0 var chunk int = 0
var etags []string var etags []string
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < stream.GetSize() { for finish < stream.GetSize() {
if utils.IsCanceled(ctx) { if utils.IsCanceled(ctx) {
return ctx.Err() return ctx.Err()
@ -380,19 +385,20 @@ func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u Uploa
left := stream.GetSize() - finish left := stream.GetSize() - finish
byteSize := min(left, DEFAULT) byteSize := min(left, DEFAULT)
utils.Log.Debugf("[Cloudreve-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()) utils.Log.Debugf("[Cloudreve-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
byteData := make([]byte, byteSize) rd, err := ss.GetSectionReader(finish, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
if err != nil { if err != nil {
return err return err
} }
req, err := http.NewRequest("PUT", u.UploadURLs[chunk], err = retry.Do(
driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData))) func() error {
rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, u.UploadURLs[chunk],
driver.NewLimitedUploadStream(ctx, rd))
if err != nil { if err != nil {
return err return err
} }
req = req.WithContext(ctx)
req.ContentLength = byteSize req.ContentLength = byteSize
req.Header.Set("User-Agent", d.getUA())
res, err := base.HttpClient.Do(req) res, err := base.HttpClient.Do(req)
if err != nil { if err != nil {
return err return err
@ -401,24 +407,25 @@ func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u Uploa
res.Body.Close() res.Body.Close()
switch { switch {
case res.StatusCode != 200: case res.StatusCode != 200:
retryCount++ return fmt.Errorf("server error: %d", res.StatusCode)
if retryCount > maxRetries {
return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("[Cloudreve-S3] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
time.Sleep(backoff)
case etag == "": case etag == "":
return errors.New("failed to get ETag from header") return errors.New("failed to get ETag from header")
default: default:
retryCount = 0
etags = append(etags, etag) etags = append(etags, etag)
return nil
}
}, retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.FreeSectionReader(rd)
if err != nil {
return err
}
finish += byteSize finish += byteSize
up(float64(finish) * 100 / float64(stream.GetSize())) up(float64(finish) * 100 / float64(stream.GetSize()))
chunk++ chunk++
} }
}
// s3LikeFinishUpload // s3LikeFinishUpload
// https://github.com/cloudreve/frontend/blob/b485bf297974cbe4834d2e8e744ae7b7e5b2ad39/src/component/Uploader/core/api/index.ts#L204-L252 // https://github.com/cloudreve/frontend/blob/b485bf297974cbe4834d2e8e744ae7b7e5b2ad39/src/component/Uploader/core/api/index.ts#L204-L252
bodyBuilder := &strings.Builder{} bodyBuilder := &strings.Builder{}
@ -431,8 +438,8 @@ func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u Uploa
)) ))
} }
bodyBuilder.WriteString("</CompleteMultipartUpload>") bodyBuilder.WriteString("</CompleteMultipartUpload>")
req, err := http.NewRequest( req, err := http.NewRequestWithContext(ctx,
"POST", http.MethodPost,
u.CompleteURL, u.CompleteURL,
strings.NewReader(bodyBuilder.String()), strings.NewReader(bodyBuilder.String()),
) )
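
For reference, the builder above emits the standard S3 CompleteMultipartUpload document, one <Part> per collected ETag with part numbers starting at 1. A condensed version that prints the body (the ETag values are made up; real ones come from each part's response header):

package main

import (
	"fmt"
	"strings"
)

func main() {
	etags := []string{`"etag-of-part-1"`, `"etag-of-part-2"`}
	b := &strings.Builder{}
	b.WriteString("<CompleteMultipartUpload>")
	for i, etag := range etags {
		fmt.Fprintf(b, "<Part><PartNumber>%d</PartNumber><ETag>%s</ETag></Part>", i+1, etag)
	}
	b.WriteString("</CompleteMultipartUpload>")
	fmt.Println(b.String())
}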

View File

@ -26,15 +26,8 @@ type Addition struct {
var config = driver.Config{ var config = driver.Config{
Name: "Cloudreve V4", Name: "Cloudreve V4",
LocalSort: false,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: false,
NeedMs: false,
DefaultRoot: "cloudreve://my", DefaultRoot: "cloudreve://my",
CheckStatus: true, CheckStatus: true,
Alert: "",
NoOverwriteUpload: true, NoOverwriteUpload: true,
} }

View File

@ -47,7 +47,13 @@ type BasicConfigResp struct {
type SiteLoginConfigResp struct { type SiteLoginConfigResp struct {
LoginCaptcha bool `json:"login_captcha"` LoginCaptcha bool `json:"login_captcha"`
Authn bool `json:"authn"` // RegCaptcha bool `json:"reg_captcha"`
// ForgetCaptcha bool `json:"forget_captcha"`
// RegisterEnabled bool `json:"register_enabled"`
// TosURL string `json:"tos_url"`
// PrivacyPolicyURL string `json:"privacy_policy_url"`
// SsoDisplayName string `json:"sso_display_name"`
// OidcDisplayName string `json:"oidc_display_name"`
} }
type PrepareLoginResp struct { type PrepareLoginResp struct {

View File

@ -19,7 +19,9 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op" "github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/setting" "github.com/OpenListTeam/OpenList/v4/internal/setting"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2" "github.com/go-resty/resty/v2"
jsoniter "github.com/json-iterator/go" jsoniter "github.com/json-iterator/go"
) )
@ -95,9 +97,6 @@ func (d *CloudreveV4) login() error {
if err != nil { if err != nil {
return err return err
} }
if !siteConfig.Authn {
return errors.New("authn not support")
}
var prepareLogin PrepareLoginResp var prepareLogin PrepareLoginResp
err = d.request(http.MethodGet, "/session/prepare?email="+d.Addition.Username, nil, &prepareLogin) err = d.request(http.MethodGet, "/session/prepare?email="+d.Addition.Username, nil, &prepareLogin)
if err != nil { if err != nil {
@ -253,13 +252,16 @@ func (d *CloudreveV4) upLocal(ctx context.Context, file model.FileStreamer, u Fi
} }
func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error { func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
DEFAULT := int64(u.ChunkSize)
ss, err := stream.NewStreamSectionReader(file, int(DEFAULT), &up)
if err != nil {
return err
}
uploadUrl := u.UploadUrls[0] uploadUrl := u.UploadUrls[0]
credential := u.Credential credential := u.Credential
var finish int64 = 0 var finish int64 = 0
var chunk int = 0 var chunk int = 0
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < file.GetSize() { for finish < file.GetSize() {
if utils.IsCanceled(ctx) { if utils.IsCanceled(ctx) {
return ctx.Err() return ctx.Err()
@ -267,30 +269,29 @@ func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u F
left := file.GetSize() - finish left := file.GetSize() - finish
byteSize := min(left, DEFAULT) byteSize := min(left, DEFAULT)
utils.Log.Debugf("[CloudreveV4-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize()) utils.Log.Debugf("[CloudreveV4-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
byteData := make([]byte, byteSize) rd, err := ss.GetSectionReader(finish, byteSize)
n, err := io.ReadFull(file, byteData)
utils.Log.Debug(err, n)
if err != nil { if err != nil {
return err return err
} }
req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk), err = retry.Do(
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData))) func() error {
rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadUrl+"?chunk="+strconv.Itoa(chunk),
driver.NewLimitedUploadStream(ctx, rd))
if err != nil { if err != nil {
return err return err
} }
req = req.WithContext(ctx)
req.ContentLength = byteSize req.ContentLength = byteSize
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Authorization", fmt.Sprint(credential)) req.Header.Set("Authorization", fmt.Sprint(credential))
req.Header.Set("User-Agent", d.getUA()) req.Header.Set("User-Agent", d.getUA())
err = func() error {
res, err := base.HttpClient.Do(req) res, err := base.HttpClient.Do(req)
if err != nil { if err != nil {
return err return err
} }
defer res.Body.Close() defer res.Body.Close()
if res.StatusCode != 200 { if res.StatusCode != 200 {
return errors.New(res.Status) return fmt.Errorf("server error: %d", res.StatusCode)
} }
body, err := io.ReadAll(res.Body) body, err := io.ReadAll(res.Body)
if err != nil { if err != nil {
@ -305,31 +306,30 @@ func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u F
return errors.New(up.Msg) return errors.New(up.Msg)
} }
return nil return nil
}() }, retry.Attempts(3),
if err == nil { retry.DelayType(retry.BackOffDelay),
retryCount = 0 retry.Delay(time.Second),
)
ss.FreeSectionReader(rd)
if err != nil {
return err
}
finish += byteSize finish += byteSize
up(float64(finish) * 100 / float64(file.GetSize())) up(float64(finish) * 100 / float64(file.GetSize()))
chunk++ chunk++
} else {
retryCount++
if retryCount > maxRetries {
return fmt.Errorf("upload failed after %d retries due to server errors, error: %s", maxRetries, err)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("[Cloudreve-Remote] server errors while uploading, retrying after %v...", backoff)
time.Sleep(backoff)
}
} }
return nil return nil
} }
func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error { func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
DEFAULT := int64(u.ChunkSize)
ss, err := stream.NewStreamSectionReader(file, int(DEFAULT), &up)
if err != nil {
return err
}
uploadUrl := u.UploadUrls[0] uploadUrl := u.UploadUrls[0]
var finish int64 = 0 var finish int64 = 0
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < file.GetSize() { for finish < file.GetSize() {
if utils.IsCanceled(ctx) { if utils.IsCanceled(ctx) {
return ctx.Err() return ctx.Err()
@ -337,47 +337,47 @@ func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u
left := file.GetSize() - finish left := file.GetSize() - finish
byteSize := min(left, DEFAULT) byteSize := min(left, DEFAULT)
utils.Log.Debugf("[CloudreveV4-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize()) utils.Log.Debugf("[CloudreveV4-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
byteData := make([]byte, byteSize) rd, err := ss.GetSectionReader(finish, byteSize)
n, err := io.ReadFull(file, byteData)
utils.Log.Debug(err, n)
if err != nil { if err != nil {
return err return err
} }
req, err := http.NewRequest(http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData))) err = retry.Do(
func() error {
rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, rd))
if err != nil { if err != nil {
return err return err
} }
req = req.WithContext(ctx)
req.ContentLength = byteSize req.ContentLength = byteSize
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, file.GetSize())) req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, file.GetSize()))
req.Header.Set("User-Agent", d.getUA()) req.Header.Set("User-Agent", d.getUA())
res, err := base.HttpClient.Do(req) res, err := base.HttpClient.Do(req)
if err != nil { if err != nil {
return err return err
} }
defer res.Body.Close()
// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession // https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
switch { switch {
case res.StatusCode >= 500 && res.StatusCode <= 504: case res.StatusCode >= 500 && res.StatusCode <= 504:
retryCount++ return fmt.Errorf("server error: %d", res.StatusCode)
if retryCount > maxRetries {
res.Body.Close()
return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("[CloudreveV4-OneDrive] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
time.Sleep(backoff)
case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200: case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200:
data, _ := io.ReadAll(res.Body) data, _ := io.ReadAll(res.Body)
res.Body.Close()
return errors.New(string(data)) return errors.New(string(data))
default: default:
res.Body.Close() return nil
retryCount = 0 }
}, retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.FreeSectionReader(rd)
if err != nil {
return err
}
finish += byteSize finish += byteSize
up(float64(finish) * 100 / float64(file.GetSize())) up(float64(finish) * 100 / float64(file.GetSize()))
} }
}
// 上传成功发送回调请求 // 上传成功发送回调请求
return d.request(http.MethodPost, "/callback/onedrive/"+u.SessionID+"/"+u.CallbackSecret, func(req *resty.Request) { return d.request(http.MethodPost, "/callback/onedrive/"+u.SessionID+"/"+u.CallbackSecret, func(req *resty.Request) {
req.SetBody("{}") req.SetBody("{}")
@ -385,12 +385,15 @@ func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u
} }
func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error { func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
DEFAULT := int64(u.ChunkSize)
ss, err := stream.NewStreamSectionReader(file, int(DEFAULT), &up)
if err != nil {
return err
}
var finish int64 = 0 var finish int64 = 0
var chunk int = 0 var chunk int = 0
var etags []string var etags []string
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < file.GetSize() { for finish < file.GetSize() {
if utils.IsCanceled(ctx) { if utils.IsCanceled(ctx) {
return ctx.Err() return ctx.Err()
@ -398,19 +401,20 @@ func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileU
left := file.GetSize() - finish left := file.GetSize() - finish
byteSize := min(left, DEFAULT) byteSize := min(left, DEFAULT)
utils.Log.Debugf("[CloudreveV4-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize()) utils.Log.Debugf("[CloudreveV4-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
byteData := make([]byte, byteSize) rd, err := ss.GetSectionReader(finish, byteSize)
n, err := io.ReadFull(file, byteData)
utils.Log.Debug(err, n)
if err != nil { if err != nil {
return err return err
} }
req, err := http.NewRequest(http.MethodPut, u.UploadUrls[chunk], err = retry.Do(
driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData))) func() error {
rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, u.UploadUrls[chunk],
driver.NewLimitedUploadStream(ctx, rd))
if err != nil { if err != nil {
return err return err
} }
req = req.WithContext(ctx)
req.ContentLength = byteSize req.ContentLength = byteSize
req.Header.Set("User-Agent", d.getUA())
res, err := base.HttpClient.Do(req) res, err := base.HttpClient.Do(req)
if err != nil { if err != nil {
return err return err
@ -419,23 +423,26 @@ func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileU
res.Body.Close() res.Body.Close()
switch { switch {
case res.StatusCode != 200: case res.StatusCode != 200:
retryCount++ return fmt.Errorf("server error: %d", res.StatusCode)
if retryCount > maxRetries {
return fmt.Errorf("upload failed after %d retries due to server errors", maxRetries)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("server error %d, retrying after %v...", res.StatusCode, backoff)
time.Sleep(backoff)
case etag == "": case etag == "":
return errors.New("failed to get ETag from header") return errors.New("failed to get ETag from header")
default: default:
retryCount = 0
etags = append(etags, etag) etags = append(etags, etag)
return nil
}
},
retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.FreeSectionReader(rd)
if err != nil {
return err
}
finish += byteSize finish += byteSize
up(float64(finish) * 100 / float64(file.GetSize())) up(float64(finish) * 100 / float64(file.GetSize()))
chunk++ chunk++
} }
}
// s3LikeFinishUpload // s3LikeFinishUpload
bodyBuilder := &strings.Builder{} bodyBuilder := &strings.Builder{}
@ -448,8 +455,8 @@ func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileU
)) ))
} }
bodyBuilder.WriteString("</CompleteMultipartUpload>") bodyBuilder.WriteString("</CompleteMultipartUpload>")
req, err := http.NewRequest( req, err := http.NewRequestWithContext(ctx,
"POST", http.MethodPost,
u.CompleteURL, u.CompleteURL,
strings.NewReader(bodyBuilder.String()), strings.NewReader(bodyBuilder.String()),
) )
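All three uploaders above now follow one pattern: borrow a reusable section reader, rewind it to the start before every attempt, and let retry-go own the backoff instead of the hand-rolled retryCount/time.Sleep loops. A minimal, self-contained sketch of that rewind-and-retry shape (plain net/http against an httptest server; uploadChunk is illustrative, not an OpenList API):

	package main

	import (
		"bytes"
		"fmt"
		"io"
		"net/http"
		"net/http/httptest"
		"time"

		"github.com/avast/retry-go"
	)

	// uploadChunk PUTs one chunk, rewinding rd before every attempt so a
	// failed request can be retried with exactly the same bytes.
	func uploadChunk(client *http.Client, url string, rd io.ReadSeeker, size int64) error {
		return retry.Do(
			func() error {
				if _, err := rd.Seek(0, io.SeekStart); err != nil {
					return err
				}
				req, err := http.NewRequest(http.MethodPut, url, rd)
				if err != nil {
					return err
				}
				req.ContentLength = size
				res, err := client.Do(req)
				if err != nil {
					return err
				}
				defer res.Body.Close()
				if res.StatusCode != http.StatusOK {
					return fmt.Errorf("server error: %d", res.StatusCode)
				}
				return nil
			},
			retry.Attempts(3),
			retry.DelayType(retry.BackOffDelay),
			retry.Delay(time.Second),
		)
	}

	func main() {
		fails := 2 // the first two attempts see a 503, the third succeeds
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			body, _ := io.ReadAll(r.Body)
			if fails > 0 {
				fails--
				w.WriteHeader(http.StatusServiceUnavailable)
				return
			}
			fmt.Printf("got %d bytes\n", len(body))
		}))
		defer srv.Close()

		chunk := []byte("example chunk")
		err := uploadChunk(srv.Client(), srv.URL, bytes.NewReader(chunk), int64(len(chunk)))
		fmt.Println("err:", err)
	}

The design point is that the request body is rebuilt from the same rewound reader on every attempt, so no chunk is ever buffered twice just to make retries possible.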

View File

@@ -1,12 +1,14 @@
 package crypt

 import (
+	"bytes"
 	"context"
 	"fmt"
 	"io"
 	stdpath "path"
 	"regexp"
 	"strings"
+	"sync"

 	"github.com/OpenListTeam/OpenList/v4/internal/driver"
 	"github.com/OpenListTeam/OpenList/v4/internal/errs"
@@ -110,7 +112,7 @@ func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
 	//return d.list(ctx, d.RemotePath, path)
 	//remoteFull
-	objs, err := fs.List(ctx, d.getPathForRemote(path, true), &fs.ListArgs{NoLog: true})
+	objs, err := fs.List(ctx, d.getPathForRemote(path, true), &fs.ListArgs{NoLog: true, Refresh: args.Refresh})
 	// the obj must implement the model.SetPath interface
 	// return objs, err
 	if err != nil {
@@ -241,6 +243,9 @@ func (d *Crypt) Get(ctx context.Context, path string) (model.Obj, error) {
 	//return nil, errs.ObjectNotFound
 }

+// https://github.com/rclone/rclone/blob/v1.67.0/backend/crypt/cipher.go#L37
+const fileHeaderSize = 32
+
 func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
 	dstDirActualPath, err := d.getActualPathForRemote(file.GetPath(), false)
 	if err != nil {
@@ -251,58 +256,68 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
 		return nil, err
 	}
-	if remoteLink.RangeReadCloser == nil && remoteLink.MFile == nil && len(remoteLink.URL) == 0 {
+	remoteSize := remoteLink.ContentLength
+	if remoteSize <= 0 {
+		remoteSize = remoteFile.GetSize()
+	}
+	rrf, err := stream.GetRangeReaderFromLink(remoteSize, remoteLink)
+	if err != nil {
+		_ = remoteLink.Close()
 		return nil, fmt.Errorf("the remote storage driver need to be enhanced to support encrytion")
 	}
-	resultRangeReadCloser := &model.RangeReadCloser{}
-	resultRangeReadCloser.TryAdd(remoteLink.MFile)
-	if remoteLink.RangeReadCloser != nil {
-		resultRangeReadCloser.AddClosers(remoteLink.RangeReadCloser.GetClosers())
-	}
-	remoteFileSize := remoteFile.GetSize()
-	rangeReaderFunc := func(ctx context.Context, underlyingOffset, underlyingLength int64) (io.ReadCloser, error) {
-		length := underlyingLength
-		if underlyingLength >= 0 && underlyingOffset+underlyingLength >= remoteFileSize {
-			length = -1
-		}
-		if remoteLink.MFile != nil {
-			_, err := remoteLink.MFile.Seek(underlyingOffset, io.SeekStart)
-			if err != nil {
-				return nil, err
-			}
-			//keep reuse same MFile and close at last.
-			return io.NopCloser(remoteLink.MFile), nil
-		}
-		rrc := remoteLink.RangeReadCloser
-		if rrc == nil && len(remoteLink.URL) > 0 {
-			var err error
-			rrc, err = stream.GetRangeReadCloserFromLink(remoteFileSize, remoteLink)
-			if err != nil {
-				return nil, err
-			}
-			resultRangeReadCloser.AddClosers(rrc.GetClosers())
-			remoteLink.RangeReadCloser = rrc
-		}
-		if rrc != nil {
-			remoteReader, err := rrc.RangeRead(ctx, http_range.Range{Start: underlyingOffset, Length: length})
-			if err != nil {
-				return nil, err
-			}
-			return remoteReader, nil
-		}
-		return nil, errs.NotSupport
-	}
-	resultRangeReadCloser.RangeReader = func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
+	mu := &sync.Mutex{}
+	var fileHeader []byte
+	rangeReaderFunc := func(ctx context.Context, offset, limit int64) (io.ReadCloser, error) {
+		length := limit
+		if offset == 0 && limit > 0 {
+			mu.Lock()
+			if limit <= fileHeaderSize {
+				defer mu.Unlock()
+				if fileHeader != nil {
+					return io.NopCloser(bytes.NewReader(fileHeader[:limit])), nil
+				}
+				length = fileHeaderSize
+			} else if fileHeader == nil {
+				defer mu.Unlock()
+			} else {
+				mu.Unlock()
+			}
+		}
+
+		remoteReader, err := rrf.RangeRead(ctx, http_range.Range{Start: offset, Length: length})
+		if err != nil {
+			return nil, err
+		}
+
+		if offset == 0 && limit > 0 {
+			fileHeader = make([]byte, fileHeaderSize)
+			n, err := io.ReadFull(remoteReader, fileHeader)
+			if n != fileHeaderSize {
+				fileHeader = nil
+				return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", fileHeaderSize, n, err)
+			}
+			if limit <= fileHeaderSize {
+				remoteReader.Close()
+				return io.NopCloser(bytes.NewReader(fileHeader[:limit])), nil
+			} else {
+				remoteReader = utils.ReadCloser{
+					Reader: io.MultiReader(bytes.NewReader(fileHeader), remoteReader),
+					Closer: remoteReader,
+				}
+			}
+		}
+		return remoteReader, nil
+	}
+	return &model.Link{
+		RangeReader: stream.RangeReaderFunc(func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
 			readSeeker, err := d.cipher.DecryptDataSeek(ctx, rangeReaderFunc, httpRange.Start, httpRange.Length)
 			if err != nil {
 				return nil, err
 			}
 			return readSeeker, nil
-	}
-	return &model.Link{
-		RangeReadCloser: resultRangeReadCloser,
+		}),
+		SyncClosers: utils.NewSyncClosers(remoteLink),
 	}, nil
 }
@@ -386,7 +401,6 @@ func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, streamer model.FileSt
 		},
 		Reader:            wrappedIn,
 		Mimetype:          "application/octet-stream",
-		WebPutAsTask:      streamer.NeedStore(),
 		ForceStreamUpload: true,
 		Exist:             streamer.GetExist(),
 	}
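The new rangeReaderFunc caches the 32-byte rclone-crypt file header the first time offset 0 is read, serves later head-only probes from memory, and splices the cached header onto longer reads with io.MultiReader. The sketch below shows the same caching idea in isolation; unlike the hunk above, it issues a dedicated header-sized request first, and open is a stand-in for the real remote range reader:

	package main

	import (
		"bytes"
		"fmt"
		"io"
		"sync"
	)

	const fileHeaderSize = 32

	type rangeFunc func(offset, length int64) (io.ReadCloser, error)

	type readCloser struct {
		io.Reader
		io.Closer
	}

	// cacheHeader wraps open so the fixed-size file header is fetched from
	// the remote once and then served from memory for reads starting at 0.
	func cacheHeader(open rangeFunc) rangeFunc {
		var mu sync.Mutex
		var header []byte
		return func(offset, length int64) (io.ReadCloser, error) {
			if offset != 0 || length < 0 {
				return open(offset, length) // not a header read; pass through
			}
			mu.Lock()
			if header == nil {
				rc, err := open(0, fileHeaderSize)
				if err != nil {
					mu.Unlock()
					return nil, err
				}
				buf := make([]byte, fileHeaderSize)
				_, err = io.ReadFull(rc, buf)
				rc.Close()
				if err != nil {
					mu.Unlock()
					return nil, err
				}
				header = buf
			}
			h := header
			mu.Unlock()
			if length <= fileHeaderSize {
				// short head probe: no remote call at all
				return io.NopCloser(bytes.NewReader(h[:length])), nil
			}
			// longer read: fetch the tail and stitch the cached header on
			rest, err := open(fileHeaderSize, length-fileHeaderSize)
			if err != nil {
				return nil, err
			}
			return readCloser{io.MultiReader(bytes.NewReader(h), rest), rest}, nil
		}
	}

	func main() {
		calls := 0
		open := func(off, l int64) (io.ReadCloser, error) {
			calls++
			return io.NopCloser(bytes.NewReader(make([]byte, l))), nil
		}
		cached := cacheHeader(open)
		for i := 0; i < 3; i++ {
			rc, _ := cached(0, 16)
			io.Copy(io.Discard, rc)
			rc.Close()
		}
		fmt.Println("remote opens:", calls) // prints 1
	}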

View File

@@ -28,15 +28,10 @@ type Addition struct {
 var config = driver.Config{
 	Name:        "Crypt",
 	LocalSort:   true,
-	OnlyLocal:   true,
 	OnlyProxy:   true,
 	NoCache:     true,
-	NoUpload:    false,
-	NeedMs:      false,
 	DefaultRoot: "/",
-	CheckStatus: false,
-	Alert:       "",
-	NoOverwriteUpload: false,
+	NoLinkURL:   true,
 }

 func init() {

View File

@@ -236,7 +236,7 @@ func (d *Doubao) Put(ctx context.Context, dstDir model.Obj, file model.FileStrea
 	// choose the upload method by file size
 	if file.GetSize() <= 1*utils.MB { // files under 1MB use the plain upload mode
-		return d.Upload(&uploadConfig, dstDir, file, up, dataType)
+		return d.Upload(ctx, &uploadConfig, dstDir, file, up, dataType)
 	}
 	// larger files use multipart upload
 	return d.UploadByMultipart(ctx, &uploadConfig, file.GetSize(), dstDir, file, up, dataType)

View File

@@ -18,15 +18,7 @@ type Addition struct {
 var config = driver.Config{
 	Name:        "Doubao",
 	LocalSort:   true,
-	OnlyLocal:   false,
-	OnlyProxy:   false,
-	NoCache:     false,
-	NoUpload:    false,
-	NeedMs:      false,
 	DefaultRoot: "0",
-	CheckStatus: false,
-	Alert:       "",
-	NoOverwriteUpload: false,
 }

 func init() {

View File

@@ -129,7 +129,7 @@ type BuiAuditInfo struct {
 	AuditInfo      AuditInfo `json:"audit_info"`
 	IsAuditing     bool      `json:"is_auditing"`
 	AuditStatus    int       `json:"audit_status"`
-	LastUpdateTime int       `json:"last_update_time"`
+	LastUpdateTime int64     `json:"last_update_time"`
 	UnpassReason   string    `json:"unpass_reason"`
 	Details        Details   `json:"details"`
 }
@@ -184,7 +184,7 @@ type UserInfo struct {
 	SecUserID      string `json:"sec_user_id"`
 	SessionKey     string `json:"session_key"`
 	UseHmRegion    bool   `json:"use_hm_region"`
-	UserCreateTime int    `json:"user_create_time"`
+	UserCreateTime int64  `json:"user_create_time"`
 	UserID         int64  `json:"user_id"`
 	UserIDStr      string `json:"user_id_str"`
 	UserVerified   bool   `json:"user_verified"`

View File

@@ -14,7 +14,7 @@ import (
 	"math/rand"
 	"net/http"
 	"net/url"
-	"path/filepath"
+	stdpath "path"
 	"sort"
 	"strconv"
 	"strings"
@@ -24,6 +24,7 @@ import (
 	"github.com/OpenListTeam/OpenList/v4/drivers/base"
 	"github.com/OpenListTeam/OpenList/v4/internal/driver"
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
+	"github.com/OpenListTeam/OpenList/v4/internal/stream"
 	"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
 	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
 	"github.com/avast/retry-go"
@@ -353,7 +354,7 @@ func (d *Doubao) getUploadConfig(upConfig *UploadConfig, dataType string, file m
 		"ServiceId":     d.UploadToken.Alice[dataType].ServiceID,
 		"NeedFallback":  "true",
 		"FileSize":      strconv.FormatInt(file.GetSize(), 10),
-		"FileExtension": filepath.Ext(file.GetName()),
+		"FileExtension": stdpath.Ext(file.GetName()),
 		"s":             randomString(),
 	}
 }
@@ -447,39 +448,65 @@ func (d *Doubao) uploadNode(uploadConfig *UploadConfig, dir model.Obj, file mode
 }

 // Upload: plain (non-multipart) upload implementation
-func (d *Doubao) Upload(config *UploadConfig, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, dataType string) (model.Obj, error) {
-	data, err := io.ReadAll(file)
+func (d *Doubao) Upload(ctx context.Context, config *UploadConfig, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, dataType string) (model.Obj, error) {
+	ss, err := stream.NewStreamSectionReader(file, int(file.GetSize()), &up)
+	if err != nil {
+		return nil, err
+	}
+	reader, err := ss.GetSectionReader(0, file.GetSize())
 	if err != nil {
 		return nil, err
 	}
 	// compute the CRC32
 	crc32Hash := crc32.NewIEEE()
-	crc32Hash.Write(data)
+	w, err := utils.CopyWithBuffer(crc32Hash, reader)
+	if w != file.GetSize() {
+		return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", file.GetSize(), w, err)
+	}
 	crc32Value := hex.EncodeToString(crc32Hash.Sum(nil))

 	// build the request path
 	uploadNode := config.InnerUploadAddress.UploadNodes[0]
 	storeInfo := uploadNode.StoreInfos[0]
 	uploadUrl := fmt.Sprintf("https://%s/upload/v1/%s", uploadNode.UploadHost, storeInfo.StoreURI)
+	rateLimitedRd := driver.NewLimitedUploadStream(ctx, reader)

-	uploadResp := UploadResp{}
-	if _, err = d.uploadRequest(uploadUrl, http.MethodPost, storeInfo, func(req *resty.Request) {
-		req.SetHeaders(map[string]string{
-			"Content-Type":        "application/octet-stream",
-			"Content-Crc32":       crc32Value,
-			"Content-Length":      fmt.Sprintf("%d", len(data)),
-			"Content-Disposition": fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI)),
-		})
-		req.SetBody(data)
-	}, &uploadResp); err != nil {
-		return nil, err
-	}
-
-	if uploadResp.Code != 2000 {
-		return nil, fmt.Errorf("upload failed: %s", uploadResp.Message)
+	err = d._retryOperation("Upload", func() error {
+		reader.Seek(0, io.SeekStart)
+		req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadUrl, rateLimitedRd)
+		if err != nil {
+			return err
+		}
+		req.Header = map[string][]string{
+			"Referer":             {BaseURL + "/"},
+			"Origin":              {BaseURL},
+			"User-Agent":          {UserAgent},
+			"X-Storage-U":         {d.UserId},
+			"Authorization":       {storeInfo.Auth},
+			"Content-Type":        {"application/octet-stream"},
+			"Content-Crc32":       {crc32Value},
+			"Content-Length":      {fmt.Sprintf("%d", file.GetSize())},
+			"Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))},
+		}
+		res, err := base.HttpClient.Do(req)
+		if err != nil {
+			return err
+		}
+		defer res.Body.Close()
+		bytes, _ := io.ReadAll(res.Body)
+		resp := UploadResp{}
+		utils.Json.Unmarshal(bytes, &resp)
+		if resp.Code != 2000 {
+			return fmt.Errorf("upload part failed: %s", resp.Message)
+		} else if resp.Data.Crc32 != crc32Value {
+			return fmt.Errorf("upload part failed: crc32 mismatch, expected %s, got %s", crc32Value, resp.Data.Crc32)
+		}
+		return nil
+	})
+	ss.FreeSectionReader(reader)
+	if err != nil {
+		return nil, err
 	}

 	uploadNodeResp, err := d.uploadNode(config, dstDir, file, dataType)
@@ -516,68 +543,107 @@ func (d *Doubao) UploadByMultipart(ctx context.Context, config *UploadConfig, fi
 	if config.InnerUploadAddress.AdvanceOption.SliceSize > 0 {
 		chunkSize = int64(config.InnerUploadAddress.AdvanceOption.SliceSize)
 	}
+	ss, err := stream.NewStreamSectionReader(file, int(chunkSize), &up)
+	if err != nil {
+		return nil, err
+	}
 	totalParts := (fileSize + chunkSize - 1) / chunkSize
 	// create the slots for the part records
 	parts := make([]UploadPart, totalParts)
-	// cache the file
-	tempFile, err := file.CacheFullInTempFile()
-	if err != nil {
-		return nil, fmt.Errorf("failed to cache file: %w", err)
-	}
 	up(10.0) // update progress
 	// set up the parallel upload
-	threadG, uploadCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
-		retry.Attempts(1),
+	thread := min(int(totalParts), d.uploadThread)
+	threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
+		retry.Attempts(MaxRetryAttempts),
 		retry.Delay(time.Second),
-		retry.DelayType(retry.BackOffDelay))
+		retry.DelayType(retry.BackOffDelay),
+		retry.MaxJitter(200*time.Millisecond),
+	)

 	var partsMutex sync.Mutex
 	// upload all parts in parallel
-	for partIndex := int64(0); partIndex < totalParts; partIndex++ {
+	hash := crc32.NewIEEE()
+	for partIndex := range totalParts {
 		if utils.IsCanceled(uploadCtx) {
 			break
 		}
-		partIndex := partIndex
 		partNumber := partIndex + 1 // part numbers start at 1
-		threadG.Go(func(ctx context.Context) error {
 		// compute this part's size and offset
 		offset := partIndex * chunkSize
 		size := chunkSize
 		if partIndex == totalParts-1 {
 			size = fileSize - offset
 		}
-			limitedReader := driver.NewLimitedUploadStream(ctx, io.NewSectionReader(tempFile, offset, size))
-			// read the data into memory
-			data, err := io.ReadAll(limitedReader)
-			if err != nil {
-				return fmt.Errorf("failed to read part %d: %w", partNumber, err)
-			}
-			// compute the CRC32
-			crc32Value := calculateCRC32(data)
-			// upload the part via _retryOperation
-			var uploadPart UploadPart
-			if err = d._retryOperation(fmt.Sprintf("Upload part %d", partNumber), func() error {
-				var err error
-				uploadPart, err = d.uploadPart(config, uploadUrl, uploadID, partNumber, data, crc32Value)
-				return err
-			}); err != nil {
-				return fmt.Errorf("part %d upload failed: %w", partNumber, err)
-			}
+		var reader *stream.SectionReader
+		var rateLimitedRd io.Reader
+		crc32Value := ""
+		threadG.GoWithLifecycle(errgroup.Lifecycle{
+			Before: func(ctx context.Context) error {
+				if reader == nil {
+					var err error
+					reader, err = ss.GetSectionReader(offset, size)
+					if err != nil {
+						return err
+					}
+					hash.Reset()
+					w, err := utils.CopyWithBuffer(hash, reader)
+					if w != size {
+						return fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", size, w, err)
+					}
+					crc32Value = hex.EncodeToString(hash.Sum(nil))
+					rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
+				}
+				return nil
+			},
+			Do: func(ctx context.Context) error {
+				reader.Seek(0, io.SeekStart)
+				req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("%s?uploadid=%s&part_number=%d&phase=transfer", uploadUrl, uploadID, partNumber), rateLimitedRd)
+				if err != nil {
+					return err
+				}
+				req.Header = map[string][]string{
+					"Referer":             {BaseURL + "/"},
+					"Origin":              {BaseURL},
+					"User-Agent":          {UserAgent},
+					"X-Storage-U":         {d.UserId},
+					"Authorization":       {storeInfo.Auth},
+					"Content-Type":        {"application/octet-stream"},
+					"Content-Crc32":       {crc32Value},
+					"Content-Length":      {fmt.Sprintf("%d", size)},
+					"Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))},
+				}
+				res, err := base.HttpClient.Do(req)
+				if err != nil {
+					return err
+				}
+				defer res.Body.Close()
+				bytes, _ := io.ReadAll(res.Body)
+				uploadResp := UploadResp{}
+				utils.Json.Unmarshal(bytes, &uploadResp)
+				if uploadResp.Code != 2000 {
+					return fmt.Errorf("upload part failed: %s", uploadResp.Message)
+				} else if uploadResp.Data.Crc32 != crc32Value {
+					return fmt.Errorf("upload part failed: crc32 mismatch, expected %s, got %s", crc32Value, uploadResp.Data.Crc32)
+				}
 				// record the successfully uploaded part
 				partsMutex.Lock()
 				parts[partIndex] = UploadPart{
 					PartNumber: strconv.FormatInt(partNumber, 10),
-				Etag:       uploadPart.Etag,
+					Etag:       uploadResp.Data.Etag,
 					Crc32:      crc32Value,
 				}
 				partsMutex.Unlock()
 				// update progress
 				progress := 10.0 + 90.0*float64(threadG.Success()+1)/float64(totalParts)
 				up(math.Min(progress, 95.0))
 				return nil
+			},
+			After: func(err error) {
+				ss.FreeSectionReader(reader)
+			},
 		})
 	}
@@ -680,42 +746,6 @@ func (d *Doubao) initMultipartUpload(config *UploadConfig, uploadUrl string, sto
 	return uploadResp.Data.UploadId, nil
 }

-// part upload implementation
-func (d *Doubao) uploadPart(config *UploadConfig, uploadUrl, uploadID string, partNumber int64, data []byte, crc32Value string) (resp UploadPart, err error) {
-	uploadResp := UploadResp{}
-	storeInfo := config.InnerUploadAddress.UploadNodes[0].StoreInfos[0]
-
-	_, err = d.uploadRequest(uploadUrl, http.MethodPost, storeInfo, func(req *resty.Request) {
-		req.SetHeaders(map[string]string{
-			"Content-Type":        "application/octet-stream",
-			"Content-Crc32":       crc32Value,
-			"Content-Length":      fmt.Sprintf("%d", len(data)),
-			"Content-Disposition": fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI)),
-		})
-		req.SetQueryParams(map[string]string{
-			"uploadid":    uploadID,
-			"part_number": strconv.FormatInt(partNumber, 10),
-			"phase":       "transfer",
-		})
-		req.SetBody(data)
-		req.SetContentLength(true)
-	}, &uploadResp)
-	if err != nil {
-		return resp, err
-	}
-
-	if uploadResp.Code != 2000 {
-		return resp, fmt.Errorf("upload part failed: %s", uploadResp.Message)
-	} else if uploadResp.Data.Crc32 != crc32Value {
-		return resp, fmt.Errorf("upload part failed: crc32 mismatch, expected %s, got %s", crc32Value, uploadResp.Data.Crc32)
-	}
-
-	return uploadResp.Data, nil
-}
-
 // complete the multipart upload
 func (d *Doubao) completeMultipartUpload(config *UploadConfig, uploadUrl, uploadID string, parts []UploadPart) error {
 	uploadResp := UploadResp{}
@@ -784,13 +814,6 @@ func (d *Doubao) commitMultipartUpload(uploadConfig *UploadConfig) error {
 	return nil
 }

-// compute the CRC32
-func calculateCRC32(data []byte) string {
-	hash := crc32.NewIEEE()
-	hash.Write(data)
-	return hex.EncodeToString(hash.Sum(nil))
-}
-
 // _retryOperation retries an operation
 func (d *Doubao) _retryOperation(operation string, fn func() error) error {
 	return retry.Do(
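Both Doubao paths now verify integrity end-to-end: the CRC32 is streamed from the section reader before the part is sent, the reader is rewound, and the server's echoed Crc32 must match what was computed locally. A small stdlib-only sketch of that checksum-then-rewind step (the server echo is simulated):

	package main

	import (
		"bytes"
		"encoding/hex"
		"fmt"
		"hash/crc32"
		"io"
	)

	// partChecksum drains the part once to compute its CRC32, then rewinds
	// so the very same reader can be handed to the HTTP request body.
	func partChecksum(part io.ReadSeeker) (string, error) {
		h := crc32.NewIEEE()
		if _, err := io.Copy(h, part); err != nil {
			return "", err
		}
		if _, err := part.Seek(0, io.SeekStart); err != nil {
			return "", err
		}
		return hex.EncodeToString(h.Sum(nil)), nil
	}

	func main() {
		part := bytes.NewReader([]byte("part payload"))
		local, err := partChecksum(part)
		if err != nil {
			panic(err)
		}
		serverEcho := local // pretend the server echoed our checksum back
		if serverEcho != local {
			fmt.Println("upload part failed: crc32 mismatch")
			return
		}
		fmt.Println("crc32 ok:", local)
	}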

View File

@@ -14,15 +14,8 @@ type Addition struct {
 var config = driver.Config{
 	Name:        "DoubaoShare",
 	LocalSort:   true,
-	OnlyLocal:   false,
-	OnlyProxy:   false,
-	NoCache:     false,
 	NoUpload:    true,
-	NeedMs:      false,
 	DefaultRoot: "/",
-	CheckStatus: false,
-	Alert:       "",
-	NoOverwriteUpload: false,
 }

 func init() {

View File

@@ -79,11 +79,11 @@ type ShareInfo struct {
 		RiskReviewStatus int    `json:"risk_review_status"`
 		ConversationID   string `json:"conversation_id"`
 		ParentID         string `json:"parent_id"`
-		CreateTime       int    `json:"create_time"`
-		UpdateTime       int    `json:"update_time"`
+		CreateTime       int64  `json:"create_time"`
+		UpdateTime       int64  `json:"update_time"`
 	} `json:"first_node"`
 	NodeCount      int    `json:"node_count"`
-	CreateTime     int    `json:"create_time"`
+	CreateTime     int64  `json:"create_time"`
 	Channel        string `json:"channel"`
 	InfluencerType int    `json:"influencer_type"`
 }
@@ -111,8 +111,8 @@ type FilePath []struct {
 	RiskReviewStatus int    `json:"risk_review_status"`
 	ConversationID   string `json:"conversation_id"`
 	ParentID         string `json:"parent_id"`
-	CreateTime       int    `json:"create_time"`
-	UpdateTime       int    `json:"update_time"`
+	CreateTime       int64  `json:"create_time"`
+	UpdateTime       int64  `json:"update_time"`
 }

 type GetFileUrlResp struct {

View File

@@ -192,12 +192,11 @@ func (d *Dropbox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
 		url := d.contentBase + "/2/files/upload_session/append_v2"
 		reader := driver.NewLimitedUploadStream(ctx, io.LimitReader(stream, PartSize))
-		req, err := http.NewRequest(http.MethodPost, url, reader)
+		req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, reader)
 		if err != nil {
 			log.Errorf("failed to update file when append to upload session, err: %+v", err)
 			return err
 		}
-		req = req.WithContext(ctx)
 		req.Header.Set("Content-Type", "application/octet-stream")
 		req.Header.Set("Authorization", "Bearer "+d.AccessToken)

View File

@@ -18,13 +18,6 @@ type Addition struct {
 var config = driver.Config{
 	Name:              "Dropbox",
-	LocalSort:         false,
-	OnlyLocal:         false,
-	OnlyProxy:         false,
-	NoCache:           false,
-	NoUpload:          false,
-	NeedMs:            false,
-	DefaultRoot:       "",
 	NoOverwriteUpload: true,
 }

View File

@@ -169,11 +169,10 @@ func (d *Dropbox) getFiles(ctx context.Context, path string) ([]File, error) {

 func (d *Dropbox) finishUploadSession(ctx context.Context, toPath string, offset int64, sessionId string) error {
 	url := d.contentBase + "/2/files/upload_session/finish"
-	req, err := http.NewRequest(http.MethodPost, url, nil)
+	req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil)
 	if err != nil {
 		return err
 	}
-	req = req.WithContext(ctx)
 	req.Header.Set("Content-Type", "application/octet-stream")
 	req.Header.Set("Authorization", "Bearer "+d.AccessToken)
@@ -214,11 +213,10 @@ func (d *Dropbox) finishUploadSession(ctx context.Context, toPath string, offset
 func (d *Dropbox) startUploadSession(ctx context.Context) (string, error) {
 	url := d.contentBase + "/2/files/upload_session/start"
-	req, err := http.NewRequest(http.MethodPost, url, nil)
+	req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil)
 	if err != nil {
 		return "", err
 	}
-	req = req.WithContext(ctx)
 	req.Header.Set("Content-Type", "application/octet-stream")
 	req.Header.Set("Authorization", "Bearer "+d.AccessToken)
 	req.Header.Set("Dropbox-API-Arg", "{\"close\":false}")

View File

@@ -17,16 +17,8 @@ type Addition struct {
 var config = driver.Config{
 	Name:        "FebBox",
-	LocalSort:   false,
-	OnlyLocal:   false,
-	OnlyProxy:   false,
-	NoCache:     false,
 	NoUpload:    true,
-	NeedMs:      false,
 	DefaultRoot: "0",
-	CheckStatus: false,
-	Alert:       "",
-	NoOverwriteUpload: false,
 }

 func init() {

View File

@@ -31,13 +31,13 @@ func (c *customTokenSource) Token() (*oauth2.Token, error) {
 	v.Set("client_id", c.config.ClientID)
 	v.Set("client_secret", c.config.ClientSecret)

-	req, err := http.NewRequest("POST", c.config.TokenURL, strings.NewReader(v.Encode()))
+	req, err := http.NewRequestWithContext(c.ctx, http.MethodPost, c.config.TokenURL, strings.NewReader(v.Encode()))
 	if err != nil {
 		return nil, err
 	}
 	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

-	resp, err := http.DefaultClient.Do(req.WithContext(c.ctx))
+	resp, err := http.DefaultClient.Do(req)
 	if err != nil {
 		return nil, err
 	}

View File

@@ -2,12 +2,16 @@ package ftp

 import (
 	"context"
+	"errors"
+	"io"
 	stdpath "path"

 	"github.com/OpenListTeam/OpenList/v4/internal/driver"
 	"github.com/OpenListTeam/OpenList/v4/internal/errs"
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
 	"github.com/OpenListTeam/OpenList/v4/internal/stream"
+	"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
+	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
 	"github.com/jlaffaye/ftp"
 )
@@ -15,6 +19,9 @@ type FTP struct {
 	model.Storage
 	Addition
 	conn *ftp.ServerConn
+
+	ctx    context.Context
+	cancel context.CancelFunc
 }

 func (d *FTP) Config() driver.Config {
@@ -26,12 +33,16 @@ func (d *FTP) GetAddition() driver.Additional {
 }

 func (d *FTP) Init(ctx context.Context) error {
-	return d.login()
+	d.ctx, d.cancel = context.WithCancel(context.Background())
+	var err error
+	d.conn, err = d._login(ctx)
+	return err
 }

 func (d *FTP) Drop(ctx context.Context) error {
 	if d.conn != nil {
-		_ = d.conn.Logout()
+		_ = d.conn.Quit()
+		d.cancel()
 	}
 	return nil
 }
@@ -61,19 +72,52 @@ func (d *FTP) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]m
 }

 func (d *FTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
-	if err := d.login(); err != nil {
+	conn, err := d._login(ctx)
+	if err != nil {
 		return nil, err
 	}
-	r := NewFileReader(d.conn, encode(file.GetPath(), d.Encoding), file.GetSize())
-	link := &model.Link{
-		MFile: &stream.RateLimitFile{
-			File:    r,
-			Limiter: stream.ServerDownloadLimit,
-			Ctx:     ctx,
-		},
-	}
-	return link, nil
+	path := encode(file.GetPath(), d.Encoding)
+	size := file.GetSize()
+	resultRangeReader := func(context context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
+		length := httpRange.Length
+		if length < 0 || httpRange.Start+length > size {
+			length = size - httpRange.Start
+		}
+		var c *ftp.ServerConn
+		if ctx == context {
+			c = conn
+		} else {
+			var err error
+			c, err = d._login(context)
+			if err != nil {
+				return nil, err
+			}
+		}
+		resp, err := c.RetrFrom(path, uint64(httpRange.Start))
+		if err != nil {
+			return nil, err
+		}
+		var close utils.CloseFunc
+		if context == ctx {
+			close = resp.Close
+		} else {
+			close = func() error {
+				return errors.Join(resp.Close(), c.Quit())
+			}
+		}
+		return utils.ReadCloser{
+			Reader: io.LimitReader(resp, length),
+			Closer: close,
+		}, nil
+	}
+	return &model.Link{
+		RangeReader: &model.FileRangeReader{
+			RangeReaderIF: stream.RateLimitRangeReaderFunc(resultRangeReader),
+		},
+		SyncClosers: utils.NewSyncClosers(utils.CloseFunc(conn.Quit)),
+	}, nil
 }

 func (d *FTP) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
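Link no longer hands out a seekable FileReader bound to one shared connection; each ranged read either reuses the connection dialed in Link (when the contexts match) or opens a short-lived one, and RetrFrom(path, offset) makes the server start the transfer mid-file. A minimal standalone sketch of one such throwaway ranged read with jlaffaye/ftp (host, credentials, and path are placeholders):

	package main

	import (
		"fmt"
		"io"
		"time"

		"github.com/jlaffaye/ftp"
	)

	// readRange dials a throwaway connection and streams length bytes
	// starting at offset; RetrFrom asks the server to begin mid-file.
	func readRange(addr, user, pass, path string, offset, length int64) ([]byte, error) {
		conn, err := ftp.Dial(addr, ftp.DialWithShutTimeout(10*time.Second))
		if err != nil {
			return nil, err
		}
		defer conn.Quit()
		if err := conn.Login(user, pass); err != nil {
			return nil, err
		}
		resp, err := conn.RetrFrom(path, uint64(offset))
		if err != nil {
			return nil, err
		}
		defer resp.Close()
		return io.ReadAll(io.LimitReader(resp, length))
	}

	func main() {
		data, err := readRange("127.0.0.1:21", "anonymous", "anonymous", "/pub/file.bin", 1024, 4096)
		fmt.Println(len(data), err)
	}

The trade-off mirrors the hunk above: a connection per range read costs a dial and a login, but it makes concurrent range requests independent instead of serializing them through one stateful reader.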

View File

@@ -33,8 +33,9 @@ type Addition struct {
 var config = driver.Config{
 	Name:          "FTP",
 	LocalSort:     true,
-	OnlyLocal:     true,
+	OnlyLinkMFile: false,
 	DefaultRoot:   "/",
+	NoLinkURL:     true,
 }

 func init() {

View File

@@ -1,116 +1,43 @@
 package ftp

 import (
-	"io"
-	"os"
-	"sync"
-	"sync/atomic"
+	"context"
+	"fmt"
 	"time"

+	"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
 	"github.com/jlaffaye/ftp"
 )

 // do others that not defined in Driver interface
 func (d *FTP) login() error {
+	_, err, _ := singleflight.AnyGroup.Do(fmt.Sprintf("FTP.login:%p", d), func() (any, error) {
+		var err error
 		if d.conn != nil {
-		_, err := d.conn.CurrentDir()
-		if err == nil {
-			return nil
-		}
-	}
-	conn, err := ftp.Dial(d.Address, ftp.DialWithShutTimeout(10*time.Second))
+			err = d.conn.NoOp()
 			if err != nil {
+				d.conn.Quit()
+				d.conn = nil
+			}
+		}
+		if d.conn == nil {
+			d.conn, err = d._login(d.ctx)
+		}
+		return nil, err
+	})
 	return err
 }
+
+func (d *FTP) _login(ctx context.Context) (*ftp.ServerConn, error) {
+	conn, err := ftp.Dial(d.Address, ftp.DialWithShutTimeout(10*time.Second), ftp.DialWithContext(ctx))
+	if err != nil {
+		return nil, err
+	}
 	err = conn.Login(d.Username, d.Password)
 	if err != nil {
-		return err
+		conn.Quit()
+		return nil, err
 	}
-	d.conn = conn
-	return nil
-}
-
-// FileReader An FTP file reader that implements io.MFile for seeking.
-type FileReader struct {
-	conn         *ftp.ServerConn
-	resp         *ftp.Response
-	offset       atomic.Int64
-	readAtOffset int64
-	mu           sync.Mutex
-	path         string
-	size         int64
-}
-
-func NewFileReader(conn *ftp.ServerConn, path string, size int64) *FileReader {
-	return &FileReader{
-		conn: conn,
-		path: path,
-		size: size,
-	}
-}
-
-func (r *FileReader) Read(buf []byte) (n int, err error) {
-	n, err = r.ReadAt(buf, r.offset.Load())
-	r.offset.Add(int64(n))
-	return
-}
-
-func (r *FileReader) ReadAt(buf []byte, off int64) (n int, err error) {
-	if off < 0 {
-		return -1, os.ErrInvalid
-	}
-	r.mu.Lock()
-	defer r.mu.Unlock()
-
-	if off != r.readAtOffset {
-		//have to restart the connection, to correct offset
-		_ = r.resp.Close()
-		r.resp = nil
-	}
-
-	if r.resp == nil {
-		r.resp, err = r.conn.RetrFrom(r.path, uint64(off))
-		r.readAtOffset = off
-		if err != nil {
-			return 0, err
-		}
-	}
-
-	n, err = r.resp.Read(buf)
-	r.readAtOffset += int64(n)
-	return
-}
-
-func (r *FileReader) Seek(offset int64, whence int) (int64, error) {
-	oldOffset := r.offset.Load()
-	var newOffset int64
-	switch whence {
-	case io.SeekStart:
-		newOffset = offset
-	case io.SeekCurrent:
-		newOffset = oldOffset + offset
-	case io.SeekEnd:
-		return r.size, nil
-	default:
-		return -1, os.ErrInvalid
-	}
-
-	if newOffset < 0 {
-		// offset out of range
-		return oldOffset, os.ErrInvalid
-	}
-	if newOffset == oldOffset {
-		// offset not changed, so return directly
-		return oldOffset, nil
-	}
-	r.offset.Store(newOffset)
-	return newOffset, nil
-}
-
-func (r *FileReader) Close() error {
-	if r.resp != nil {
-		return r.resp.Close()
-	}
-	return nil
+	return conn, nil
 }
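login() is now wrapped in OpenList's pkg/singleflight (AnyGroup), so concurrent callers that all notice a dead connection collapse into a single reconnect instead of racing to dial. The sketch below demonstrates the same collapse-concurrent-callers semantics with the widely used golang.org/x/sync/singleflight package, which is an assumption-free stand-in for the project's own implementation:

	package main

	import (
		"fmt"
		"sync"

		"golang.org/x/sync/singleflight"
	)

	var g singleflight.Group

	// reconnect simulates an expensive login; singleflight hands every
	// concurrent caller with the same key the result of one shared attempt.
	func reconnect(key string) error {
		_, err, _ := g.Do(key, func() (interface{}, error) {
			fmt.Println("dialing once for", key)
			return nil, nil
		})
		return err
	}

	func main() {
		var wg sync.WaitGroup
		for i := 0; i < 5; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				_ = reconnect("ftp:host")
			}()
		}
		wg.Wait() // "dialing once" typically prints a single time
	}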

View File

@@ -9,6 +9,7 @@ import (
 	"text/template"
 	"time"

+	"github.com/OpenListTeam/OpenList/v4/internal/conf"
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
 	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
 	"github.com/ProtonMail/go-crypto/openpgp"
@@ -96,7 +97,7 @@ func getPathCommonAncestor(a, b string) (ancestor, aChildName, bChildName, aRest
 }

 func getUsername(ctx context.Context) string {
-	user, ok := ctx.Value("user").(*model.User)
+	user, ok := ctx.Value(conf.UserKey).(*model.User)
 	if !ok {
 		return "<system>"
 	}

View File

@@ -16,16 +16,7 @@ type Addition struct {
 var config = driver.Config{
 	Name:      "GitHub Releases",
-	LocalSort: false,
-	OnlyLocal: false,
-	OnlyProxy: false,
-	NoCache:   false,
-	NoUpload:  false,
-	NeedMs:    false,
-	DefaultRoot: "",
-	CheckStatus: false,
-	Alert:       "",
-	NoOverwriteUpload: false,
+	NoUpload:  true,
 }

 func init() {

View File

@@ -162,7 +162,7 @@ func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
 			SetBody(driver.NewLimitedUploadStream(ctx, stream))
 		}, nil)
 	} else {
-		err = d.chunkUpload(ctx, stream, putUrl)
+		err = d.chunkUpload(ctx, stream, putUrl, up)
 	}
 	return err
 }

View File

@@ -5,17 +5,20 @@ import (
 	"crypto/x509"
 	"encoding/pem"
 	"fmt"
-	"github.com/OpenListTeam/OpenList/v4/internal/op"
+	"io"
 	"net/http"
 	"os"
 	"regexp"
 	"strconv"
 	"time"

+	"github.com/OpenListTeam/OpenList/v4/internal/op"
+	"github.com/OpenListTeam/OpenList/v4/internal/stream"
+	"github.com/avast/retry-go"
+
 	"github.com/OpenListTeam/OpenList/v4/drivers/base"
 	"github.com/OpenListTeam/OpenList/v4/internal/driver"
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
-	"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
 	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
 	"github.com/go-resty/resty/v2"
 	"github.com/golang-jwt/jwt/v4"
@@ -251,28 +254,60 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {
 	return res, nil
 }

-func (d *GoogleDrive) chunkUpload(ctx context.Context, stream model.FileStreamer, url string) error {
+func (d *GoogleDrive) chunkUpload(ctx context.Context, file model.FileStreamer, url string, up driver.UpdateProgress) error {
 	var defaultChunkSize = d.ChunkSize * 1024 * 1024
-	var offset int64 = 0
-	for offset < stream.GetSize() {
-		if utils.IsCanceled(ctx) {
-			return ctx.Err()
-		}
-		chunkSize := stream.GetSize() - offset
-		if chunkSize > defaultChunkSize {
-			chunkSize = defaultChunkSize
-		}
-		reader, err := stream.RangeRead(http_range.Range{Start: offset, Length: chunkSize})
+	ss, err := stream.NewStreamSectionReader(file, int(defaultChunkSize), &up)
+	if err != nil {
+		return err
+	}
+	var offset int64 = 0
+	url += "?includeItemsFromAllDrives=true&supportsAllDrives=true"
+	for offset < file.GetSize() {
+		if utils.IsCanceled(ctx) {
+			return ctx.Err()
+		}
+		chunkSize := min(file.GetSize()-offset, defaultChunkSize)
+		reader, err := ss.GetSectionReader(offset, chunkSize)
 		if err != nil {
 			return err
 		}
-		reader = driver.NewLimitedUploadStream(ctx, reader)
-		_, err = d.request(url, http.MethodPut, func(req *resty.Request) {
-			req.SetHeaders(map[string]string{
-				"Content-Length": strconv.FormatInt(chunkSize, 10),
-				"Content-Range":  fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, stream.GetSize()),
-			}).SetBody(reader).SetContext(ctx)
-		}, nil)
+		limitedReader := driver.NewLimitedUploadStream(ctx, reader)
+		err = retry.Do(func() error {
+			reader.Seek(0, io.SeekStart)
+			req, err := http.NewRequestWithContext(ctx, http.MethodPut, url, limitedReader)
+			if err != nil {
+				return err
+			}
+			req.Header = map[string][]string{
+				"Authorization":  {"Bearer " + d.AccessToken},
+				"Content-Length": {strconv.FormatInt(chunkSize, 10)},
+				"Content-Range":  {fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, file.GetSize())},
+			}
+			res, err := base.HttpClient.Do(req)
+			if err != nil {
+				return err
+			}
+			defer res.Body.Close()
+			bytes, _ := io.ReadAll(res.Body)
+			var e Error
+			utils.Json.Unmarshal(bytes, &e)
+			if e.Error.Code != 0 {
+				if e.Error.Code == 401 {
+					err = d.refreshToken()
+					if err != nil {
+						return err
+					}
+				}
+				return fmt.Errorf("%s: %v", e.Error.Message, e.Error.Errors)
+			}
+			up(float64(offset+chunkSize) / float64(file.GetSize()) * 100)
+			return nil
+		},
+			retry.Attempts(3),
+			retry.DelayType(retry.BackOffDelay),
+			retry.Delay(time.Second))
+		ss.FreeSectionReader(reader)
 		if err != nil {
 			return err
 		}
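The loop above sizes each chunk with min(file.GetSize()-offset, defaultChunkSize) and advertises it with an inclusive Content-Range header, so only the final chunk is short. A tiny self-contained check of that arithmetic (the sizes are made up):

	package main

	import "fmt"

	// contentRange formats the Content-Range header for a resumable-upload
	// chunk: an inclusive byte range plus the total object size.
	func contentRange(offset, chunkSize, total int64) string {
		return fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, total)
	}

	func main() {
		const total, chunk = int64(10_000), int64(4_096)
		for offset := int64(0); offset < total; {
			size := min(total-offset, chunk)
			fmt.Println(contentRange(offset, size, total))
			offset += size
		}
		// bytes 0-4095/10000
		// bytes 4096-8191/10000
		// bytes 8192-9999/10000
	}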

View File

@@ -14,6 +14,7 @@ import (
 	"github.com/OpenListTeam/OpenList/v4/internal/driver"
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
 	"github.com/OpenListTeam/OpenList/v4/internal/op"
+	"github.com/OpenListTeam/OpenList/v4/internal/stream"
 	"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/credentials"
@@ -253,8 +254,8 @@ func (d *HalalCloud) getLink(ctx context.Context, file model.Obj, args model.Lin
 	chunks := getChunkSizes(result.Sizes)
 	resultRangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
 		length := httpRange.Length
-		if httpRange.Length >= 0 && httpRange.Start+httpRange.Length >= size {
-			length = -1
+		if httpRange.Length < 0 || httpRange.Start+httpRange.Length >= size {
+			length = size - httpRange.Start
 		}
 		oo := &openObject{
 			ctx: ctx,
@@ -276,9 +277,8 @@ func (d *HalalCloud) getLink(ctx context.Context, file model.Obj, args model.Lin
 		duration = time.Until(time.Now().Add(time.Hour))
 	}

-	resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader}
 	return &model.Link{
-		RangeReadCloser: resultRangeReadCloser,
+		RangeReader: stream.RateLimitRangeReaderFunc(resultRangeReader),
 		Expiration: &duration,
 	}, nil
 }

View File

@@ -19,16 +19,9 @@ type Addition struct {
 var config = driver.Config{
 	Name:        "HalalCloud",
-	LocalSort:   false,
-	OnlyLocal:   true,
 	OnlyProxy:   true,
-	NoCache:     false,
-	NoUpload:    false,
-	NeedMs:      false,
 	DefaultRoot: "/",
-	CheckStatus: false,
-	Alert:       "",
-	NoOverwriteUpload: false,
+	NoLinkURL:   true,
 }

 func init() {

View File

@@ -276,7 +276,7 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreame
 	etag := s.GetHash().GetHash(utils.MD5)
 	var err error
 	if len(etag) != utils.MD5.Width {
-		_, etag, err = stream.CacheFullInTempFileAndHash(s, utils.MD5)
+		_, etag, err = stream.CacheFullAndHash(s, &up, utils.MD5)
 		if err != nil {
 			return nil, err
 		}

View File

@@ -30,16 +30,8 @@ func init() {
 		return &ILanZou{
 			config: driver.Config{
 				Name:        "ILanZou",
-				LocalSort:   false,
-				OnlyLocal:   false,
-				OnlyProxy:   false,
-				NoCache:     false,
-				NoUpload:    false,
-				NeedMs:      false,
 				DefaultRoot: "0",
-				CheckStatus: false,
-				Alert:       "",
-				NoOverwriteUpload: false,
+				LocalSort:   true,
 			},
 			conf: Conf{
 				base: "https://api.ilanzou.com",
@@ -56,16 +48,8 @@ func init() {
 		return &ILanZou{
 			config: driver.Config{
 				Name:        "FeijiPan",
-				LocalSort:   false,
-				OnlyLocal:   false,
-				OnlyProxy:   false,
-				NoCache:     false,
-				NoUpload:    false,
-				NeedMs:      false,
 				DefaultRoot: "0",
-				CheckStatus: false,
-				Alert:       "",
-				NoOverwriteUpload: false,
+				LocalSort:   true,
 			},
 			conf: Conf{
 				base: "https://api.feijipan.com",

View File

@@ -17,7 +17,6 @@ var config = driver.Config{
 	Name:        "IPFS API",
 	DefaultRoot: "/",
 	LocalSort:   true,
-	OnlyProxy:   false,
 }

 func init() {

View File

@@ -15,7 +15,6 @@ type Addition struct {
 var config = driver.Config{
 	Name:        "KodBox",
-	DefaultRoot: "",
 }

 func init() {

View File

@@ -3,6 +3,8 @@ package LenovoNasShare
 import (
 	"context"
 	"net/http"
+	"net/url"
+	"strings"
 	"time"

 	"github.com/go-resty/resty/v2"
@@ -32,6 +34,10 @@ func (d *LenovoNasShare) Init(ctx context.Context) error {
 	if err := d.getStoken(); err != nil {
 		return err
 	}
+	if !d.ShowRootFolder && d.RootFolderPath == "" {
+		list, _ := d.List(ctx, File{}, model.ListArgs{})
+		d.RootFolderPath = list[0].GetPath()
+	}
 	return nil
 }
@@ -43,23 +49,46 @@ func (d *LenovoNasShare) List(ctx context.Context, dir model.Obj, args model.Lis
 	d.checkStoken() // check whether the stoken has expired

 	files := make([]File, 0)
+	path := dir.GetPath()
+	if path == "" && !d.ShowRootFolder && d.RootFolderPath != "" {
+		path = d.RootFolderPath
+	}

 	var resp Files
 	query := map[string]string{
 		"code":   d.ShareId,
 		"num":    "5000",
 		"stoken": d.stoken,
-		"path":   dir.GetPath(),
+		"path":   path,
 	}
 	_, err := d.request(d.Host+"/oneproxy/api/share/v1/files", http.MethodGet, func(req *resty.Request) {
 		req.SetQueryParams(query)
 	}, &resp)
 	if err != nil {
 		return nil, err
 	}
 	files = append(files, resp.Data.List...)

 	return utils.SliceConvert(files, func(src File) (model.Obj, error) {
+		if src.IsDir() {
 			return src, nil
+		}
+		return &model.ObjThumb{
+			Object: model.Object{
+				Name:     src.GetName(),
+				Size:     src.GetSize(),
+				Modified: src.ModTime(),
+				IsFolder: src.IsDir(),
+			},
+			Thumbnail: model.Thumbnail{
+				Thumbnail: func() string {
+					thumbUrl := d.Host + "/oneproxy/api/share/v1/file/thumb?code=" + d.ShareId + "&stoken=" + d.stoken + "&path=" + url.QueryEscape(src.GetPath())
+					return thumbUrl
+				}(),
+			},
+		}, nil
 	})
 }
@@ -73,6 +102,10 @@ func (d *LenovoNasShare) getStoken() error { // fetch the stoken
 	if d.Host == "" {
 		d.Host = "https://siot-share.lenovo.com.cn"
 	}
+
+	parts := strings.Split(d.ShareId, "/")
+	d.ShareId = parts[len(parts)-1]
+
 	query := map[string]string{
 		"code":     d.ShareId,
 		"password": d.SharePwd,

View File

@@ -10,20 +10,13 @@ type Addition struct {
 	ShareId  string `json:"share_id" required:"true" help:"The part after the last / in the shared link"`
 	SharePwd string `json:"share_pwd" required:"true" help:"The password of the shared link"`
 	Host     string `json:"host" required:"true" default:"https://siot-share.lenovo.com.cn" help:"You can change it to your local area network"`
+	ShowRootFolder bool `json:"show_root_folder" default:"true"`
 }

 var config = driver.Config{
 	Name:      "LenovoNasShare",
 	LocalSort: true,
-	OnlyLocal: false,
-	OnlyProxy: false,
-	NoCache:   false,
 	NoUpload:  true,
-	NeedMs:    false,
-	DefaultRoot: "",
-	CheckStatus: false,
-	Alert:       "",
-	NoOverwriteUpload: false,
 }

 func init() {

View File

@ -0,0 +1,92 @@
package local
// BenchmarkCalculateDirSize benchmarks the directory size calculation
// It should be run with the local driver enabled and directory size calculation set to true
import (
"os"
"path/filepath"
"strconv"
"testing"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
)
func generatedTestDir(dir string, dep, filecount int) {
if dep == 0 {
return
}
for i := 0; i < dep; i++ {
subDir := dir + "/dir" + strconv.Itoa(i)
os.Mkdir(subDir, 0755)
generatedTestDir(subDir, dep-1, filecount)
generatedFiles(subDir, filecount)
}
}
func generatedFiles(path string, count int) error {
for i := 0; i < count; i++ {
filePath := filepath.Join(path, "file"+strconv.Itoa(i)+".txt")
file, err := os.Create(filePath)
if err != nil {
return err
}
// fill the file with ASCII characters ('a' through 'z', repeating)
content := make([]byte, 1024) // 1KB file
for j := range content {
content[j] = byte('a' + j%26) // Fill with 'a' to 'z'
}
_, err = file.Write(content)
if err != nil {
return err
}
file.Close()
}
return nil
}
// performance tests for directory size calculation
func BenchmarkCalculateDirSize(t *testing.B) {
// log the start of the benchmark
t.Logf("Starting performance test for directory size calculation")
// make sure the test directory exists
if testing.Short() {
t.Skip("Skipping performance test in short mode")
}
// create a tmp directory for testing
testTempDir := t.TempDir()
err := os.MkdirAll(testTempDir, 0755)
if err != nil {
t.Fatalf("Failed to create test directory: %v", err)
}
defer os.RemoveAll(testTempDir) // Clean up after test
// build a directory tree of depth 5, with 10 files and 10 subdirectories per level
generatedTestDir(testTempDir, 5, 10)
// Initialize the local driver with directory size calculation enabled
d := &Local{
directoryMap: DirectoryMap{
root: testTempDir,
},
Addition: Addition{
DirectorySize: true,
RootPath: driver.RootPath{
RootFolderPath: testTempDir,
},
},
}
//record the start time
t.StartTimer()
// Calculate the directory size
err = d.directoryMap.RecalculateDirSize()
if err != nil {
t.Fatalf("Failed to calculate directory size: %v", err)
}
//record the end time
t.StopTimer()
// Print the size and duration
node, ok := d.directoryMap.Get(d.directoryMap.root)
if !ok {
t.Fatalf("Failed to get root node from directory map")
}
t.Logf("Directory size: %d bytes", node.fileSum+node.directorySum)
t.Logf("Performance test completed successfully")
}

View File

@ -19,6 +19,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/errs" "github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/sign" "github.com/OpenListTeam/OpenList/v4/internal/sign"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/OpenList/v4/server/common" "github.com/OpenListTeam/OpenList/v4/server/common"
"github.com/OpenListTeam/times" "github.com/OpenListTeam/times"
@ -32,6 +33,9 @@ type Local struct {
Addition Addition
mkdirPerm int32 mkdirPerm int32
// directory size data
directoryMap DirectoryMap
// zero means no limit // zero means no limit
thumbConcurrency int thumbConcurrency int
thumbTokenBucket TokenBucket thumbTokenBucket TokenBucket
@ -65,6 +69,15 @@ func (d *Local) Init(ctx context.Context) error {
} }
d.Addition.RootFolderPath = abs d.Addition.RootFolderPath = abs
} }
if d.DirectorySize {
d.directoryMap.root = d.GetRootPath()
_, err := d.directoryMap.CalculateDirSize(d.GetRootPath())
if err != nil {
return err
}
} else {
d.directoryMap.Clear()
}
if d.ThumbCacheFolder != "" && !utils.Exists(d.ThumbCacheFolder) { if d.ThumbCacheFolder != "" && !utils.Exists(d.ThumbCacheFolder) {
err := os.MkdirAll(d.ThumbCacheFolder, os.FileMode(d.mkdirPerm)) err := os.MkdirAll(d.ThumbCacheFolder, os.FileMode(d.mkdirPerm))
if err != nil { if err != nil {
@ -123,6 +136,9 @@ func (d *Local) GetAddition() driver.Additional {
func (d *Local) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { func (d *Local) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
fullPath := dir.GetPath() fullPath := dir.GetPath()
rawFiles, err := readDir(fullPath) rawFiles, err := readDir(fullPath)
if d.DirectorySize && args.Refresh {
d.directoryMap.RecalculateDirSize()
}
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -146,7 +162,12 @@ func (d *Local) FileInfoToObj(ctx context.Context, f fs.FileInfo, reqPath string
} }
isFolder := f.IsDir() || isSymlinkDir(f, fullPath) isFolder := f.IsDir() || isSymlinkDir(f, fullPath)
var size int64 var size int64
if !isFolder { if isFolder {
node, ok := d.directoryMap.Get(filepath.Join(fullPath, f.Name()))
if ok {
size = node.fileSum + node.directorySum
}
} else {
size = f.Size() size = f.Size()
} }
var ctime time.Time var ctime time.Time
@ -172,19 +193,6 @@ func (d *Local) FileInfoToObj(ctx context.Context, f fs.FileInfo, reqPath string
} }
return &file return &file
} }
func (d *Local) GetMeta(ctx context.Context, path string) (model.Obj, error) {
f, err := os.Stat(path)
if err != nil {
return nil, err
}
file := d.FileInfoToObj(ctx, f, path, path)
//h := "123123"
//if s, ok := f.(model.SetHash); ok && file.GetHash() == ("","") {
// s.SetHash(h,"SHA1")
//}
return file, nil
}
func (d *Local) Get(ctx context.Context, path string) (model.Obj, error) { func (d *Local) Get(ctx context.Context, path string) (model.Obj, error) {
path = filepath.Join(d.GetRootPath(), path) path = filepath.Join(d.GetRootPath(), path)
@ -198,7 +206,12 @@ func (d *Local) Get(ctx context.Context, path string) (model.Obj, error) {
 	isFolder := f.IsDir() || isSymlinkDir(f, path)
 	size := f.Size()
 	if isFolder {
-		size = 0
+		node, ok := d.directoryMap.Get(path)
+		if ok {
+			size = node.fileSum + node.directorySum
+		}
+	} else {
+		size = f.Size()
 	}
 	var ctime time.Time
 	t, err := times.Stat(path)
@ -220,7 +233,7 @@ func (d *Local) Get(ctx context.Context, path string) (model.Obj, error) {

 func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
 	fullPath := file.GetPath()
-	var link model.Link
+	link := &model.Link{}
 	if args.Type == "thumb" && utils.Ext(file.GetName()) != "svg" {
 		var buf *bytes.Buffer
 		var thumbPath *string
@ -240,19 +253,32 @@ func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
 			if err != nil {
 				return nil, err
 			}
+			// Get thumbnail file size for Content-Length
+			stat, err := open.Stat()
+			if err != nil {
+				open.Close()
+				return nil, err
+			}
+			link.ContentLength = int64(stat.Size())
 			link.MFile = open
 		} else {
 			link.MFile = bytes.NewReader(buf.Bytes())
-			//link.Header.Set("Content-Length", strconv.Itoa(buf.Len()))
+			link.ContentLength = int64(buf.Len())
 		}
 	} else {
 		open, err := os.Open(fullPath)
 		if err != nil {
 			return nil, err
 		}
+		link.ContentLength = file.GetSize()
 		link.MFile = open
 	}
-	return &link, nil
+	link.AddIfCloser(link.MFile)
+	if !d.Config().OnlyLinkMFile {
+		link.RangeReader = stream.GetRangeReaderFromMFile(link.ContentLength, link.MFile)
+		link.MFile = nil
+	}
+	return link, nil
 }

 func (d *Local) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
@ -270,22 +296,31 @@ func (d *Local) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
 	if utils.IsSubPath(srcPath, dstPath) {
 		return fmt.Errorf("the destination folder is a subfolder of the source folder")
 	}
-	if err := os.Rename(srcPath, dstPath); err != nil && strings.Contains(err.Error(), "invalid cross-device link") {
-		// Handle cross-device file move in local driver
-		if err = d.Copy(ctx, srcObj, dstDir); err != nil {
+	err := os.Rename(srcPath, dstPath)
+	if err != nil && strings.Contains(err.Error(), "invalid cross-device link") {
+		// Cross-device move: copy first, then delete the source
+		if err := d.Copy(ctx, srcObj, dstDir); err != nil {
 			return err
-		} else {
-			// Directly remove file without check recycle bin if successfully copied
-			if srcObj.IsDir() {
-				err = os.RemoveAll(srcObj.GetPath())
-			} else {
-				err = os.Remove(srcObj.GetPath())
-			}
-			return err
 		}
-	} else {
-		return err
+		// After a successful copy, remove the source file/folder directly
+		if srcObj.IsDir() {
+			return os.RemoveAll(srcObj.GetPath())
+		}
+		return os.Remove(srcObj.GetPath())
 	}
+	if err == nil {
+		srcParent := filepath.Dir(srcPath)
+		dstParent := filepath.Dir(dstPath)
+		if d.directoryMap.Has(srcParent) {
+			d.directoryMap.UpdateDirSize(srcParent)
+			d.directoryMap.UpdateDirParents(srcParent)
+		}
+		if d.directoryMap.Has(dstParent) {
+			d.directoryMap.UpdateDirSize(dstParent)
+			d.directoryMap.UpdateDirParents(dstParent)
+		}
+	}
+	return err
 }

 func (d *Local) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
@ -295,6 +330,14 @@ func (d *Local) Rename(ctx context.Context, srcObj model.Obj, newName string) er
 	if err != nil {
 		return err
 	}
+	if srcObj.IsDir() {
+		if d.directoryMap.Has(srcPath) {
+			d.directoryMap.DeleteDirNode(srcPath)
+			d.directoryMap.CalculateDirSize(dstPath)
+		}
+	}
 	return nil
 }
@ -305,11 +348,21 @@ func (d *Local) Copy(_ context.Context, srcObj, dstDir model.Obj) error {
 		return fmt.Errorf("the destination folder is a subfolder of the source folder")
 	}
 	// Copy using otiai10/copy to perform more secure & efficient copy
-	return cp.Copy(srcPath, dstPath, cp.Options{
+	err := cp.Copy(srcPath, dstPath, cp.Options{
 		Sync:          true, // Sync file to disk after copy, may have performance penalty in filesystem such as ZFS
 		PreserveTimes: true,
 		PreserveOwner: true,
 	})
+	if err != nil {
+		return err
+	}
+
+	if d.directoryMap.Has(filepath.Dir(dstPath)) {
+		d.directoryMap.UpdateDirSize(filepath.Dir(dstPath))
+		d.directoryMap.UpdateDirParents(filepath.Dir(dstPath))
+	}
+	return nil
 }

 func (d *Local) Remove(ctx context.Context, obj model.Obj) error {
@ -330,6 +383,19 @@ func (d *Local) Remove(ctx context.Context, obj model.Obj) error {
 	if err != nil {
 		return err
 	}
+	if obj.IsDir() {
+		if d.directoryMap.Has(obj.GetPath()) {
+			d.directoryMap.DeleteDirNode(obj.GetPath())
+			d.directoryMap.UpdateDirSize(filepath.Dir(obj.GetPath()))
+			d.directoryMap.UpdateDirParents(filepath.Dir(obj.GetPath()))
+		}
+	} else {
+		if d.directoryMap.Has(filepath.Dir(obj.GetPath())) {
+			d.directoryMap.UpdateDirSize(filepath.Dir(obj.GetPath()))
+			d.directoryMap.UpdateDirParents(filepath.Dir(obj.GetPath()))
+		}
+	}
 	return nil
 }
@ -353,6 +419,11 @@ func (d *Local) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
 	if err != nil {
 		log.Errorf("[local] failed to change time of %s: %s", fullPath, err)
 	}
+	if d.directoryMap.Has(dstDir.GetPath()) {
+		d.directoryMap.UpdateDirSize(dstDir.GetPath())
+		d.directoryMap.UpdateDirParents(dstDir.GetPath())
+	}
 	return nil
 }

View File

@ -7,6 +7,7 @@ import (
 type Addition struct {
 	driver.RootPath
+	DirectorySize    bool   `json:"directory_size" default:"false" help:"This might impact host performance"`
 	Thumbnail        bool   `json:"thumbnail" required:"true" help:"enable thumbnail"`
 	ThumbCacheFolder string `json:"thumb_cache_folder"`
 	ThumbConcurrency string `json:"thumb_concurrency" default:"16" required:"false" help:"Number of concurrent thumbnail generation goroutines. This controls how many thumbnails can be generated in parallel."`
@ -18,14 +19,17 @@ type Addition struct {
 var config = driver.Config{
 	Name:          "Local",
-	OnlyLocal:     true,
+	OnlyLinkMFile: false,
 	LocalSort:     true,
 	NoCache:       true,
 	DefaultRoot:   "/",
+	NoLinkURL:     true,
 }

 func init() {
 	op.RegisterDriver(func() driver.Driver {
-		return &Local{}
+		return &Local{
+			directoryMap: DirectoryMap{},
+		}
 	})
 }

View File

@ -7,9 +7,12 @@ import (
 	"io/fs"
 	"os"
 	"path/filepath"
+	"runtime"
+	"slices"
 	"sort"
 	"strconv"
 	"strings"
+	"sync"

 	"github.com/OpenListTeam/OpenList/v4/internal/conf"
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
@ -19,7 +22,8 @@ import (
 )

 func isSymlinkDir(f fs.FileInfo, path string) bool {
-	if f.Mode()&os.ModeSymlink == os.ModeSymlink {
+	if f.Mode()&os.ModeSymlink == os.ModeSymlink ||
+		(runtime.GOOS == "windows" && f.Mode()&os.ModeIrregular == os.ModeIrregular) { // os.ModeIrregular is Junction bit in Windows
 		dst, err := os.Readlink(filepath.Join(path, f.Name()))
 		if err != nil {
 			return false
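
The isSymlinkDir change above relies on the diff's own observation that Go reports NTFS junctions through os.ModeIrregular rather than os.ModeSymlink on Windows. A minimal standalone sketch of the same detection, assuming that mode-bit behavior holds:

package main

import (
	"fmt"
	"os"
	"runtime"
)

// looksLikeLink reports whether path may be a symlink or, on Windows, an
// NTFS junction (per the comment above, junctions surface as ModeIrregular),
// so both bits must be checked before attempting os.Readlink.
func looksLikeLink(path string) bool {
	info, err := os.Lstat(path)
	if err != nil {
		return false
	}
	mode := info.Mode()
	return mode&os.ModeSymlink != 0 ||
		(runtime.GOOS == "windows" && mode&os.ModeIrregular != 0)
}

func main() {
	fmt.Println(looksLikeLink(os.TempDir())) // false for a plain directory
}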
@ -151,3 +155,253 @@ func (d *Local) getThumb(file model.Obj) (*bytes.Buffer, *string, error) {
 	}
 	return &buf, nil, nil
 }
+
+type DirectoryMap struct {
+	root string
+	data sync.Map
+}
+
+type DirectoryNode struct {
+	fileSum      int64
+	directorySum int64
+	children     []string
+}
+
+type DirectoryTask struct {
+	path  string
+	cache *DirectoryTaskCache
+}
+
+type DirectoryTaskCache struct {
+	fileSum  int64
+	children []string
+}
+
+func (m *DirectoryMap) Has(path string) bool {
+	_, ok := m.data.Load(path)
+	return ok
+}
+
+func (m *DirectoryMap) Get(path string) (*DirectoryNode, bool) {
+	value, ok := m.data.Load(path)
+	if !ok {
+		return &DirectoryNode{}, false
+	}
+	node, ok := value.(*DirectoryNode)
+	if !ok {
+		return &DirectoryNode{}, false
+	}
+	return node, true
+}
+
+func (m *DirectoryMap) Set(path string, node *DirectoryNode) {
+	m.data.Store(path, node)
+}
+
+func (m *DirectoryMap) Delete(path string) {
+	m.data.Delete(path)
+}
+
+func (m *DirectoryMap) Clear() {
+	m.data.Clear()
+}
+
+func (m *DirectoryMap) RecalculateDirSize() error {
+	m.Clear()
+	if m.root == "" {
+		return fmt.Errorf("root path is not set")
+	}
+	size, err := m.CalculateDirSize(m.root)
+	if err != nil {
+		return err
+	}
+	if node, ok := m.Get(m.root); ok {
+		node.fileSum = size
+		node.directorySum = size
+	}
+	return nil
+}
+
+func (m *DirectoryMap) CalculateDirSize(dirname string) (int64, error) {
+	stack := []DirectoryTask{
+		{path: dirname},
+	}
+	for len(stack) > 0 {
+		task := stack[len(stack)-1]
+		stack = stack[:len(stack)-1]
+		if task.cache != nil {
+			directorySum := int64(0)
+			for _, filename := range task.cache.children {
+				child, ok := m.Get(filepath.Join(task.path, filename))
+				if !ok {
+					return 0, fmt.Errorf("child node not found")
+				}
+				directorySum += child.fileSum + child.directorySum
+			}
+			m.Set(task.path, &DirectoryNode{
+				fileSum:      task.cache.fileSum,
+				directorySum: directorySum,
+				children:     task.cache.children,
+			})
+			continue
+		}
+		files, err := readDir(task.path)
+		if err != nil {
+			return 0, err
+		}
+		fileSum := int64(0)
+		directorySum := int64(0)
+		children := []string{}
+		queue := []DirectoryTask{}
+		for _, f := range files {
+			fullpath := filepath.Join(task.path, f.Name())
+			isFolder := f.IsDir() || isSymlinkDir(f, fullpath)
+			if isFolder {
+				if node, ok := m.Get(fullpath); ok {
+					directorySum += node.fileSum + node.directorySum
+				} else {
+					queue = append(queue, DirectoryTask{
+						path: fullpath,
+					})
+				}
+				children = append(children, f.Name())
+			} else {
+				fileSum += f.Size()
+			}
+		}
+		if len(queue) > 0 {
+			stack = append(stack, DirectoryTask{
+				path: task.path,
+				cache: &DirectoryTaskCache{
+					fileSum:  fileSum,
+					children: children,
+				},
+			})
+			stack = append(stack, queue...)
+			continue
+		}
+		m.Set(task.path, &DirectoryNode{
+			fileSum:      fileSum,
+			directorySum: directorySum,
+			children:     children,
+		})
+	}
+	if node, ok := m.Get(dirname); ok {
+		return node.fileSum + node.directorySum, nil
+	}
+	return 0, nil
+}
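
CalculateDirSize above avoids recursion with a two-phase explicit stack: a directory is first popped without a cache (its children are discovered and pushed), then re-pushed with a DirectoryTaskCache so that when it is popped again all children already have totals, yielding a post-order aggregation. A toy, self-contained illustration of that pattern on an in-memory tree (names here are invented for the example):

package main

import "fmt"

// frame mirrors the DirectoryTask idea: a node visited twice, first to
// expand its children, then (expanded=true) to aggregate their totals.
type node struct {
	size     int
	children []*node
}

type frame struct {
	n        *node
	expanded bool
}

// total computes post-order subtree sums without recursion.
func total(root *node) int {
	sums := map[*node]int{}
	stack := []frame{{n: root}}
	for len(stack) > 0 {
		f := stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		if f.expanded {
			s := f.n.size
			for _, c := range f.n.children {
				s += sums[c] // children are guaranteed to be summed already
			}
			sums[f.n] = s
			continue
		}
		stack = append(stack, frame{n: f.n, expanded: true})
		for _, c := range f.n.children {
			stack = append(stack, frame{n: c})
		}
	}
	return sums[root]
}

func main() {
	leaf := &node{size: 2}
	root := &node{size: 1, children: []*node{leaf, {size: 3}}}
	fmt.Println(total(root)) // 6
}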
+
+func (m *DirectoryMap) UpdateDirSize(dirname string) (int64, error) {
+	node, ok := m.Get(dirname)
+	if !ok {
+		return 0, fmt.Errorf("directory node not found")
+	}
+	files, err := readDir(dirname)
+	if err != nil {
+		return 0, err
+	}
+	fileSum := int64(0)
+	directorySum := int64(0)
+	children := []string{}
+	for _, f := range files {
+		fullpath := filepath.Join(dirname, f.Name())
+		isFolder := f.IsDir() || isSymlinkDir(f, fullpath)
+		if isFolder {
+			if node, ok := m.Get(fullpath); ok {
+				directorySum += node.fileSum + node.directorySum
+			} else {
+				value, err := m.CalculateDirSize(fullpath)
+				if err != nil {
+					return 0, err
+				}
+				directorySum += value
+			}
+			children = append(children, f.Name())
+		} else {
+			fileSum += f.Size()
+		}
+	}
+	for _, c := range node.children {
+		if !slices.Contains(children, c) {
+			m.DeleteDirNode(filepath.Join(dirname, c))
+		}
+	}
+	node.fileSum = fileSum
+	node.directorySum = directorySum
+	node.children = children
+	return fileSum + directorySum, nil
+}
+
+func (m *DirectoryMap) UpdateDirParents(dirname string) error {
+	parentPath := filepath.Dir(dirname)
+	for parentPath != m.root && !strings.HasPrefix(m.root, parentPath) {
+		if node, ok := m.Get(parentPath); ok {
+			directorySum := int64(0)
+			for _, c := range node.children {
+				child, ok := m.Get(filepath.Join(parentPath, c))
+				if !ok {
+					return fmt.Errorf("child node not found")
+				}
+				directorySum += child.fileSum + child.directorySum
+			}
+			node.directorySum = directorySum
+		}
+		parentPath = filepath.Dir(parentPath)
+	}
+	return nil
+}
+
+func (m *DirectoryMap) DeleteDirNode(dirname string) error {
+	stack := []string{dirname}
+	for len(stack) > 0 {
+		current := stack[len(stack)-1]
+		stack = stack[:len(stack)-1]
+		if node, ok := m.Get(current); ok {
+			for _, filename := range node.children {
+				stack = append(stack, filepath.Join(current, filename))
+			}
+			m.Delete(current)
+		}
+	}
+	return nil
+}
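
Taken together, the methods above define a simple lifecycle: one full walk at Init, then cheap incremental updates after each mutation. A sketch of that intended usage, assuming the DirectoryMap code above is in scope; the paths are illustrative and error handling is elided:

package local

// exampleDirectoryMapUsage sketches the DirectoryMap lifecycle mirrored by
// the driver methods above (Init, then Put/Move/Copy/Remove updates).
func exampleDirectoryMapUsage() {
	m := DirectoryMap{root: "/data"}
	if _, err := m.CalculateDirSize("/data"); err != nil {
		return // initial walk failed, e.g. permission denied
	}
	// After writing into /data/photos, refresh that node, then its ancestors.
	if m.Has("/data/photos") {
		m.UpdateDirSize("/data/photos")    // re-stat direct children
		m.UpdateDirParents("/data/photos") // bubble new totals toward root
	}
	if node, ok := m.Get("/data"); ok {
		_ = node.fileSum + node.directorySum // aggregated size of /data
	}
}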

Some files were not shown because too many files have changed in this diff.