Compare commits

...

131 Commits

Author SHA1 Message Date
18a4aa1a29 fix(deps): update module gorm.io/driver/postgres to v1.6.0 2025-08-13 17:31:30 +00:00
d0c22a1ecb feat(ci): add the default user for docker image (#1036)
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-12 09:51:40 +08:00
57fceabcf4 perf(stream): improve file stream range reading and caching mechanism (#1001)
* perf(stream): improve file stream range reading and caching mechanism

* .

* add bytes_test.go

* fix(stream): handle EOF and buffer reading more gracefully

* Add comments

* refactor: update CacheFullAndWriter to accept pointer for UpdateProgress

* update tests

* Update drivers/google_drive/util.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>

* Clone Link more elegantly

* Fix stream being cached but not re-readable

* Rename the Bytes type to Reader

* Fix stack overflow

* update tests

---------

Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-11 23:41:22 +08:00
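The range-reading change above follows a general pattern: once a blob is cached, byte ranges can be served through io.ReaderAt without re-fetching. A minimal sketch of that pattern in Go; illustrative only, not the project's actual stream code:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	// Stand-in for a cached file body; anything implementing io.ReaderAt
	// (a memory buffer, a temp file) works the same way.
	cached := bytes.NewReader([]byte("0123456789abcdef"))

	// Serve the byte range [4, 4+8) straight from the cache,
	// without re-downloading the whole file.
	section := io.NewSectionReader(cached, 4, 8)

	out, err := io.ReadAll(section)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", out) // 456789ab
}
```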
8c244a984d refactor(assets): migrate to resource domain (#975)
* refactor(assets): migrate to resource domain

* feat(bootstrap): add migration value for logo and favicon settings
2025-08-10 09:57:33 +08:00
df479ba806 fix(aliyundrive_open): limit rate for every request (close #724) (#1011)
* fix(aliyundrive_open): limit rate for `Remove` and `MakeDir`; reduce limit for `List` and `Link` (close #724)

* Update drivers/aliyundrive_open/driver.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: 火星大王 <34576789+huoxingdawang@users.noreply.github.com>

* Update drivers/aliyundrive_open/driver.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: 火星大王 <34576789+huoxingdawang@users.noreply.github.com>

* fix(aliyundrive_open): limit rate for every request

* fix(aliyundrive_open): fix limiter not work on reference driver

* fix(aliyundrive_open): typo

* fix(aliyundrive_open): limiter not set to nil after free

* fix(aliyundrive_share): limit rate for every request

---------

Signed-off-by: 火星大王 <34576789+huoxingdawang@users.noreply.github.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-10 09:55:20 +08:00
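The throttling approach above can be sketched with golang.org/x/time/rate: every driver call waits on one shared limiter. The client type and method below are illustrative assumptions, not the driver's real API:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

// apiClient is a hypothetical stand-in for the driver: one shared
// limiter gates every outgoing request, not just List and Link.
type apiClient struct {
	limiter *rate.Limiter
}

func (c *apiClient) do(ctx context.Context, op string) error {
	// Wait blocks until the limiter grants one event or ctx is done.
	if err := c.limiter.Wait(ctx); err != nil {
		return err
	}
	fmt.Println(time.Now().Format("15:04:05.000"), op)
	return nil
}

func main() {
	// At most one request every 250ms, burst of 1.
	c := &apiClient{limiter: rate.NewLimiter(rate.Every(250*time.Millisecond), 1)}
	for _, op := range []string{"List", "Link", "MakeDir", "Remove"} {
		_ = c.do(context.Background(), op)
	}
}
```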
5ae8e96237 feat(123_open): update Put method to return model.Obj (#1008)
* feat(123_open): update Put method to return model.Obj

* fix(123_open): declare time zones

* chore(123_open): fix typo

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(123_open): use fixed timezone

* fix(123_open): implement PutResult interface for Open123 driver

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: Suyunmeng <69945917+Suyunmeng@users.noreply.github.com>
2025-08-09 15:09:12 +08:00
aa0ced47b0 fix(webdav): Handle HEAD requests for directories with appropriate headers (#1015)
Implement handling of HEAD requests for directories by setting the correct Content-Type and Content-Length headers.
2025-08-09 13:57:09 +08:00
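A minimal sketch of that behavior: respond to HEAD on a directory with explicit headers and an empty body. The httpd/unix-directory content type is a common WebDAV convention for collections; the handler and route here are assumptions, not the actual webdav code:

```go
package main

import "net/http"

// headDir answers HEAD for a directory with the headers WebDAV
// clients expect, instead of an error or a missing Content-Length.
func headDir(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodHead {
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
		return
	}
	w.Header().Set("Content-Type", "httpd/unix-directory")
	w.Header().Set("Content-Length", "0")
	w.WriteHeader(http.StatusOK)
}

func main() {
	http.HandleFunc("/dav/somedir/", headDir)
	_ = http.ListenAndServe(":8080", nil)
}
```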
ab747d9052 feat(config): Add PWA manifest.json endpoint for web app installation (#990)
* feat(config): Add PWA manifest.json endpoint for web app installation

* fix: Update comment to English in manifest handler

* fix: fix EOL

* fix: Remove unused fmt import from manifest handler

* feat: use site settings for manifest name and icon

* fix(manifest): Move manifest.json route to static handler for proper CDN handling

* feat: move manifest.json handler to static package and improve path handling

* feat: Add custom static file handler to prevent manifest.json conflicts

* fix: Integrate manifest.json handling into static file serving routes

* fix: Simplify PWA manifest scope handling and static file serving

- Remove CDN-specific logic for PWA manifest scope and start_url
- Always use base path for PWA scope regardless of CDN configuration
- Replace manual file serving logic with http.FileServer for static assets

* fix: Ensure consistent base path handling in site configuration and manifest path construction

* fix: Refactor trailing slash handling in site configuration

* feat(static): update manifest path handling and add route for manifest.json
2025-08-08 20:07:51 +08:00
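The endpoint's shape can be sketched with gin (used elsewhere in the project). Field values would come from site settings in the real handler; every name and path below is illustrative:

```go
package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

// manifest serves a minimal PWA manifest. Per the final design above,
// scope and start_url always use the base path, regardless of CDN.
func manifest(c *gin.Context) {
	base := "/" // would be derived from the configured site base path
	c.JSON(http.StatusOK, gin.H{
		"name":      "OpenList",
		"start_url": base,
		"scope":     base,
		"display":   "standalone",
		"icons": []gin.H{
			{"src": base + "images/logo.svg", "sizes": "any", "type": "image/svg+xml"},
		},
	})
}

func main() {
	r := gin.Default()
	r.GET("/manifest.json", manifest)
	_ = r.Run(":8080")
}
```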
93c06213d4 feat(local): add directory size support (#624)
* feat(local): add directory size support

* fix(local): fix and improve directory size calculation

* style(local): fix code style

* style(local): fix code style

* style(local): fix code style

* fix(local): refresh directory size when force refresh

Signed-off-by: 我怎么就不是一只猫呢? <26274059+dezhishen@users.noreply.github.com>

* fix(local): avoid traversing the parent's parent, which leads to an endless loop

Signed-off-by: 我怎么就不是一只猫呢? <26274059+dezhishen@users.noreply.github.com>

* fix(local): refresh dir size only when enabled

Signed-off-by: 我怎么就不是一只猫呢? <26274059+dezhishen@users.noreply.github.com>

* fix(local): logical error && add RecalculateDirSize && cleaner code for int64

* feat(local): add Benchmark for CalculateDirSize

* refactor(local): improve error handling during move

---------

Signed-off-by: 我怎么就不是一只猫呢? <26274059+dezhishen@users.noreply.github.com>
Co-authored-by: 我怎么就不是一只猫呢? <26274059+dezhishen@users.noreply.github.com>
2025-08-08 16:59:16 +08:00
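At its core the feature is a filesystem walk that sums regular-file sizes. A minimal sketch; it omits the driver's caching, the force-refresh path, and the parent-traversal guard fixed above:

```go
package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
)

// dirSize returns the total size in bytes of regular files under root.
func dirSize(root string) (int64, error) {
	var total int64
	err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.Type().IsRegular() {
			info, err := d.Info()
			if err != nil {
				return err
			}
			total += info.Size()
		}
		return nil
	})
	return total, err
}

func main() {
	n, err := dirSize(".")
	if err != nil {
		panic(err)
	}
	fmt.Println("total bytes:", n)
}
```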
b9b8eed285 [skip ci] feat(ci): add FRONTEND_REPO variable to workflows and build script (#1006) 2025-08-08 16:36:22 +08:00
317d190b77 fix(ftp): create a new connection for each download (#989) 2025-08-06 20:35:01 +08:00
52d7d819ad feat(lenovonas_share): add thumb (#986) 2025-08-06 17:34:43 +08:00
0483e0f868 feat(driver_strm): also show some files with strm (#969)
* feat(driver_strm): Also show some files with strm

Allow users to set some file types that need to be shown with strm, usually subtitles

Most of the code was copied and adapted from drivers/alias

* Optimize

* Optimize

* .

* Add comments

---------

Co-authored-by: j2rong4cn <j2rong@qq.com>
Co-authored-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>
2025-08-06 15:40:48 +08:00
08dae4f55f feat(123_open): update upload api v2 (#976) 2025-08-06 15:27:13 +08:00
9ac0484bc0 perf(ftp): improve concurrent Link response; fix alias/local driver issues (#974) 2025-08-06 13:32:37 +08:00
8cf15183a0 perf: optimize upload (#554)
* perf(115,123): optimize upload

* chore

* aliyun_open, google_drive

* fix bug

* chore

* cloudreve, cloudreve_v4, onedrive, onedrive_app

* chore(conf): add `max_buffer_limit` option

* 123pan multithread upload

* doubao

* google_drive

* chore

* chore

* chore: code for calculating the number of chunks

* MaxBufferLimit auto mode

* MaxBufferLimit auto mode

* 189pc

* add Lifecycle to errorgroup

* fill in gaps

* Conf.MaxBufferLimit is in MB

* .

---------

Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-05 21:42:54 +08:00
c8f2aaaa55 feat(cmd): add delete command for storage (#952) 2025-08-04 17:30:43 +08:00
1208bd0a83 fix(fs): nil interface not equal to nil (#971)
https://go.dev/doc/faq#nil_error
2025-08-03 23:51:11 +08:00
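The linked FAQ entry is the classic Go gotcha this commit fixes: an interface holding a typed nil pointer is itself non-nil. A self-contained demonstration:

```go
package main

import "fmt"

type myErr struct{}

func (*myErr) Error() string { return "boom" }

// fail returns a nil *myErr through the error interface. The interface
// value then carries a non-nil type descriptor, so it != nil.
func fail() error {
	var e *myErr
	return e
}

func main() {
	err := fail()
	fmt.Println(err == nil) // false; see go.dev/doc/faq#nil_error
}
```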
6b096bcad4 fix(fs): deadlock when get link error (#963) 2025-08-02 17:49:53 +08:00
58dbf088f9 fix(fs): forget cache when get link error (#956) 2025-08-02 11:03:34 +08:00
05ff7908f2 fix(strm): encoded path is ineffective (#951) 2025-08-02 00:23:18 +08:00
a703b736c9 feat(offline_download): filter empty URLs in offline download requests (#948) 2025-08-01 16:12:21 +08:00
e458f2ab53 fix(bootstrap): add newline after initial admin password output (#943)
fix(bootstrap): add newline after initial admin password output
2025-08-01 13:43:41 +08:00
a5a22e7085 fix(local): Treat junction as directory in Windows. (#809)
Treat junction as directory in Windows.
2025-07-31 13:54:56 +08:00
9469c95b14 fix(security): potential XSS vulnerabilities (#896) 2025-07-31 12:57:20 +08:00
cf912dcf7a fix(cmd): output to console (#920)
fix(cmd): output to terminal
2025-07-31 11:44:00 +08:00
ccd4af26e5 feat(patch): add migration from Alist V3 driver to OpenList (#919)
* feat(patch): add migration from Alist V3 driver to OpenList

* chore(patch): improve logging
2025-07-31 11:43:21 +08:00
1682e873d6 feat(search): enhanced meilisearch search experience (#864)
* feat(search): enhanced `meilisearch` search experience
- upgrade `meilisearch` dependency
- support subdirectory search
- optimize searchDocument fields for subdirectory search
- specify full index uid instead of index prefix

* fix(search): more fixes to `meilisearch`
- make use of context where context was not used
- remove the code that waits for tasks in the deletion process, as tasks are queued and executed in order (when submitted to the queue successfully), which can improve `AutoUpdate` performance
2025-07-31 11:24:22 +08:00
54ae7e6d9b feat(115_open): Add GetObjInfo to accelerate getting link (#888)
* feat(115_open): Add GetObjInfo to accelerate getting link

* feat(fs): use cache directly when cache exist
2025-07-31 11:20:02 +08:00
991da7d87f feat(strm): add local mode (#885)
* feat(strm): add local mode

* Update drivers/strm/meta.go

Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
Signed-off-by: Seven <53081179+Seven66677731@users.noreply.github.com>

* feat(strm): local mode add sign

---------

Signed-off-by: Seven <53081179+Seven66677731@users.noreply.github.com>
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-07-31 11:18:59 +08:00
Dgs
a498091aef fix(123&&123_share): fix link request header referer (#915) 2025-07-31 10:10:38 +08:00
976c82bb2b fix(drivers): update time-related fields to int64 (#913)
- In doubao/types.go:
  - Change LastUpdateTime from int to int64
  - Change UserCreateTime from int to int64
- In doubao_share/types.go:
  - Change CreateTime and UpdateTime from int to int64 in ShareInfo and FilePath
- In quark_uc/types.go:
  - Change UpdateTime from int to int64 in TranscodingResp

These changes ensure consistent and accurate representation of timestamp data across the project.
2025-07-31 10:10:32 +08:00
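The widening matters because millisecond epoch timestamps (~1.7e12) overflow a 32-bit int (max ~2.1e9), so decoding them into int fails on 32-bit builds. A sketch with a hypothetical struct mirroring the change:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// fileInfo mirrors the fix: timestamp fields are int64 so they can hold
// millisecond epochs on every platform, including 32-bit ones where
// int is only 32 bits wide.
type fileInfo struct {
	LastUpdateTime int64 `json:"last_update_time"`
}

func main() {
	var fi fileInfo
	if err := json.Unmarshal([]byte(`{"last_update_time": 1722384000000}`), &fi); err != nil {
		panic(err)
	}
	fmt.Println(fi.LastUpdateTime) // 1722384000000 (too large for int32)
}
```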
5b41a3bdff feat(ci): Add support for LoongArch64 architecture builds (#907) 2025-07-31 10:10:19 +08:00
19d1a3b785 refactor(ci): Refactor Docker build to use base images and dynamic Dockerfile generation (#904) 2025-07-30 15:04:29 +08:00
3c7b0c4999 fix(qb): Configure HTTP client with connection pooling and fix resource leaks in qBittorrent client. (#898) 2025-07-29 21:56:36 +08:00
d6867b4ab6 fix(user): show admin password on first start (#883)
* fix: fix admin password not shown in first start
* chore: add time dependence

Co-authored-by: Yinan Qin <39023210+elysia-best@users.noreply.github.com>
Signed-off-by: ILoveScratch <ilovescratch@foxmail.com>

* fix: fix log format

Co-authored-by: Yinan Qin <39023210+elysia-best@users.noreply.github.com>
Signed-off-by: ILoveScratch <ilovescratch@foxmail.com>

---------

Signed-off-by: ILoveScratch <ilovescratch@foxmail.com>
Co-authored-by: Yinan Qin <39023210+elysia-best@users.noreply.github.com>
2025-07-29 21:36:27 +08:00
11cf561307 fix(security): potential XSS vulnerabilities (#880)
* fix(security): potential XSS vulnerabilities

* chore: replace alist identifier to openlist identifier

Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
Signed-off-by: ILoveScratch <ilovescratch@foxmail.com>

---------

Signed-off-by: ILoveScratch <ilovescratch@foxmail.com>
Co-authored-by: ILoveScratch <ilovescratch@foxmail.com>
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-07-29 20:17:11 +08:00
239b58f63e fix(ci): Disable linux/s390x Docker builds (#887) 2025-07-29 16:22:50 +08:00
7da06655cb feat(setting): add site version information (#859)
* feat(setting): add site version information

* feat(conf): update conf.WebVersion to rolling

* fix(static): update condition to check conf.Version instead of conf.WebVersion

* fix(build.sh): use rolling release for web frontend in dev and beta builds

* chore(build.sh): update GitAuthor to The OpenList Projects Contributors

* fix(static): update condition to check conf.WebVersion
2025-07-29 09:49:33 +08:00
e0b3a611ba feat(thunderx,pikpak): add offline download support for ThunderX; add ctx to specific PikPak functions (#879)
* feat(thunderx,pikpak): add offline download support for ThunderX; add ctx to specific PikPak functions

* Update internal/offline_download/tool/download.go

Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
Signed-off-by: 花月喵梦 <152958106+nekohy@users.noreply.github.com>

---------

Signed-off-by: 花月喵梦 <152958106+nekohy@users.noreply.github.com>
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-07-29 09:46:28 +08:00
be1ad08a83 feat(ci): Add Windows 7 and LoongArch Release build support (#857)
* feat: Add Windows 7 and LoongArch old world build support (#30)

* feat: Add Windows 7 and Loongson old world build support

- Add BuildWin7() function with patched Go compiler for Windows 7 compatibility
- Add BuildLoongOldWorld() function for linux-loong64-abi1.0 target
- Create Zig-based wrapper scripts for Windows 7 cross-compilation
- Integrate new build functions into existing release workflows

* fix(win7): Add MinGW-w64 toolchain and improve LoongArch ABI isolation

- Install MinGW-w64 cross-compilation toolchain for Win7 compatibility
- Replace Zig compiler wrappers with MinGW-w64 for Windows 7 builds
- Add Go build cache cleaning to prevent LoongArch ABI1.0/ABI2.0 cross-contamination
- Force clean rebuilds (-a flag) for LoongArch builds to ensure ABI compatibility

* feat: add Windows 7 build support to beta release workflow

* feat: add LoongArch ABI2.0 support alongside existing ABI1.0 build (#31)

- Add BuildWin7() function with patched Go compiler for Windows 7 compatibility
- Add BuildLoongOldWorld() function for linux-loong64-abi1.0 target
- Create Zig-based wrapper scripts for Windows 7 cross-compilation
- Integrate new build functions into existing release workflows
- Install MinGW-w64 cross-compilation toolchain for Win7 compatibility
- Replace Zig compiler wrappers with MinGW-w64 for Windows 7 builds
- Add Go build cache cleaning to prevent LoongArch ABI1.0/ABI2.0 cross-contamination
- Force clean rebuilds (-a flag) for LoongArch builds to ensure ABI compatibility

* [skip ci] refactor: Refactor LoongArch builds to separate glibc from musl compilation

* fix(go-cache): Improve error handling for Go module cache cleaning in LoongArch builds

* feat(build): Enhance LoongArch build process with improved toolchain setup and cache management

* fix(build): Update Windows 7 target naming in build scripts and workflows

* refactor(build): Replace MinGW-w64 with Zig for Windows 7 toolchain in build scripts

* chore(cgo): remove cgo-actions subproject
2025-07-27 00:27:31 +08:00
4e9c30f49d feat(fs): full support webdav cross-driver copy and move (#823)
* fix(fs): restore webdav cross-driver copy and move

* fix bug

* webdav: support copying and moving folders

* Optimize

* .
2025-07-26 00:27:46 +08:00
0ee31a3f36 fix(crypt): wrong ContentLength 2025-07-25 19:55:22 +08:00
23bddf991e feat(drivers): enable local sorting for cloudreve, ilanzou (#840)
* feat(cloudreve): enable local sorting

* feat(ilanzou): enable local sorting
2025-07-25 18:01:19 +08:00
da8d6607cf fix(static): support logo replacement (#834 Close #754) 2025-07-25 17:12:51 +08:00
6134574dac fix(fs): rename bug (#832)
* fix(fs): rename bug

* chore

* fix bug

* .

---------

Co-authored-by: j2rong4cn <j2rong@qq.com>
2025-07-25 13:42:39 +08:00
b273232f87 refactor(log): redir utils.Log to logrus after init (#833) 2025-07-25 13:38:45 +08:00
358e4d851e refactor(log): filter (#816) 2025-07-25 11:33:27 +08:00
e8a1ed638a fix(ci): Exclude FreeBSD patch releases from version detection 2025-07-24 22:41:45 +08:00
4106e2a996 fix(static): correct CDN fetch condition for index.html (#814) 2025-07-24 22:28:58 +08:00
c2271df64e fix(ci): update OpenListTeam/cgo-actions to v1.2.2 to fix loongarch64 build (#811)
* Update beta_release.yml

* Update build.yml
2025-07-24 22:20:23 +08:00
d4b8570eb8 fix(docker): Fix the runsvdir permission issue caused by su-exec user switching and resolve the RUN_ARIA2 variable compatibility problem. (#805) 2025-07-24 17:22:49 +08:00
bd297e8ccc fix(deps): update module golang.org/x/image to v0.29.0 (#804)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-24 16:22:19 +08:00
923d282c8a fix(deps): update module github.com/sheltonzhu/115driver to v1.1.0 (#803)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-24 16:21:32 +08:00
4d8c4d7089 fix(deps): update module github.com/coreos/go-oidc to v2.3.0+incompatible (#586)
* fix(deps): update module github.com/coreos/go-oidc to v2.3.0+incompatible

* Update go.mod

Signed-off-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>

---------

Signed-off-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>
2025-07-24 16:21:03 +08:00
e93ab76036 feat(task-group): introduce TaskGroupCoordinator for coordinated task execution (#721)
* feat(task): add task hook,batch task
refactor(move): move use CopyTask

* Update internal/task/batch_task/refresh.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: Seven <53081179+Seven66677731@users.noreply.github.com>

* fix: upload task allFinish judge

* Update internal/task/batch_task/refresh.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: Seven <53081179+Seven66677731@users.noreply.github.com>

* feat: enhance concurrency safety

* Optimize code

* Decompression

* Fix deadlock

* refactor(move): move as task

* Refactor and optimize

* .

* Optimize and fix bugs

* .

* Fix bugs

* feat: add task retry judge

* Proxy the Task.SetState function to track the Task lifecycle

* chore: use the OnSucceeded, OnFailed, OnBeforeRetry functions

* Optimize

* Optimize and remove duplicate code

* .

* Optimize

* .

* webdav

* Revert "fix(fs):After the file is copied or moved, flush the cache of the directory that was copied or moved to."

This reverts commit 5f03edd683.

---------

Signed-off-by: Seven <53081179+Seven66677731@users.noreply.github.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: j2rong4cn <j2rong@qq.com>
2025-07-24 16:15:24 +08:00
a9f02ecdac refactor(log): Refactor log filtering to use centralized configuration and add server-specific filtering (#798)
* feat(log): Add configurable log filtering middleware for HTTP requests

Implement a comprehensive log filtering system that allows selective suppression of HTTP request logs based on paths, methods, and prefixes. The system includes environment variable configuration support and filters health checks, WebDAV requests, and HEAD requests by default to reduce log noise.

* fix(log): Replace gin.DefaultLogFormatter with custom implementation

* Remove filtered logger test file

* fix(log): Refactor log filtering to use centralized configuration and add server-specific filtering

* fix(log): Add documentation comments for log filtering configuration
2025-07-24 16:10:47 +08:00
93849a3b5b fix(deps): update module github.com/pquerna/otp to v1.5.0 (#799)
Signed-off-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>
2025-07-24 16:07:23 +08:00
c2e0d0c9ce fix(deps): update module github.com/protonmail/go-crypto to v1.3.0 (#800)
Signed-off-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>
2025-07-24 16:06:50 +08:00
4a713363ee fix(deps): update module github.com/azure/azure-sdk-for-go/sdk/storage/azblob to v1.6.2 (#801)
Signed-off-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>
2025-07-24 16:06:10 +08:00
3da8ccb7a7 fix(deps): update module github.com/rclone/rclone to v1.70.3 (#802)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-24 16:05:20 +08:00
676b8cff0b fix(deps): update azure-sdk-for-go monorepo (#579)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-24 10:27:36 +08:00
57cf28fc90 fix(deps): update github.com/fclairamb/ftpserverlib digest to 4a925d7 (#675)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-24 10:26:39 +08:00
8cf90e074d fix(deps): update module github.com/charmbracelet/bubbletea to v1.3.6 (#585)
Signed-off-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>
2025-07-24 10:26:23 +08:00
74c2ed8306 fix(deps): update module github.com/charmbracelet/bubbles to v0.21.0 (#583)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-24 10:25:09 +08:00
5f03edd683 fix(fs): After the file is copied or moved, flush the cache of the directory that was copied or moved to. (#592)
* fix(fs): After the file is copied, the cache of the copied directory is refreshed

* fixed randomstring

* fixed EOL and Sync branch

chore(quark_uc): `webdav_policy` default to native_proxy

* fixed uuid and other bugs

* fixed comments

* fixed EOL

* add move refresh

* fixed builds

* fixed batch

* change batch to task.go

---------

Co-authored-by: Sumengjing <146963948+suyunjing-su@users.noreply.github.com>
2025-07-24 10:24:12 +08:00
8b65c918d4 chore(permission): admin enables webdav read-only by default (#726)
chore: admin enables webdav read-only by default
2025-07-24 10:19:49 +08:00
b5f0e3e5ee fix(deps): update module github.com/go-webauthn/webauthn to v0.13.4 (#677)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-24 10:05:44 +08:00
179894ff37 fix(deps): update module github.com/ipfs/go-cid to v0.5.0 (#680)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-24 10:05:05 +08:00
e2fc89c637 chore(deps): update dependency go to v1.24.5 (#783)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-24 10:04:20 +08:00
cacf67b181 fix(deps): update module github.com/yuin/goldmark to v1.7.13 (#794)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-24 10:04:00 +08:00
afb043e1d6 feat(docker): Change keep-alive strategy to runit, add aria2 log support (#791) 2025-07-24 09:19:33 +08:00
d9debb81ad feat(log): Add configurable log filtering middleware for HTTP requests (#782)
* feat(log): Add configurable log filtering middleware for HTTP requests

Implement a comprehensive log filtering system that allows selective suppression of HTTP request logs based on paths, methods, and prefixes. The system includes environment variable configuration support and filters health checks, WebDAV requests, and HEAD requests by default to reduce log noise.
2025-07-24 00:00:26 +08:00
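A sketch of such a middleware in gin: wrap the stock logger and skip it for noisy paths and HEAD requests. The real implementation reads its path, method, and prefix filters from environment-driven configuration; the names below are illustrative:

```go
package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

// filteredLogger suppresses access logs for the given paths and for
// HEAD requests, and defers to gin's standard logger otherwise.
func filteredLogger(skipPaths map[string]bool) gin.HandlerFunc {
	logger := gin.Logger()
	return func(c *gin.Context) {
		if c.Request.Method == http.MethodHead || skipPaths[c.Request.URL.Path] {
			c.Next() // handle the request, but write no access log
			return
		}
		logger(c) // gin.Logger calls c.Next() itself, then logs
	}
}

func main() {
	r := gin.New()
	r.Use(filteredLogger(map[string]bool{"/ping": true}))
	r.GET("/ping", func(c *gin.Context) { c.String(http.StatusOK, "pong") })
	_ = r.Run(":8080")
}
```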
4c069fddd6 fix(terabox): file upload error (#733)
* fix(terabox): fix file upload error "failed to create file" (errno 10)

Signed-off-by: yuyamionini <46483865+yuyamionini@users.noreply.github.com>

* fix(terabox): fix file upload error "failed to create file" (errno 10)

Signed-off-by: yuyamionini <46483865+yuyamionini@users.noreply.github.com>

* replace the goto statement with the retry-go package

Signed-off-by: yuyamionini <46483865+yuyamionini@users.noreply.github.com>

* Update util.go

Signed-off-by: yuyamionini <46483865+yuyamionini@users.noreply.github.com>

* Update util.go

Signed-off-by: yuyamionini <46483865+yuyamionini@users.noreply.github.com>

* go fmt

---------

Signed-off-by: yuyamionini <46483865+yuyamionini@users.noreply.github.com>
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-07-23 23:42:12 +08:00
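Replacing a goto-based retry loop with the retry-go package looks roughly like this; the failing createFile call is a stand-in for the driver's actual upload request:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/avast/retry-go"
)

// createFile simulates the flaky terabox call ("failed to create file
// errno 10") that previously drove a goto-based retry loop.
func createFile() error {
	return errors.New("failed to create file errno 10")
}

func main() {
	err := retry.Do(
		createFile,
		retry.Attempts(3),         // give up after three tries
		retry.LastErrorOnly(true), // report only the final failure
	)
	fmt.Println("final:", err)
}
```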
b450a2104d chore(docs): update domain (#788)
* chore(docs): update domain

* docs(issue): add guide link for bug reporting
2025-07-23 14:26:21 +08:00
7d0de17daf feat(static): fetch index.html from cdn for beta (#372)
* refactor(static): simplify folder iteration in Static function

* feat(static): disable local static when `cdn` is set

* feat(static): fetch index.html from cdn for beta

* refactor(static): use RestyClient for better retrying

* fix(static): add Accept header when fetching index.html from CDN

* refactor(static): optimize HTML replacement

* chore(static): add logging to static file system initialization

* feat(static): ensure static file redirected to CDN
2025-07-22 22:14:07 +08:00
bba4fb2203 fix(security): directory traversal (#744)
* fix(security): Directory traversal

* chore: .

* Optimize

---------

Co-authored-by: j2rong4cn <j2rong@qq.com>
2025-07-22 14:45:01 +08:00
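The usual guard for this class of bug: join the user-supplied path onto a fixed root, clean it, and reject anything that escapes. A generic sketch, not the project's exact patch:

```go
package main

import (
	"errors"
	"fmt"
	"path/filepath"
	"strings"
)

// safeJoin resolves userPath under root and refuses paths that climb
// out of it via ".." segments.
func safeJoin(root, userPath string) (string, error) {
	p := filepath.Clean(filepath.Join(root, userPath))
	if p != root && !strings.HasPrefix(p, root+string(filepath.Separator)) {
		return "", errors.New("path escapes root")
	}
	return p, nil
}

func main() {
	fmt.Println(safeJoin("/srv/data", "docs/a.txt"))       // /srv/data/docs/a.txt <nil>
	fmt.Println(safeJoin("/srv/data", "../../etc/passwd")) // "" path escapes root
}
```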
a20c2020f8 fix(cmd): optimize parsing of command flag --data (#777)
* fix(cmd): optimize parsing of command flag `--data`

* DBFile

* Optimize

* os.Getwd()
2025-07-22 10:51:28 +08:00
a92b5eb929 refactor(cloudreve): use retry-go for net/http uploads (#773)
* refactor(cloudreve): use retry-go for uploads

* refactor(cloudreve_v4): use retry-go for uploads

* refactor(onedrive): use retry-go for uploads

* refactor(onedrive_app): use retry-go for uploads

* chore(onedrive_app): remove unnecessary error handling for host retrieval

* feat(cloudreve): move read logic inside retry block

* feat(cloudreve_v4): move read logic inside retry block

* feat(onedrive): move read logic inside retry block

* feat(onedrive_app): move read logic inside retry block
2025-07-22 10:25:04 +08:00
6817494a41 chore(ci): update cgo-actions to 1.2.1 & add patch version define for go (#779)
chore(ci): update cgo-actions to 1.2.1 & fix patch version for go
2025-07-22 09:02:07 +08:00
5a0d8ee1b8 feat(proxy): add disable proxy sign (#764)
* feat(proxy): add disable proxy sign

* Update driver.go

* GenerateDownProxyUrl

* .

* Update internal/op/driver.go

Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>

* .

---------

Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>
Co-authored-by: j2rong4cn <j2rong@qq.com>
Co-authored-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>
2025-07-21 17:03:08 +08:00
012e51c551 fix(cloudreve_v4): remove deprecated authn check for login (#767)
* fix(cloudreve_v4): disable authn check for login

* chore(cloudreve_v4): update site login config fields
2025-07-21 15:53:10 +08:00
59ec1dbc9b feat(lenovonas_share): add option to not show root directory (#772) 2025-07-21 14:38:10 +08:00
6bb28d13f9 fix(quark): set the transcoding link ContentLength to the correct size 2025-07-20 16:40:32 +08:00
811a862288 feat(archives): add additional accepted archive extensions (#747) 2025-07-20 15:32:46 +08:00
74d32fd4d7 fix(simplehttp): logic bug when unable to parse file name (#761) 2025-07-20 14:13:30 +08:00
cedb3d488d [skip ci] chore(ci): output binary name set to openlist 2025-07-19 23:02:29 +08:00
86324d2d6b fix(net): ensure accurate content-length in response (#749)
* fix(fs): ensure accurate content-length in http2 requests

Chrome browsers were unable to preview thumbnails, reporting an
'ERR_HTTP_2_PROTOCOL_ERROR'. This was caused by an incorrect
content-length header in the server's response for thumbnail images.

This commit corrects the content-length calculation, allowing
Chrome and other compliant clients to render thumbnails correctly.

* fix(net): ensure accurate content-length in response

* Fill in gaps

* .

---------

Co-authored-by: zhiqiang.huang <zhiqiang.tech@gmail.com>
Co-authored-by: j2rong4cn <j2rong@qq.com>
2025-07-19 20:36:27 +08:00
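The essence of the fix: declare the exact byte count before writing, because HTTP/2 clients abort on a mismatch between the Content-Length header and the body. A minimal sketch with placeholder bytes:

```go
package main

import (
	"net/http"
	"strconv"
)

// thumb writes a body whose length matches its Content-Length header
// exactly; a mismatch is what triggered ERR_HTTP_2_PROTOCOL_ERROR.
func thumb(w http.ResponseWriter, r *http.Request) {
	body := []byte("...image bytes (placeholder)...")
	w.Header().Set("Content-Type", "image/jpeg")
	w.Header().Set("Content-Length", strconv.Itoa(len(body)))
	_, _ = w.Write(body)
}

func main() {
	http.HandleFunc("/thumb", thumb)
	_ = http.ListenAndServe(":8080", nil)
}
```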
648079ae24 remove upx (#750)
Update build.sh

Signed-off-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>
2025-07-18 12:38:17 +08:00
Dgs
e8d45398d6 feat(quark_uc_tv): add streaming link api (#728) 2025-07-17 14:24:16 +08:00
0c461991f9 chore: standardize context keys with custom ContextKey type (#697)
* chore: standardize context keys with custom ContextKey type

* fix bug

* Use Request.Context
2025-07-14 23:55:17 +08:00
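The pattern being standardized: a dedicated key type keeps context values from colliding with plain string keys set by other packages. The key name below is illustrative:

```go
package main

import (
	"context"
	"fmt"
)

// ContextKey is a distinct named type, so it can never collide with a
// plain string key used elsewhere; Value() compares key types too.
type ContextKey string

const userKey ContextKey = "user"

func main() {
	ctx := context.WithValue(context.Background(), userKey, "admin")

	fmt.Println(ctx.Value("user"))  // <nil>: string key, different type
	fmt.Println(ctx.Value(userKey)) // admin
}
```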
2a4c546a8b feat: default settings api (#716)
* feat: default settings api

* fix logic bug

* chore
2025-07-14 23:41:34 +08:00
750d4eb3f6 docs(README): add disclaimer (#705)
add disclaimer
2025-07-13 15:22:25 +08:00
cc01b410a4 perf(link): optimize concurrent response (#641)
* fix(crypt): bug caused by link cache

* perf(crypt,mega,halalcloud,quark,uc): optimize concurrent response link

* chore: remove unused code

* ftp

* Fix bugs; release resources

* Add SyncClosers

* local,sftp,smb

* Refactor, optimize, enhance

* Update internal/stream/util.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>

* chore

* chore

* Optimize and fix bugs

* .

---------

Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-07-12 17:57:54 +08:00
e5fbe72581 fix(security): add login count validation for webdav (#693) 2025-07-12 17:03:41 +08:00
283f3723d1 [skip ci] chore(ci): update openwrt hook 2025-07-12 12:06:36 +08:00
ad8c7b37a1 chore(ci): Disable duplicate build process 2025-07-12 11:49:27 +08:00
a84ffb96e9 chore(ci): Simplify the build process (#686)
* refactor(ci): Minify build files
2025-07-11 20:30:31 +08:00
19c6b6f930 feat(115_open): add offline download (#683) 2025-07-11 20:17:54 +08:00
eed3c0533c fix(deps): update module github.com/go-resty/resty/v2 to v2.16.5 (#628)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-11 10:26:44 +08:00
c72ba9828a fix(deps): update module github.com/deckarep/golang-set/v2 to v2.8.0 (#589)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-11 10:25:08 +08:00
4965a1b909 fix(deps): update module github.com/blevesearch/bleve/v2 to v2.5.2 (#582)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-11 10:24:50 +08:00
1bba550469 chore(deps): update dependency go to 1.24 (#578)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-11 10:24:23 +08:00
d678322b18 fix(deps): update module github.com/yuin/goldmark to v1.7.12 (#575)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-11 10:24:09 +08:00
efd8897bdf fix(deps): update module github.com/pkg/sftp to v1.13.9 (#574)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-11 10:23:52 +08:00
7c7cec0993 style(offline_download): add more description in log (#653)
fix(offline_download): add more description in log
2025-07-09 14:16:05 +08:00
3838ef0663 feat(traffic): update progress when caching file (#646)
* feat(traffic): update progress when caching file

* Adjust parameter order and naming

---------

Co-authored-by: j2rong4cn <j2rong@qq.com>
2025-07-08 21:41:45 +08:00
9e610af114 fix(115_open): upload progress error (#637) 2025-07-07 18:39:09 +08:00
0177177238 fix(crypt): pass refresh list request (close #609) 2025-07-06 13:20:42 +08:00
a77e515c9b fix(ocr): repair verification code OCR recognition service (#602)
* fix(ocr): Repair verification code OCR recognition service

* Fix the issue where it did not work for non-new users

* chore: rename SettingItem.PreDefault to MigrationValue

---------

Co-authored-by: j2rong4cn <j2rong@qq.com>
2025-07-06 13:09:17 +08:00
4af16ab009 fix(115open): fix limit_rate save (#601) 2025-07-06 12:07:07 +08:00
da35423198 [skip ci] chore: go mod tidy 2025-07-06 00:55:23 +08:00
9612d61e60 chore(pkg): update singleflight 2025-07-05 13:31:47 +08:00
92f396df10 chore(quark_uc): webdav_policy default to native_proxy 2025-07-04 19:06:40 +08:00
9557834342 [skip ci] chore(net): update test 2025-07-04 18:44:52 +08:00
288ba2fcda chore(strm): remove excess parameters (#587) 2025-07-04 17:50:37 +08:00
f3920b02f7 fix(net): goroutine deadlock 2025-07-04 12:52:21 +08:00
2ec9dad3db fix(deps): update module github.com/charmbracelet/lipgloss to v0.13.1 (#449)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-04 12:16:42 +08:00
e11227fe2d fix(deps): update module github.com/otiai10/copy to v1.14.1 (#530)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-04 12:16:25 +08:00
859931b78c fix(deps): update module github.com/nwaples/rardecode/v2 to v2.1.1 (#529)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-04 11:27:14 +08:00
b591524ac3 fix(deps): update module github.com/dlclark/regexp2 to v1.11.5 (#450)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-04 11:26:53 +08:00
dc26b4fce5 fix(deps): update module github.com/aws/aws-sdk-go to v1.55.7 (#439)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-04 11:26:36 +08:00
f8cf02a2da fix(deps): update module github.com/golang-jwt/jwt/v4 to v4.5.2 (#453)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-04 11:26:11 +08:00
a214e794f4 fix(deps): update module github.com/ncw/swift/v2 to v2.0.4 (#525)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-04 11:25:52 +08:00
54d761b371 fix(deps): update module github.com/gin-contrib/cors to v1.7.6 (#451)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-04 11:20:51 +08:00
bea7a9b0e4 chore: remove deprecated trainbit drive (#563)
* chore: remove deprecated trainbit drive

* chore: go mod tidy

---------

Co-authored-by: j2rong4cn <j2rong@qq.com>
2025-07-03 18:30:37 +08:00
a46f4cff18 chore(pr auto reply ci): Update PR title validation and feedback messages (#559)
Update PR title validation and feedback messages

Improves the PR title regex to be non-greedy and adds 'chore' to the allowed prefixes. Enhances feedback comments with clearer instructions in both Chinese and English, including guidance for PRs spanning multiple components.
2025-07-03 15:33:02 +08:00
8eb2d600c7 chore(issues): issue and pr auto reply (#551) 2025-07-03 13:11:39 +08:00
ffb6c2a180 refactor: optimize stream, link, and resource management (#486)
* refactor: optimize stream, link, and resource management

* Change Link.MFile to the io.ReadSeeker type

* fix (crypt): read on closed response body

* chore

* chore

* chore
2025-07-03 10:39:34 +08:00
8e19a0fb07 fix(s3): logic bug (close #547 #548) 2025-07-03 10:36:34 +08:00
79f4f96217 feat(strm): add sign and encode path options (#537) 2025-07-02 21:09:57 +08:00
287 changed files with 7671 additions and 5514 deletions

View File

@ -25,11 +25,11 @@ body:
- label: |
我确认我的描述清晰,语法礼貌,能帮助开发者快速定位问题,并符合社区规则。
- label: |
我已确认阅读了[OpenList文档](https://docs.oplist.org)。
我已确认阅读了[OpenList文档](https://doc.oplist.org)。
- label: |
我已确认没有重复的问题或讨论。
- label: |
我已确认是`OpenList`的问题,而不是其他原因(例如 [网络](https://docs.oplist.org/zh/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host) `依赖`或`操作`)。
我已确认是`OpenList`的问题,而不是其他原因(例如 [网络](https://doc.oplist.org/faq/howto#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host-1) `依赖`或`操作`)。
- label: |
我认为此问题必须由`OpenList`处理,而非第三方。
- label: |
@ -72,7 +72,7 @@ body:
attributes:
label: 日志(可选)
description: |
请复制粘贴错误日志,或者截图。(可隐藏隐私字段)
请复制粘贴错误日志,或者截图。(可隐藏隐私字段) [查看方法](https://doc.oplist.org/faq/howto#%E5%A6%82%E4%BD%95%E5%BF%AB%E9%80%9F%E5%AE%9A%E4%BD%8Dbug)
- type: textarea
id: reproduction
attributes:

View File

@ -25,11 +25,11 @@ body:
- label: |
I confirm my description is clear, polite, helps developers quickly locate the issue, and complies with community rules.
- label: |
I have read the [OpenList documentation](https://docs.oplist.org).
I have read the [OpenList documentation](https://doc.oplist.org).
- label: |
I confirm there are no duplicate issues or discussions.
- label: |
I confirm this is an `OpenList` issue, not caused by other reasons (such as [network](https://docs.oplist.org/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host), dependencies, or operation).
I confirm this is an `OpenList` issue, not caused by other reasons (such as [network](https://doc.oplist.org/faq/howto#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host-1), dependencies, or operation).
- label: |
I believe this issue must be handled by `OpenList` and not by a third party.
- label: |
@ -72,7 +72,7 @@ body:
attributes:
label: Logs (optional)
description: |
Please copy and paste any relevant log output or screenshots. (You may mask sensitive fields)
Please copy and paste any relevant log output or screenshots. (You may mask sensitive fields) [Guide](https://doc.oplist.org/faq/howto#how-to-quickly-locate-bugs)
- type: textarea
id: reproduction
attributes:

View File

@ -19,7 +19,7 @@ body:
- label: |
我确认我的描述清晰,语法礼貌,能帮助开发者快速定位问题,并符合社区规则。
- label: |
我已确认阅读了[OpenList文档](https://docs.oplist.org)。
我已确认阅读了[OpenList文档](https://doc.oplist.org)。
- label: |
我已确认没有重复的问题或讨论。
- label: |

View File

@ -19,7 +19,7 @@ body:
- label: |
I confirm my description is clear, polite, helps developers quickly locate the issue, and complies with community rules.
- label: |
I have read the [OpenList documentation](https://docs.oplist.org).
I have read the [OpenList documentation](https://doc.oplist.org).
- label: |
I confirm there are no duplicate issues or discussions.
- label: |

21
.github/config.yml vendored
View File

@ -1,21 +0,0 @@
# Configuration for welcome - https://github.com/behaviorbot/welcome
# Configuration for new-issue-welcome - https://github.com/behaviorbot/new-issue-welcome
# Comment to be posted to on first time issues
newIssueWelcomeComment: >
Thanks for opening your first issue here! Be sure to follow the issue template!
# Configuration for new-pr-welcome - https://github.com/behaviorbot/new-pr-welcome
# Comment to be posted to on PRs from first time contributors in your repository
newPRWelcomeComment: >
Thanks for opening this pull request! Please check out our contributing guidelines.
# Configuration for first-pr-merge - https://github.com/behaviorbot/first-pr-merge
# Comment to be posted to on pull requests merged by a first time user
firstPRMergeComment: >
Congrats on merging your first pull request! We here at behavior bot are proud of you!
# It is recommend to include as many gifs and emojis as possible

21
.github/stale.yml vendored
View File

@ -1,21 +0,0 @@
# Number of days of inactivity before an issue becomes stale
daysUntilStale: 44
# Number of days of inactivity before a stale issue is closed
daysUntilClose: 20
# Issues with these labels will never be considered stale
exemptLabels:
- accepted
- security
- working
- pr-welcome
# Label to use when marking an issue as stale
staleLabel: stale
# Comment to post when marking an issue as stale. Set to `false` to disable
markComment: >
This issue has been automatically marked as stale because it has not had
recent activity. It will be closed if no further activity occurs. Thank you
for your contributions.
# Comment to post when closing a stale issue. Set to `false` to disable
closeComment: >
This issue was closed due to inactive more than 52 days. You can reopen or
recreate it if you think it should continue. Thank you for your contributions again.

View File

@ -14,12 +14,8 @@ permissions:
jobs:
changelog:
strategy:
matrix:
platform: [ubuntu-latest]
go-version: ["1.21"]
name: Beta Release Changelog
runs-on: ${{ matrix.platform }}
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
@ -65,7 +61,7 @@ jobs:
strategy:
matrix:
include:
- target: "!(*musl*|*windows-arm64*|*android*|*freebsd*)" # xgo
- target: "!(*musl*|*windows-arm64*|*windows7-*|*android*|*freebsd*)" # xgo and loongarch
hash: "md5"
- target: "linux-!(arm*)-musl*" #musl-not-arm
hash: "md5-linux-musl"
@ -73,6 +69,8 @@ jobs:
hash: "md5-linux-musl-arm"
- target: "windows-arm64" #win-arm64
hash: "md5-windows-arm64"
- target: "windows7-*" #win7
hash: "md5-windows7"
- target: "android-*" #android
hash: "md5-android"
- target: "freebsd-*" #freebsd
@ -89,27 +87,29 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: "1.22"
go-version: "1.24.5"
- name: Setup web
run: bash build.sh dev web
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
- name: Build
uses: OpenListTeam/cgo-actions@v1.1.2
uses: OpenListTeam/cgo-actions@v1.2.2
with:
targets: ${{ matrix.target }}
musl-target-format: $os-$musl-$arch
github-token: ${{ secrets.GITHUB_TOKEN }}
out-dir: build
output: openlist-$target$ext
musl-base-url: "https://github.com/OpenListTeam/musl-compilers/releases/latest/download/"
x-flags: |
github.com/OpenListTeam/OpenList/v4/internal/conf.BuiltAt=$built_at
github.com/OpenListTeam/OpenList/v4/internal/conf.GitAuthor=OpenList
github.com/OpenListTeam/OpenList/v4/internal/conf.GitAuthor=The OpenList Projects Contributors <noreply@openlist.team>
github.com/OpenListTeam/OpenList/v4/internal/conf.GitCommit=$git_commit
github.com/OpenListTeam/OpenList/v4/internal/conf.Version=$tag
github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=dev
github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=rolling
- name: Compress
run: |

View File

@ -1,8 +1,6 @@
name: Test Build
on:
push:
branches: ["main"]
pull_request:
branches: ["main"]
workflow_dispatch:
@ -15,7 +13,6 @@ jobs:
build:
strategy:
matrix:
platform: [ubuntu-latest]
target:
- darwin-amd64
- darwin-arm64
@ -24,8 +21,8 @@ jobs:
- linux-amd64-musl
- windows-arm64
- android-arm64
name: Build
runs-on: ${{ matrix.platform }}
name: Build ${{ matrix.target }}
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
@ -36,28 +33,31 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: "1.22"
go-version: "1.24.5"
- name: Setup web
run: bash build.sh dev web
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
- name: Build
uses: OpenListTeam/cgo-actions@v1.1.2
uses: OpenListTeam/cgo-actions@v1.2.2
with:
targets: ${{ matrix.target }}
musl-target-format: $os-$musl-$arch
github-token: ${{ secrets.GITHUB_TOKEN }}
out-dir: build
x-flags: |
github.com/OpenListTeam/OpenList/v4/internal/conf.BuiltAt=$built_at
github.com/OpenListTeam/OpenList/v4/internal/conf.GitAuthor=OpenList
github.com/OpenListTeam/OpenList/v4/internal/conf.GitAuthor=The OpenList Projects Contributors <noreply@openlist.team>
github.com/OpenListTeam/OpenList/v4/internal/conf.GitCommit=$git_commit
github.com/OpenListTeam/OpenList/v4/internal/conf.Version=$tag
github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=dev
github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=rolling
output: openlist$ext
- name: Upload artifact
uses: actions/upload-artifact@v4
with:
name: openlist_${{ env.SHA }}_${{ matrix.target }}
name: openlist_${{ steps.short-sha.outputs.sha }}_${{ matrix.target }}
path: build/*

View File

@ -1,4 +1,4 @@
name: Automatic changelog
name: Release Automatic changelog
on:
push:

61
.github/workflows/issue_pr_comment.yml vendored Normal file
View File

@ -0,0 +1,61 @@
name: Issue or PR Auto Reply
on:
issues:
types: [opened]
pull_request:
types: [opened]
permissions:
issues: write
pull-requests: write
jobs:
auto-reply:
runs-on: ubuntu-latest
if: github.event_name == 'issues'
steps:
- name: Check issue for unchecked tasks and reply
uses: actions/github-script@v7
with:
script: |
const issueBody = context.payload.issue.body || "";
const unchecked = /- \[ \] /.test(issueBody);
let comment = "感谢您联系OpenList。我们会尽快回复您。\n";
comment += "Thanks for contacting OpenList. We will reply to you as soon as possible.\n\n";
if (unchecked) {
comment += "由于您提出的 Issue 中包含部分未确认的项目,为了更好地管理项目,在人工审核后可能会直接关闭此问题。\n";
comment += "如果您能确认并补充相关未确认项目的信息,欢迎随时重新提交。我们会及时关注并处理。感谢您的理解与支持!\n";
comment += "Since your issue contains some unchecked tasks, it may be closed after manual review.\n";
comment += "If you can confirm and provide information for the unchecked tasks, feel free to resubmit.\n";
comment += "We will pay attention and handle it in a timely manner.\n\n";
comment += "感谢您的理解与支持!\n";
comment += "Thank you for your understanding and support!\n";
}
await github.rest.issues.createComment({
...context.repo,
issue_number: context.issue.number,
body: comment
});
pr-title-check:
runs-on: ubuntu-latest
if: github.event_name == 'pull_request'
steps:
- name: Check PR title for required prefix and comment
uses: actions/github-script@v7
with:
script: |
const title = context.payload.pull_request.title || "";
const ok = /^(feat|docs|fix|style|refactor|chore)\(.+?\): /i.test(title);
if (!ok) {
let comment = "⚠️ PR 标题需以 `feat(): `, `docs(): `, `fix(): `, `style(): `, `refactor(): `, `chore(): ` 其中之一开头,例如:`feat(component): 新增功能`。\n";
comment += "⚠️ The PR title must start with `feat(): `, `docs(): `, `fix(): `, `style(): `, or `refactor(): `, `chore(): `. For example: `feat(component): add new feature`.\n\n";
comment += "如果跨多个组件,请使用主要组件作为前缀,并在标题中枚举、描述中说明。\n";
comment += "If it spans multiple components, use the main component as the prefix and enumerate in the title, describe in the body.\n\n";
await github.rest.issues.createComment({
...context.repo,
issue_number: context.issue.number,
body: comment
});
}

View File

@ -8,24 +8,34 @@ permissions:
contents: write
jobs:
# Set release to prerelease first
prerelease:
name: Set Prerelease
runs-on: ubuntu-latest
steps:
- name: Prerelease
uses: irongut/EditRelease@v1.2.0
with:
token: ${{ secrets.GITHUB_TOKEN }}
id: ${{ github.event.release.id }}
prerelease: true
# Main release job for all platforms
release:
needs: prerelease
strategy:
matrix:
platform: [ ubuntu-latest ]
go-version: [ '1.21' ]
name: Release
runs-on: ${{ matrix.platform }}
build-type: [ 'standard', 'lite' ]
target-platform: [ '', 'android', 'freebsd', 'linux_musl', 'linux_musl_arm' ]
name: Release ${{ matrix.target-platform && format('{0} ', matrix.target-platform) || '' }}${{ matrix.build-type == 'lite' && 'Lite' || '' }}
runs-on: ubuntu-latest
steps:
- name: Free Disk Space (Ubuntu)
if: matrix.target-platform == ''
uses: jlumbroso/free-disk-space@main
with:
# this might remove tools that are actually needed,
# if set to "true" but frees about 6 GB
tool-cache: false
# all of these default to true, but feel free to set to
# "false" if necessary for your workflow
android: true
dotnet: true
haskell: true
@ -33,17 +43,10 @@ jobs:
docker-images: true
swap-storage: true
- name: Prerelease
uses: irongut/EditRelease@v1.2.0
with:
token: ${{ secrets.GITHUB_TOKEN }}
id: ${{ github.event.release.id }}
prerelease: true
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
go-version: '1.24'
- name: Checkout
uses: actions/checkout@v4
@ -51,6 +54,7 @@ jobs:
fetch-depth: 0
- name: Install dependencies
if: matrix.target-platform == ''
run: |
sudo snap install zig --classic --beta
docker pull crazymax/xgo:latest
@ -59,70 +63,10 @@ jobs:
- name: Build
run: |
bash build.sh release
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Upload assets
uses: softprops/action-gh-release@v2
with:
files: build/compress/*
prerelease: false
release-lite:
strategy:
matrix:
platform: [ ubuntu-latest ]
go-version: [ '1.21' ]
name: Release Lite
runs-on: ${{ matrix.platform }}
steps:
- name: Free Disk Space (Ubuntu)
uses: jlumbroso/free-disk-space@main
with:
# this might remove tools that are actually needed,
# if set to "true" but frees about 6 GB
tool-cache: false
# all of these default to true, but feel free to set to
# "false" if necessary for your workflow
android: true
dotnet: true
haskell: true
large-packages: true
docker-images: true
swap-storage: true
- name: Prerelease
uses: irongut/EditRelease@v1.2.0
with:
token: ${{ secrets.GITHUB_TOKEN }}
id: ${{ github.event.release.id }}
prerelease: true
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Install dependencies
run: |
sudo snap install zig --classic --beta
docker pull crazymax/xgo:latest
go install github.com/crazy-max/xgo@latest
sudo apt install upx
- name: Build
run: |
bash build.sh release lite
bash build.sh release ${{ matrix.build-type == 'lite' && 'lite' || '' }} ${{ matrix.target-platform }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
- name: Upload assets
uses: softprops/action-gh-release@v2

View File

@ -1,69 +0,0 @@
name: Release builds (Android)
on:
release:
types: [ published ]
permissions:
contents: write
jobs:
release_android:
strategy:
matrix:
platform: [ ubuntu-latest ]
go-version: [ '1.21' ]
name: Release
runs-on: ${{ matrix.platform }}
steps:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build
run: |
bash build.sh release android
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Upload assets
uses: softprops/action-gh-release@v2
with:
files: build/compress/*
release_android_lite:
strategy:
matrix:
platform: [ ubuntu-latest ]
go-version: [ '1.21' ]
name: Release
runs-on: ${{ matrix.platform }}
steps:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build
run: |
bash build.sh release lite android
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Upload assets
uses: softprops/action-gh-release@v2
with:
files: build/compress/*

View File

@ -31,11 +31,8 @@ env:
REGISTRY: ghcr.io
ARTIFACT_NAME: 'binaries_docker_release'
ARTIFACT_NAME_LITE: 'binaries_docker_release_lite'
RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64'
RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/ppc64le,linux/riscv64,linux/loong64' ### Temporarily disable Docker builds for linux/s390x architectures for unknown reasons.
IMAGE_PUSH: ${{ github.event_name == 'push' || github.event_name == 'workflow_dispatch' }}
IMAGE_IS_PROD: ${{ github.ref_type == 'tag' || github.event.inputs.as_latest == 'true' }}
IMAGE_TAGS_BETA: |
type=raw,value=beta,enable={{is_default_branch}}
permissions:
packages: write
@ -65,17 +62,11 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Build go binary (beta)
if: env.IMAGE_IS_PROD != 'true'
run: bash build.sh beta docker-multiplatform
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Build go binary (release)
if: env.IMAGE_IS_PROD == 'true'
run: bash build.sh release docker-multiplatform
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
- name: Upload artifacts
uses: actions/upload-artifact@v4
@ -88,7 +79,7 @@ jobs:
!build/musl-libs/**
build_binary_lite:
name: Build Binaries for Docker Release
name: Build Binaries for Docker Release (Lite)
runs-on: ubuntu-latest
steps:
- name: Checkout
@ -111,17 +102,11 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Build go binary (beta)
if: env.IMAGE_IS_PROD != 'true'
run: bash build.sh beta lite docker-multiplatform
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Build go binary (release)
if: env.IMAGE_IS_PROD == 'true'
run: bash build.sh release lite docker-multiplatform
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
- name: Upload artifacts
uses: actions/upload-artifact@v4
@ -142,15 +127,19 @@ jobs:
image: ["latest", "ffmpeg", "aria2", "aio"]
include:
- image: "latest"
base_image_tag: "base"
build_arg: ""
tag_favor: ""
- image: "ffmpeg"
base_image_tag: "ffmpeg"
build_arg: INSTALL_FFMPEG=true
tag_favor: "suffix=-ffmpeg,onlatest=true"
- image: "aria2"
base_image_tag: "aria2"
build_arg: INSTALL_ARIA2=true
tag_favor: "suffix=-aria2,onlatest=true"
- image: "aio"
base_image_tag: "aio"
build_arg: |
INSTALL_FFMPEG=true
INSTALL_ARIA2=true
@ -181,7 +170,7 @@ jobs:
if: env.IMAGE_PUSH == 'true'
uses: docker/login-action@v3
with:
username: ${{ env.DOCKERHUB_ORG_NAME }}
username: ${{ vars.DOCKERHUB_ORG_NAME_BACKUP || env.DOCKERHUB_ORG_NAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Docker meta
@ -192,13 +181,11 @@ jobs:
${{ env.REGISTRY }}/${{ env.GHCR_ORG_NAME }}/${{ env.IMAGE_NAME }}
${{ env.DOCKERHUB_ORG_NAME }}/${{ env.IMAGE_NAME_DOCKERHUB }}
tags: >
${{ env.IMAGE_IS_PROD == 'true' && (
github.event_name == 'workflow_dispatch'
${{ github.event_name == 'workflow_dispatch'
&& format('type=raw,value={0}', github.event.inputs.manual_tag)
|| format('type=raw,value={0}', github.ref_name)
) || env.IMAGE_TAGS_BETA }}
|| format('type=raw,value={0}', github.ref_name) }}
flavor: |
latest=${{ env.IMAGE_IS_PROD }}
latest=${{ github.event_name == 'push' || github.event.inputs.as_latest == 'true' }}
${{ matrix.tag_favor }}
- name: Build and push
@ -208,29 +195,35 @@ jobs:
context: .
file: Dockerfile.ci
push: ${{ env.IMAGE_PUSH == 'true' }}
build-args: ${{ matrix.build_arg }}
build-args: |
BASE_IMAGE_TAG=${{ matrix.base_image_tag }}
${{ matrix.build_arg }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
platforms: ${{ env.RELEASE_PLATFORMS }}
release_docker_lite:
needs: build_binary_lite
name: Release Docker image
name: Release Docker image (Lite)
runs-on: ubuntu-latest
strategy:
matrix:
image: ["latest", "ffmpeg", "aria2", "aio"]
include:
- image: "latest"
base_image_tag: "base"
build_arg: ""
tag_favor: "suffix=-lite,onlatest=true"
- image: "ffmpeg"
base_image_tag: "ffmpeg"
build_arg: INSTALL_FFMPEG=true
tag_favor: "suffix=-lite-ffmpeg,onlatest=true"
- image: "aria2"
base_image_tag: "aria2"
build_arg: INSTALL_ARIA2=true
tag_favor: "suffix=-lite-aria2,onlatest=true"
- image: "aio"
base_image_tag: "aio"
build_arg: |
INSTALL_FFMPEG=true
INSTALL_ARIA2=true
@ -261,7 +254,7 @@ jobs:
if: env.IMAGE_PUSH == 'true'
uses: docker/login-action@v3
with:
username: ${{ env.DOCKERHUB_ORG_NAME }}
username: ${{ vars.DOCKERHUB_ORG_NAME_BACKUP || env.DOCKERHUB_ORG_NAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Docker meta
@ -272,13 +265,11 @@ jobs:
${{ env.REGISTRY }}/${{ env.GHCR_ORG_NAME }}/${{ env.IMAGE_NAME }}
${{ env.DOCKERHUB_ORG_NAME }}/${{ env.IMAGE_NAME_DOCKERHUB }}
tags: >
${{ env.IMAGE_IS_PROD == 'true' && (
github.event_name == 'workflow_dispatch'
${{ github.event_name == 'workflow_dispatch'
&& format('type=raw,value={0}', github.event.inputs.manual_tag)
|| format('type=raw,value={0}', github.ref_name)
) || env.IMAGE_TAGS_BETA }}
|| format('type=raw,value={0}', github.ref_name) }}
flavor: |
latest=${{ env.IMAGE_IS_PROD }}
latest=${{ github.event_name == 'push' || github.event.inputs.as_latest == 'true' }}
${{ matrix.tag_favor }}
- name: Build and push
@ -288,7 +279,9 @@ jobs:
context: .
file: Dockerfile.ci
push: ${{ env.IMAGE_PUSH == 'true' }}
build-args: ${{ matrix.build_arg }}
build-args: |
BASE_IMAGE_TAG=${{ matrix.base_image_tag }}
${{ matrix.build_arg }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
platforms: ${{ env.RELEASE_PLATFORMS }}

View File

@ -1,69 +0,0 @@
name: Release builds (Freebsd)
on:
release:
types: [ published ]
permissions:
contents: write
jobs:
release_freebsd:
strategy:
matrix:
platform: [ ubuntu-latest ]
go-version: [ '1.21' ]
name: Release
runs-on: ${{ matrix.platform }}
steps:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build
run: |
bash build.sh release freebsd
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Upload assets
uses: softprops/action-gh-release@v2
with:
files: build/compress/*
release_freebsd_lite:
strategy:
matrix:
platform: [ ubuntu-latest ]
go-version: [ '1.21' ]
name: Release
runs-on: ${{ matrix.platform }}
steps:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build
run: |
bash build.sh release lite freebsd
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Upload assets
uses: softprops/action-gh-release@v2
with:
files: build/compress/*

View File

@ -1,69 +0,0 @@
name: Release builds (linux_musl)
on:
release:
types: [ published ]
permissions:
contents: write
jobs:
release_linux_musl:
strategy:
matrix:
platform: [ ubuntu-latest ]
go-version: [ '1.21' ]
name: Release
runs-on: ${{ matrix.platform }}
steps:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build
run: |
bash build.sh release linux_musl
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Upload assets
uses: softprops/action-gh-release@v2
with:
files: build/compress/*
release_linux_musl_lite:
strategy:
matrix:
platform: [ ubuntu-latest ]
go-version: [ '1.21' ]
name: Release
runs-on: ${{ matrix.platform }}
steps:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build
run: |
bash build.sh release lite linux_musl
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Upload assets
uses: softprops/action-gh-release@v2
with:
files: build/compress/*

View File

@ -1,70 +0,0 @@
name: Release builds (linux_musl_arm)
on:
release:
types: [ published ]
permissions:
contents: write
jobs:
release_linux_musl_arm:
strategy:
matrix:
platform: [ ubuntu-latest ]
go-version: [ '1.21' ]
name: Release
runs-on: ${{ matrix.platform }}
steps:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build
run: |
bash build.sh release linux_musl_arm
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Upload assets
uses: softprops/action-gh-release@v2
with:
files: build/compress/*
release_linux_musl_arm_lite:
strategy:
matrix:
platform: [ ubuntu-latest ]
go-version: [ '1.21' ]
name: Release
runs-on: ${{ matrix.platform }}
steps:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build
run: |
bash build.sh release lite linux_musl_arm
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Upload assets
uses: softprops/action-gh-release@v2
with:
files: build/compress/*

View File

@ -1,4 +1,4 @@
name: Docker Beta Release
name: Beta Release (Docker)
on:
workflow_dispatch:
@ -20,8 +20,7 @@ env:
IMAGE_NAME_DOCKERHUB: openlist
REGISTRY: ghcr.io
ARTIFACT_NAME: 'binaries_docker_release'
ARTIFACT_NAME_LITE: 'binaries_docker_release_lite'
RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64'
RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/ppc64le,linux/riscv64,linux/loong64' ### Docker builds for linux/s390x are temporarily disabled for unknown reasons.
IMAGE_PUSH: ${{ github.event_name == 'push' }}
IMAGE_TAGS_BETA: |
type=ref,event=pr
@ -29,7 +28,7 @@ env:
jobs:
build_binary:
name: Build Binaries for Docker Release
name: Build Binaries for Docker Release (Beta)
runs-on: ubuntu-latest
steps:
- name: Checkout
@ -56,6 +55,7 @@ jobs:
run: bash build.sh beta docker-multiplatform
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
- name: Upload artifacts
uses: actions/upload-artifact@v4
@ -69,7 +69,7 @@ jobs:
release_docker:
needs: build_binary
name: Release Docker image
name: Release Docker image (Beta)
runs-on: ubuntu-latest
permissions:
packages: write
@ -78,15 +78,19 @@ jobs:
image: ["latest", "ffmpeg", "aria2", "aio"]
include:
- image: "latest"
base_image_tag: "base"
build_arg: ""
tag_favor: ""
- image: "ffmpeg"
base_image_tag: "ffmpeg"
build_arg: INSTALL_FFMPEG=true
tag_favor: "suffix=-ffmpeg,onlatest=true"
- image: "aria2"
base_image_tag: "aria2"
build_arg: INSTALL_ARIA2=true
tag_favor: "suffix=-aria2,onlatest=true"
- image: "aio"
base_image_tag: "aio"
build_arg: |
INSTALL_FFMPEG=true
INSTALL_ARIA2=true
@ -117,7 +121,7 @@ jobs:
if: env.IMAGE_PUSH == 'true'
uses: docker/login-action@v3
with:
username: ${{ env.DOCKERHUB_ORG_NAME }}
username: ${{ vars.DOCKERHUB_ORG_NAME_BACKUP || env.DOCKERHUB_ORG_NAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Docker meta
@ -138,7 +142,9 @@ jobs:
context: .
file: Dockerfile.ci
push: ${{ env.IMAGE_PUSH == 'true' }}
build-args: ${{ matrix.build_arg }}
build-args: |
BASE_IMAGE_TAG=${{ matrix.base_image_tag }}
${{ matrix.build_arg }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
platforms: ${{ env.RELEASE_PLATFORMS }}
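As a local approximation, the "aio" matrix entry above corresponds to a buildx invocation along these lines (the image name is illustrative, and the binaries produced by the build_binary job must already sit under build/):

    docker buildx build -f Dockerfile.ci \
      --build-arg BASE_IMAGE_TAG=aio \
      --build-arg INSTALL_FFMPEG=true \
      --build-arg INSTALL_ARIA2=true \
      --platform linux/amd64,linux/arm64 \
      -t example/openlist:beta-aio .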

View File

@ -19,7 +19,7 @@ jobs:
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.EXTERNAL_REPO_TOKEN_LUCI_APP_OPENLIST }}
repository: ${{ vars.HOOK_REPO || 'OpenListTeam/luci-app-openlist' }}
repository: ${{ vars.HOOK_REPO || 'OpenListTeam/OpenList-OpenWRT' }}
event-type: update-hashes
client-payload: |
{

View File

@ -1,4 +1,7 @@
FROM docker.io/library/alpine:edge AS builder
### The default image is "base". Other variants can be selected by setting BASE_IMAGE_TAG. Supported values: base (default), aria2, ffmpeg, aio
ARG BASE_IMAGE_TAG=base
FROM alpine:edge AS builder
LABEL stage=go-builder
WORKDIR /app/
RUN apk add --no-cache bash curl jq gcc git go musl-dev
@ -7,36 +10,26 @@ RUN go mod download
COPY ./ ./
RUN bash build.sh release docker
FROM alpine:edge
FROM openlistteam/openlist-base-image:${BASE_IMAGE_TAG}
LABEL MAINTAINER="OpenList"
ARG INSTALL_FFMPEG=false
ARG INSTALL_ARIA2=false
LABEL MAINTAINER="OpenList"
ARG USER=openlist
ARG UID=1001
ARG GID=1001
WORKDIR /opt/openlist/
RUN apk update && \
apk upgrade --no-cache && \
apk add --no-cache bash ca-certificates su-exec tzdata; \
[ "$INSTALL_FFMPEG" = "true" ] && apk add --no-cache ffmpeg; \
[ "$INSTALL_ARIA2" = "true" ] && apk add --no-cache curl aria2 && \
mkdir -p /opt/aria2/.aria2 && \
wget https://github.com/P3TERX/aria2.conf/archive/refs/heads/master.tar.gz -O /tmp/aria-conf.tar.gz && \
tar -zxvf /tmp/aria-conf.tar.gz -C /opt/aria2/.aria2 --strip-components=1 && rm -f /tmp/aria-conf.tar.gz && \
sed -i 's|rpc-secret|#rpc-secret|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/script.conf && \
sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/script.conf && \
touch /opt/aria2/.aria2/aria2.session && \
/opt/aria2/.aria2/tracker.sh ; \
rm -rf /var/cache/apk/*
COPY --chmod=755 --from=builder /app/bin/openlist ./
COPY --chmod=755 entrypoint.sh /entrypoint.sh
RUN adduser -u ${UID} -g ${GID} -h /opt/openlist/data -D -s /bin/sh ${USER} \
&& chown -R ${UID}:${GID} /opt \
&& chown -R ${UID}:${GID} /entrypoint.sh
USER ${USER}
RUN /entrypoint.sh version
ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
ENV UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
VOLUME /opt/openlist/data/
EXPOSE 5244 5245
CMD [ "/entrypoint.sh" ]

View File

@ -1,34 +1,26 @@
FROM docker.io/library/alpine:edge
ARG BASE_IMAGE_TAG=base
FROM ghcr.io/openlistteam/openlist-base-image:${BASE_IMAGE_TAG}
LABEL MAINTAINER="OpenList"
ARG TARGETPLATFORM
ARG INSTALL_FFMPEG=false
ARG INSTALL_ARIA2=false
LABEL MAINTAINER="OpenList"
ARG USER=openlist
ARG UID=1001
ARG GID=1001
WORKDIR /opt/openlist/
RUN apk update && \
apk upgrade --no-cache && \
apk add --no-cache bash ca-certificates su-exec tzdata; \
[ "$INSTALL_FFMPEG" = "true" ] && apk add --no-cache ffmpeg; \
[ "$INSTALL_ARIA2" = "true" ] && apk add --no-cache curl aria2 && \
mkdir -p /opt/aria2/.aria2 && \
wget https://github.com/P3TERX/aria2.conf/archive/refs/heads/master.tar.gz -O /tmp/aria-conf.tar.gz && \
tar -zxvf /tmp/aria-conf.tar.gz -C /opt/aria2/.aria2 --strip-components=1 && rm -f /tmp/aria-conf.tar.gz && \
sed -i 's|rpc-secret|#rpc-secret|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/script.conf && \
sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/script.conf && \
touch /opt/aria2/.aria2/aria2.session && \
/opt/aria2/.aria2/tracker.sh ; \
rm -rf /var/cache/apk/*
COPY --chmod=755 /build/${TARGETPLATFORM}/openlist ./
COPY --chmod=755 entrypoint.sh /entrypoint.sh
RUN adduser -u ${UID} -g ${GID} -h /opt/openlist/data -D -s /bin/sh ${USER} \
&& chown -R ${UID}:${GID} /opt \
&& chown -R ${UID}:${GID} /entrypoint.sh
USER ${USER}
RUN /entrypoint.sh version
ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
ENV UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
VOLUME /opt/openlist/data/
EXPOSE 5244 5245
CMD [ "/entrypoint.sh" ]

View File

@ -20,6 +20,34 @@
- [CODE OF CONDUCT](./CODE_OF_CONDUCT.md)
- [LICENSE](./LICENSE)
## Disclaimer
OpenList is an open-source project independently maintained by the OpenList Team, following the AGPL-3.0 license and committed to maintaining complete code openness and modification transparency.
We have noticed that some third-party projects with names similar to this project's have emerged in the community, such as OpenListApp/OpenListApp, along with some paid proprietary software using the same or similar naming. To avoid user confusion, we hereby declare:
- OpenList has no official association with any third-party derivative projects.
- All software, code, and services of this project are maintained by the OpenList Team and are freely available on GitHub.
- Project documentation and API services rely primarily on public-benefit resources provided by Cloudflare. There are currently no paid plans or commercial deployments, and using the existing features involves no cost.
We respect the community's right to free use and derivative development, but we also strongly urge downstream projects:
- Do not use the "OpenList" name for misleading promotion or commercial gain;
- Do not distribute OpenList-based code in a closed-source manner or in violation of the AGPL license terms.
To help keep the ecosystem healthy, we recommend:
- Clearly indicating the project's origin and choosing an appropriate open-source license in keeping with the open-source spirit;
- Avoiding "OpenList" or any confusingly similar name for projects with commercial use;
- Modifying and using the materials under OpenListTeam/Logo only in compliance with their license terms.
Thank you for your support and understanding of the OpenList project.
## Features
- [x] Multiple storages
@ -78,8 +106,9 @@
## Document
- 📘 [Docs & Install Guide](https://docs.oplist.org)
- 📚 [Backup Docs Site](https://docs.openlist.team)
- 📘 [Global Site](https://doc.oplist.org)
- 📚 [Backup Site](https://doc.openlist.team)
- 🌏 [CN Site](https://doc.oplist.org.cn)
## Demo

View File

@ -20,6 +20,34 @@
- [行为准则](./CODE_OF_CONDUCT.md)
- [许可证](./LICENSE)
## 免责声明
OpenList 是一个由 OpenList 团队独立维护的开源项目,遵循 AGPL-3.0 许可证,致力于保持完整的代码开放性和修改透明性。
我们注意到社区中出现了一些与本项目名称相似的第三方项目,如 OpenListApp/OpenListApp以及部分采用相同或近似命名的收费专有软件。为避免用户误解现声明如下
- OpenList 与任何第三方衍生项目无官方关联。
- 本项目的全部软件、代码与服务由 OpenList 团队维护,可在 GitHub 免费获取。
- 项目文档与 API 服务均主要依托于 Cloudflare 提供的公益资源,目前无任何收费计划或商业部署,现有功能使用不涉及任何支出。
我们尊重社区的自由使用与衍生开发权利,但也强烈呼吁下游项目:
- 不应以“OpenList”名义进行冒名宣传或获取商业利益
- 不得将基于 OpenList 的代码进行闭源分发或违反 AGPL 许可证条款。
为了更好地维护生态健康发展,我们建议:
- 明确注明项目来源,并以符合开源精神的方式选择适当的开源许可证;
- 如涉及商业用途请避免使用“OpenList”或任何会产生混淆的方式作为项目名称
- 若需使用本项目位于 OpenListTeam/Logo 下的素材,可在遵守协议的前提下进行修改后使用。
感谢您对 OpenList 项目的支持与理解。
## 功能
- [x] 多种存储
@ -78,8 +106,9 @@
## 文档
- 📘 [文档与安装指南](https://docs.oplist.org)
- 📚 [备用文档站点](https://docs.openlist.team)
- 🌏 [国内站点](https://doc.oplist.org.cn)
- 📘 [海外站点](https://doc.oplist.org)
- 📚 [备用站点](https://doc.openlist.team)
## 演示

View File

@ -20,6 +20,34 @@
- [行動規範](./CODE_OF_CONDUCT.md)
- [ライセンス](./LICENSE)
## 免責事項
OpenListは、OpenListチームが独立して維持するオープンソースプロジェクトであり、AGPL-3.0ライセンスに従い、完全なコードの開放性と変更の透明性を維持することに専念しています。
コミュニティ内で、OpenListApp/OpenListAppなど、本プロジェクトと類似した名称を持つサードパーティプロジェクトや、同一または類似した命名を採用する有料専有ソフトウェアが出現していることを確認しています。ユーザーの誤解を避けるため、以下のように宣言いたします
- OpenListは、いかなるサードパーティ派生プロジェクトとも公式な関連性はありません。
- 本プロジェクトのすべてのソフトウェア、コード、サービスはOpenListチームによって維持され、GitHubで無料で取得できます。
- プロジェクトドキュメントとAPIサービスは主にCloudflareが提供する公益リソースに依存しており、現在有料プランや商業展開はなく、既存機能の使用に費用は発生しません。
私たちはコミュニティの自由な使用と派生開発の権利を尊重しますが、下流プロジェクトに強く呼びかけます:
- 「OpenList」の名前で偽装宣伝や商業利益を得るべきではありません
- OpenListベースのコードをクローズドソースで配布したり、AGPLライセンス条項に違反してはいけません。
エコシステムの健全な発展をより良く維持するため、以下を推奨します:
- プロジェクトの出典を明確に示し、オープンソース精神に合致する適切なオープンソースライセンスを選択する;
- 商業用途が関わる場合は、「OpenList」や混乱を招く可能性のある名前をプロジェクト名として使用することを避ける
- OpenListTeam/Logo下の素材を使用する必要がある場合は、協定を遵守した上で修正して使用できます。
OpenListプロジェクトへのご支援とご理解をありがとうございます。
## 特徴
- [x] 複数ストレージ
@ -78,8 +106,9 @@
## ドキュメント
- 📘 [ドキュメント・インストールガイド](https://docs.oplist.org)
- 📚 [バックアップドキュメントサイト](https://docs.openlist.team)
- 📘 [グローバルサイト](https://doc.oplist.org)
- 📚 [バックアップサイト](https://doc.openlist.team)
- 🌏 [CNサイト](https://doc.oplist.org.cn)
## デモ

View File

@ -20,6 +20,34 @@
- [Gedragscode](./CODE_OF_CONDUCT.md)
- [Licentie](./LICENSE)
## Disclaimer
OpenList is een open-source project dat onafhankelijk wordt onderhouden door het OpenList Team, volgend op de AGPL-3.0 licentie en toegewijd aan het behouden van volledige code openheid en transparantie van wijzigingen.
We hebben gemerkt dat er in de gemeenschap enkele derde partij projecten zijn verschenen met namen vergelijkbaar met dit project, zoals OpenListApp/OpenListApp, evenals enkele betaalde eigendomssoftware die dezelfde of soortgelijke naamgeving gebruikt. Om verwarring bij gebruikers te voorkomen, verklaren we hierbij:
- OpenList heeft geen officiële associatie met enige derde partij afgeleide projecten.
- Alle software, code en diensten van dit project worden onderhouden door het OpenList Team en zijn gratis beschikbaar op GitHub.
- Projectdocumentatie en API diensten zijn voornamelijk afhankelijk van liefdadigheidsbronnen verstrekt door Cloudflare. Er zijn momenteel geen betaalplannen of commerciële implementaties, en het gebruik van bestaande functies brengt geen kosten met zich mee.
We respecteren de rechten van de gemeenschap voor vrij gebruik en afgeleide ontwikkeling, maar we roepen downstream projecten ook ten zeerste op:
- Mogen niet de "OpenList" naam gebruiken voor namaakpromotie of commercieel gewin;
- Mogen OpenList-gebaseerde code niet distribueren op een closed-source manier of AGPL licentievoorwaarden schenden.
Om een gezonde ecosysteemontwikkeling beter te onderhouden, bevelen we aan:
- Duidelijk de projectbron aangeven en passende open-source licenties kiezen in overeenstemming met de open-source geest;
- Bij commercieel gebruik, vermijd het gebruik van "OpenList" of enige verwarrende naamgeving als projectnaam;
- Als u materialen onder OpenListTeam/Logo moet gebruiken, kunt u deze wijzigen en gebruiken onder naleving van de overeenkomst.
Dank u voor uw ondersteuning en begrip
## Functies
- [x] Meerdere opslagmogelijkheden
@ -78,8 +106,9 @@
## Documentatie
- 📘 [Documentatie & Installatiegids](https://docs.oplist.org)
- 📚 [Back-up documentatiesite](https://docs.openlist.team)
- 📘 [Global Site](https://doc.oplist.org)
- 📚 [Backup Site](https://doc.openlist.team)
- 🌏 [CN Site](https://doc.oplist.org.cn)
## Demo

255
build.sh
View File

@ -4,6 +4,9 @@ builtAt="$(date +'%F %T %z')"
gitAuthor="The OpenList Projects Contributors <noreply@openlist.team>"
gitCommit=$(git log --pretty=format:"%h" -1)
# Set frontend repository, default to OpenListTeam/OpenList-Frontend
frontendRepo="${FRONTEND_REPO:-OpenListTeam/OpenList-Frontend}"
githubAuthArgs=""
if [ -n "$GITHUB_TOKEN" ]; then
githubAuthArgs="--header \"Authorization: Bearer $GITHUB_TOKEN\""
@ -17,15 +20,15 @@ fi
if [ "$1" = "dev" ]; then
version="dev"
webVersion="dev"
webVersion="rolling"
elif [ "$1" = "beta" ]; then
version="beta"
webVersion="dev"
webVersion="rolling"
else
git tag -d beta || true
# "|| true" keeps the delete above from failing when the beta tag does not exist
version=$(git describe --abbrev=0 --tags 2>/dev/null || echo "v0.0.0")
webVersion=$(eval "curl -fsSL --max-time 2 $githubAuthArgs \"https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases/latest\"" | grep "tag_name" | head -n 1 | awk -F ":" '{print $2}' | sed 's/\"//g;s/,//g;s/ //g')
webVersion=$(eval "curl -fsSL --max-time 2 $githubAuthArgs \"https://api.github.com/repos/$frontendRepo/releases/latest\"" | grep "tag_name" | head -n 1 | awk -F ":" '{print $2}' | sed 's/\"//g;s/,//g;s/ //g')
fi
echo "backend version: $version"
@ -45,30 +48,21 @@ ldflags="\
-X 'github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=$webVersion' \
"
FetchWebDev() {
pre_release_tag=$(eval "curl -fsSL --max-time 2 $githubAuthArgs https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases" | jq -r 'map(select(.prerelease)) | first | .tag_name')
if [ -z "$pre_release_tag" ] || [ "$pre_release_tag" == "null" ]; then
# fall back to latest release
pre_release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases/latest\"")
else
pre_release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases/tags/$pre_release_tag\"")
fi
FetchWebRolling() {
pre_release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/$frontendRepo/releases/tags/rolling\"")
pre_release_assets=$(echo "$pre_release_json" | jq -r '.assets[].browser_download_url')
if [ "$useLite" = true ]; then
pre_release_tar_url=$(echo "$pre_release_assets" | grep "openlist-frontend-dist-lite" | grep "\.tar\.gz$")
else
# There is no lite for rolling
pre_release_tar_url=$(echo "$pre_release_assets" | grep "openlist-frontend-dist" | grep -v "lite" | grep "\.tar\.gz$")
fi
curl -fsSL "$pre_release_tar_url" -o web-dist-dev.tar.gz
curl -fsSL "$pre_release_tar_url" -o dist.tar.gz
rm -rf public/dist && mkdir -p public/dist
tar -zxvf web-dist-dev.tar.gz -C public/dist
rm -rf web-dist-dev.tar.gz
tar -zxvf dist.tar.gz -C public/dist
rm -rf dist.tar.gz
}
FetchWebRelease() {
release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases/latest\"")
release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/$frontendRepo/releases/latest\"")
release_assets=$(echo "$release_json" | jq -r '.assets[].browser_download_url')
if [ "$useLite" = true ]; then
@ -95,6 +89,45 @@ BuildWinArm64() {
go build -o "$1" -ldflags="$ldflags" -tags=jsoniter .
}
BuildWin7() {
# Setup Win7 Go compiler (patched version that supports Windows 7)
go_version=$(go version | grep -o 'go[0-9]\+\.[0-9]\+\.[0-9]\+' | sed 's/go//')
echo "Detected Go version: $go_version"
curl -fsSL --retry 3 -o go-win7.zip -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/XTLS/go-win7/releases/download/patched-${go_version}/go-for-win7-linux-amd64.zip"
rm -rf go-win7
unzip go-win7.zip -d go-win7
rm go-win7.zip
# Set permissions for all wrapper files
chmod +x ./wrapper/zcc-win7
chmod +x ./wrapper/zcxx-win7
chmod +x ./wrapper/zcc-win7-386
chmod +x ./wrapper/zcxx-win7-386
# Build for both 386 and amd64 architectures
for arch in "386" "amd64"; do
echo "building for windows7-${arch}"
export GOOS=windows
export GOARCH=${arch}
export CGO_ENABLED=1
# Use architecture-specific wrapper files
if [ "$arch" = "386" ]; then
export CC=$(pwd)/wrapper/zcc-win7-386
export CXX=$(pwd)/wrapper/zcxx-win7-386
else
export CC=$(pwd)/wrapper/zcc-win7
export CXX=$(pwd)/wrapper/zcxx-win7
fi
# Use the patched Go compiler for Win7 compatibility
$(pwd)/go-win7/bin/go build -o "${1}-${arch}.exe" -ldflags="$ldflags" -tags=jsoniter .
done
}
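# Sketch of the artifacts this leaves behind when invoked as in BuildRelease
# below (assuming appName is "openlist"):
#   ./build/openlist-windows7-386.exe
#   ./build/openlist-windows7-amd64.exe
# MakeRelease later zips them via the "$appName-windows7-*" find pattern.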
BuildDev() {
rm -rf .git/
mkdir -p "dist"
@ -121,8 +154,8 @@ BuildDev() {
xgo -targets=windows/amd64,darwin/amd64,darwin/arm64 -out "$appName" -ldflags="$ldflags" -tags=jsoniter .
mv "$appName"-* dist
cd dist
cp ./"$appName"-windows-amd64.exe ./"$appName"-windows-amd64-upx.exe
upx -9 ./"$appName"-windows-amd64-upx.exe
# cp ./"$appName"-windows-amd64.exe ./"$appName"-windows-amd64-upx.exe
# upx -9 ./"$appName"-windows-amd64-upx.exe
find . -type f -print0 | xargs -0 md5sum >md5.txt
cat md5.txt
}
@ -134,7 +167,7 @@ BuildDocker() {
PrepareBuildDockerMusl() {
mkdir -p build/musl-libs
BASE="https://github.com/OpenListTeam/musl-compilers/releases/latest/download/"
FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross i486-linux-musl-cross s390x-linux-musl-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross riscv64-linux-musl-cross powerpc64le-linux-musl-cross)
FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross i486-linux-musl-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross riscv64-linux-musl-cross powerpc64le-linux-musl-cross loongarch64-linux-musl-cross) ## Disable s390x-linux-musl-cross builds
for i in "${FILES[@]}"; do
url="${BASE}${i}.tgz"
lib_tgz="build/${i}.tgz"
@ -153,8 +186,8 @@ BuildDockerMultiplatform() {
docker_lflags="--extldflags '-static -fpic' $ldflags"
export CGO_ENABLED=1
OS_ARCHES=(linux-amd64 linux-arm64 linux-386 linux-s390x linux-riscv64 linux-ppc64le)
CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc i486-linux-musl-gcc s390x-linux-musl-gcc riscv64-linux-musl-gcc powerpc64le-linux-musl-gcc)
OS_ARCHES=(linux-amd64 linux-arm64 linux-386 linux-riscv64 linux-ppc64le linux-loong64) ## Disable linux-s390x builds
CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc i486-linux-musl-gcc riscv64-linux-musl-gcc powerpc64le-linux-musl-gcc loongarch64-linux-musl-gcc) ## Disable s390x-linux-musl-gcc builds
for i in "${!OS_ARCHES[@]}"; do
os_arch=${OS_ARCHES[$i]}
cgo_cc=${CGO_ARGS[$i]}
@ -186,12 +219,171 @@ BuildRelease() {
rm -rf .git/
mkdir -p "build"
BuildWinArm64 ./build/"$appName"-windows-arm64.exe
BuildWin7 ./build/"$appName"-windows7
xgo -out "$appName" -ldflags="$ldflags" -tags=jsoniter .
# upx compression is disabled here because some target platforms seem to have issues with it
upx -9 ./"$appName"-linux-amd64
cp ./"$appName"-windows-amd64.exe ./"$appName"-windows-amd64-upx.exe
upx -9 ./"$appName"-windows-amd64-upx.exe
# upx -9 ./"$appName"-linux-amd64
# cp ./"$appName"-windows-amd64.exe ./"$appName"-windows-amd64-upx.exe
# upx -9 ./"$appName"-windows-amd64-upx.exe
mv "$appName"-* build
# Build LoongArch with glibc (both old world abi1.0 and new world abi2.0)
# Separate from musl builds to avoid cache conflicts
BuildLoongGLIBC ./build/$appName-linux-loong64-abi1.0 abi1.0
BuildLoongGLIBC ./build/$appName-linux-loong64 abi2.0
}
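# The two LoongArch glibc artifacts built above differ only in ABI
# (assuming appName is "openlist"):
#   build/openlist-linux-loong64-abi1.0   # old-world glibc, patched Go toolchain
#   build/openlist-linux-loong64          # new-world glibc (abi2.0), stock Go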
BuildLoongGLIBC() {
local target_abi="$2"
local output_file="$1"
local oldWorldGoVersion="1.24.3"
if [ "$target_abi" = "abi1.0" ]; then
echo building for linux-loong64-abi1.0
else
echo building for linux-loong64-abi2.0
target_abi="abi2.0" # Default to abi2.0 if not specified
fi
# Note: global cache cleanup is no longer needed, since ABI1.0 now uses an isolated cache directory
echo "Using optimized cache strategy: ABI1.0 has an isolated cache, ABI2.0 uses the standard cache"
if [ "$target_abi" = "abi1.0" ]; then
# Setup abi1.0 toolchain and patched Go compiler similar to cgo-action implementation
echo "Setting up Loongson old-world ABI1.0 toolchain and patched Go compiler..."
# Download and setup patched Go compiler for old-world
if ! curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/go${oldWorldGoVersion}.linux-amd64.tar.gz" \
-o go-loong64-abi1.0.tar.gz; then
echo "Error: Failed to download patched Go compiler for old-world ABI1.0"
if [ -n "$GITHUB_TOKEN" ]; then
echo "Error output from curl:"
curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/go${oldWorldGoVersion}.linux-amd64.tar.gz" \
-o go-loong64-abi1.0.tar.gz || true
fi
return 1
fi
rm -rf go-loong64-abi1.0
mkdir go-loong64-abi1.0
if ! tar -xzf go-loong64-abi1.0.tar.gz -C go-loong64-abi1.0 --strip-components=1; then
echo "Error: Failed to extract patched Go compiler"
return 1
fi
rm go-loong64-abi1.0.tar.gz
# Download and setup GCC toolchain for old-world
if ! curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/loongson-gnu-toolchain-8.3.novec-x86_64-loongarch64-linux-gnu-rc1.1.tar.xz" \
-o gcc8-loong64-abi1.0.tar.xz; then
echo "Error: Failed to download GCC toolchain for old-world ABI1.0"
if [ -n "$GITHUB_TOKEN" ]; then
echo "Error output from curl:"
curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/loongson-gnu-toolchain-8.3.novec-x86_64-loongarch64-linux-gnu-rc1.1.tar.xz" \
-o gcc8-loong64-abi1.0.tar.xz || true
fi
return 1
fi
rm -rf gcc8-loong64-abi1.0
mkdir gcc8-loong64-abi1.0
if ! tar -Jxf gcc8-loong64-abi1.0.tar.xz -C gcc8-loong64-abi1.0 --strip-components=1; then
echo "Error: Failed to extract GCC toolchain"
return 1
fi
rm gcc8-loong64-abi1.0.tar.xz
# Setup separate cache directory for ABI1.0 to avoid cache pollution
abi1_cache_dir="$(pwd)/go-loong64-abi1.0-cache"
mkdir -p "$abi1_cache_dir"
echo "Using separate cache directory for ABI1.0: $abi1_cache_dir"
# Use patched Go compiler for old-world build (critical for ABI1.0 compatibility)
echo "Building with patched Go compiler for old-world ABI1.0..."
echo "Using isolated cache directory: $abi1_cache_dir"
# Use env command to set environment variables locally without affecting global environment
if ! env GOOS=linux GOARCH=loong64 \
CC="$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-gcc" \
CXX="$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-g++" \
CGO_ENABLED=1 \
GOCACHE="$abi1_cache_dir" \
$(pwd)/go-loong64-abi1.0/bin/go build -a -o "$output_file" -ldflags="$ldflags" -tags=jsoniter .; then
echo "Error: Build failed with patched Go compiler"
echo "Attempting retry with cache cleanup..."
env GOCACHE="$abi1_cache_dir" $(pwd)/go-loong64-abi1.0/bin/go clean -cache
if ! env GOOS=linux GOARCH=loong64 \
CC="$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-gcc" \
CXX="$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-g++" \
CGO_ENABLED=1 \
GOCACHE="$abi1_cache_dir" \
$(pwd)/go-loong64-abi1.0/bin/go build -a -o "$output_file" -ldflags="$ldflags" -tags=jsoniter .; then
echo "Error: Build failed again after cache cleanup"
echo "Build environment details:"
echo "GOOS=linux"
echo "GOARCH=loong64"
echo "CC=$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-gcc"
echo "CXX=$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-g++"
echo "CGO_ENABLED=1"
echo "GOCACHE=$abi1_cache_dir"
echo "Go version: $($(pwd)/go-loong64-abi1.0/bin/go version)"
echo "GCC version: $($(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-gcc --version | head -1)"
return 1
fi
fi
else
# Setup abi2.0 toolchain for new world glibc build
echo "Setting up new-world ABI2.0 toolchain..."
if ! curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/cross-tools/releases/download/20250507/x86_64-cross-tools-loongarch64-unknown-linux-gnu-legacy.tar.xz" \
-o gcc12-loong64-abi2.0.tar.xz; then
echo "Error: Failed to download GCC toolchain for new-world ABI2.0"
if [ -n "$GITHUB_TOKEN" ]; then
echo "Error output from curl:"
curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/cross-tools/releases/download/20250507/x86_64-cross-tools-loongarch64-unknown-linux-gnu-legacy.tar.xz" \
-o gcc12-loong64-abi2.0.tar.xz || true
fi
return 1
fi
rm -rf gcc12-loong64-abi2.0
mkdir gcc12-loong64-abi2.0
if ! tar -Jxf gcc12-loong64-abi2.0.tar.xz -C gcc12-loong64-abi2.0 --strip-components=1; then
echo "Error: Failed to extract GCC toolchain"
return 1
fi
rm gcc12-loong64-abi2.0.tar.xz
export GOOS=linux
export GOARCH=loong64
export CC=$(pwd)/gcc12-loong64-abi2.0/bin/loongarch64-unknown-linux-gnu-gcc
export CXX=$(pwd)/gcc12-loong64-abi2.0/bin/loongarch64-unknown-linux-gnu-g++
export CGO_ENABLED=1
# Use standard Go compiler for new-world build
echo "Building with standard Go compiler for new-world ABI2.0..."
if ! go build -a -o "$output_file" -ldflags="$ldflags" -tags=jsoniter .; then
echo "Error: Build failed with standard Go compiler"
echo "Attempting retry with cache cleanup..."
go clean -cache
if ! go build -a -o "$output_file" -ldflags="$ldflags" -tags=jsoniter .; then
echo "Error: Build failed again after cache cleanup"
echo "Build environment details:"
echo "GOOS=$GOOS"
echo "GOARCH=$GOARCH"
echo "CC=$CC"
echo "CXX=$CXX"
echo "CGO_ENABLED=$CGO_ENABLED"
echo "Go version: $(go version)"
echo "GCC version: $($CC --version | head -1)"
return 1
fi
fi
fi
}
BuildReleaseLinuxMusl() {
@ -249,6 +441,7 @@ BuildReleaseLinuxMuslArm() {
done
}
BuildReleaseAndroid() {
rm -rf .git/
mkdir -p "build"
@ -278,6 +471,7 @@ BuildReleaseFreeBSD() {
freebsd_version=$(eval "curl -fsSL --max-time 2 $githubAuthArgs \"https://api.github.com/repos/freebsd/freebsd-src/tags\"" | \
jq -r '.[].name' | \
grep '^release/14\.' | \
grep -v -- '-p[0-9]*$' | \
sort -V | \
tail -1 | \
sed 's/release\///' | \
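# Illustrative effect of the filter above: from tags like release/14.2,
# release/14.3 and release/14.3-p1, the '-p[0-9]*$' exclusion drops the
# patch releases and the pipeline resolves to "14.3".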
@ -343,7 +537,7 @@ MakeRelease() {
tar -czvf compress/"$i$liteSuffix".tar.gz "$appName"
rm -f "$appName"
done
for i in $(find . -type f -name "$appName-windows-*"); do
for i in $(find . -type f \( -name "$appName-windows-*" -o -name "$appName-windows7-*" \)); do
cp "$i" "$appName".exe
zip compress/$(echo $i | sed 's/\.[^.]*$//')$liteSuffix.zip "$appName".exe
rm -f "$appName".exe
@ -390,7 +584,7 @@ for arg in "$@"; do
done
if [ "$buildType" = "dev" ]; then
FetchWebDev
FetchWebRolling
if [ "$dockerType" = "docker" ]; then
BuildDocker
elif [ "$dockerType" = "docker-multiplatform" ]; then
@ -402,7 +596,7 @@ if [ "$buildType" = "dev" ]; then
fi
elif [ "$buildType" = "release" -o "$buildType" = "beta" ]; then
if [ "$buildType" = "beta" ]; then
FetchWebDev
FetchWebRolling
else
FetchWebRelease
fi
@ -483,4 +677,5 @@ else
echo -e " $0 release"
echo -e " $0 release lite"
echo -e " $0 release docker lite"
echo -e " $0 release linux_musl"
fi

View File

@ -4,6 +4,8 @@ Copyright © 2022 NAME HERE <EMAIL ADDRESS>
package cmd
import (
"fmt"
"github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/setting"
@ -24,10 +26,11 @@ var AdminCmd = &cobra.Command{
if err != nil {
utils.Log.Errorf("failed get admin user: %+v", err)
} else {
utils.Log.Infof("Admin user's username: %s", admin.Username)
utils.Log.Infof("The password can only be output at the first startup, and then stored as a hash value, which cannot be reversed")
utils.Log.Infof("You can reset the password with a random string by running [openlist admin random]")
utils.Log.Infof("You can also set a new password by running [openlist admin set NEW_PASSWORD]")
utils.Log.Infof("get admin user from CLI")
fmt.Println("Admin user's username:", admin.Username)
fmt.Println("The password can only be output at the first startup, and then stored as a hash value, which cannot be reversed")
fmt.Println("You can reset the password with a random string by running [openlist admin random]")
fmt.Println("You can also set a new password by running [openlist admin set NEW_PASSWORD]")
}
},
}
@ -36,6 +39,7 @@ var RandomPasswordCmd = &cobra.Command{
Use: "random",
Short: "Reset admin user's password to a random string",
Run: func(cmd *cobra.Command, args []string) {
utils.Log.Infof("reset admin user's password to a random string from CLI")
newPwd := random.String(8)
setAdminPassword(newPwd)
},
@ -44,12 +48,12 @@ var RandomPasswordCmd = &cobra.Command{
var SetPasswordCmd = &cobra.Command{
Use: "set",
Short: "Set admin user's password",
Run: func(cmd *cobra.Command, args []string) {
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
utils.Log.Errorf("Please enter the new password")
return
return fmt.Errorf("Please enter the new password")
}
setAdminPassword(args[0])
return nil
},
}
@ -60,7 +64,8 @@ var ShowTokenCmd = &cobra.Command{
Init()
defer Release()
token := setting.GetStr(conf.Token)
utils.Log.Infof("Admin token: %s", token)
utils.Log.Infof("show admin token from CLI")
fmt.Println("Admin token:", token)
},
}
@ -77,9 +82,10 @@ func setAdminPassword(pwd string) {
utils.Log.Errorf("failed update admin user: %+v", err)
return
}
utils.Log.Infof("admin user has been updated:")
utils.Log.Infof("username: %s", admin.Username)
utils.Log.Infof("password: %s", pwd)
utils.Log.Infof("admin user has been update from CLI")
fmt.Println("admin user has been updated:")
fmt.Println("username:", admin.Username)
fmt.Println("password:", pwd)
DelAdminCacheOnline()
}
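For reference, the admin subcommands above are driven from a shell like this (NEW_PASSWORD is a placeholder):

    openlist admin                    # print the admin username and password hints
    openlist admin random             # reset the password to a random 8-character string
    openlist admin set NEW_PASSWORD   # set an explicit password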

View File

@ -4,6 +4,8 @@ Copyright © 2022 NAME HERE <EMAIL ADDRESS>
package cmd
import (
"fmt"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/spf13/cobra"
@ -24,7 +26,8 @@ var Cancel2FACmd = &cobra.Command{
if err != nil {
utils.Log.Errorf("failed to cancel 2FA: %+v", err)
} else {
utils.Log.Info("2FA canceled")
utils.Log.Infof("2FA is canceled from CLI")
fmt.Println("2FA canceled")
DelAdminCacheOnline()
}
}

View File

@ -16,7 +16,7 @@ var RootCmd = &cobra.Command{
Short: "A file list program that supports multiple storage.",
Long: `A file list program that supports multiple storage,
built with love by OpenListTeam.
Complete documentation is available at https://docs.openlist.team/`,
Complete documentation is available at https://doc.oplist.org/`,
}
func Execute() {

View File

@ -19,6 +19,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/fs"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/OpenList/v4/server"
"github.com/OpenListTeam/OpenList/v4/server/middlewares"
"github.com/OpenListTeam/sftpd-openlist"
ftpserver "github.com/fclairamb/ftpserverlib"
"github.com/gin-gonic/gin"
@ -47,7 +48,15 @@ the address is defined in config file`,
gin.SetMode(gin.ReleaseMode)
}
r := gin.New()
r.Use(gin.LoggerWithWriter(log.StandardLogger().Out), gin.RecoveryWithWriter(log.StandardLogger().Out))
// gin log
if conf.Conf.Log.Filter.Enable {
r.Use(middlewares.FilteredLogger())
} else {
r.Use(gin.LoggerWithWriter(log.StandardLogger().Out))
}
r.Use(gin.RecoveryWithWriter(log.StandardLogger().Out))
server.Init(r)
var httpHandler http.Handler = r
if conf.Conf.Scheme.EnableH2c {
@ -56,6 +65,7 @@ the address is defined in config file`,
var httpSrv, httpsSrv, unixSrv *http.Server
if conf.Conf.Scheme.HttpPort != -1 {
httpBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpPort)
fmt.Printf("start HTTP server @ %s\n", httpBase)
utils.Log.Infof("start HTTP server @ %s", httpBase)
httpSrv = &http.Server{Addr: httpBase, Handler: httpHandler}
go func() {
@ -67,6 +77,7 @@ the address is defined in config file`,
}
if conf.Conf.Scheme.HttpsPort != -1 {
httpsBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpsPort)
fmt.Printf("start HTTPS server @ %s\n", httpsBase)
utils.Log.Infof("start HTTPS server @ %s", httpsBase)
httpsSrv = &http.Server{Addr: httpsBase, Handler: r}
go func() {
@ -77,6 +88,7 @@ the address is defined in config file`,
}()
}
if conf.Conf.Scheme.UnixFile != "" {
fmt.Printf("start unix server @ %s\n", conf.Conf.Scheme.UnixFile)
utils.Log.Infof("start unix server @ %s", conf.Conf.Scheme.UnixFile)
unixSrv = &http.Server{Handler: httpHandler}
go func() {
@ -105,6 +117,7 @@ the address is defined in config file`,
s3r.Use(gin.LoggerWithWriter(log.StandardLogger().Out), gin.RecoveryWithWriter(log.StandardLogger().Out))
server.InitS3(s3r)
s3Base := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.S3.Port)
fmt.Printf("start S3 server @ %s\n", s3Base)
utils.Log.Infof("start S3 server @ %s", s3Base)
go func() {
var err error
@ -129,6 +142,7 @@ the address is defined in config file`,
if err != nil {
utils.Log.Fatalf("failed to start ftp driver: %s", err.Error())
} else {
fmt.Printf("start ftp server on %s\n", conf.Conf.FTP.Listen)
utils.Log.Infof("start ftp server on %s", conf.Conf.FTP.Listen)
go func() {
ftpServer = ftpserver.NewFtpServer(ftpDriver)
@ -147,6 +161,7 @@ the address is defined in config file`,
if err != nil {
utils.Log.Fatalf("failed to start sftp driver: %s", err.Error())
} else {
fmt.Printf("start sftp server on %s", conf.Conf.SFTP.Listen)
utils.Log.Infof("start sftp server on %s", conf.Conf.SFTP.Listen)
go func() {
sftpServer = sftpd.NewSftpServer(sftpDriver)

View File

@ -4,6 +4,7 @@ Copyright © 2023 NAME HERE <EMAIL ADDRESS>
package cmd
import (
"fmt"
"os"
"strconv"
@ -22,28 +23,61 @@ var storageCmd = &cobra.Command{
}
var disableStorageCmd = &cobra.Command{
Use: "disable",
Short: "Disable a storage",
Run: func(cmd *cobra.Command, args []string) {
Use: "disable [mount path]",
Short: "Disable a storage by mount path",
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) < 1 {
utils.Log.Errorf("mount path is required")
return
return fmt.Errorf("mount path is required")
}
mountPath := args[0]
Init()
defer Release()
storage, err := db.GetStorageByMountPath(mountPath)
if err != nil {
utils.Log.Errorf("failed to query storage: %+v", err)
} else {
return fmt.Errorf("failed to query storage: %+v", err)
}
storage.Disabled = true
err = db.UpdateStorage(storage)
if err != nil {
utils.Log.Errorf("failed to update storage: %+v", err)
} else {
utils.Log.Infof("Storage with mount path [%s] have been disabled", mountPath)
return fmt.Errorf("failed to update storage: %+v", err)
}
utils.Log.Infof("Storage with mount path [%s] has been disabled from CLI", mountPath)
fmt.Printf("Storage with mount path [%s] has been disabled\n", mountPath)
return nil
},
}
var deleteStorageCmd = &cobra.Command{
Use: "delete [id]",
Short: "Delete a storage by id",
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) < 1 {
return fmt.Errorf("id is required")
}
id, err := strconv.Atoi(args[0])
if err != nil {
return fmt.Errorf("id must be a number")
}
// prompt for confirmation unless --force is given
if force, _ := cmd.Flags().GetBool("force"); !force {
fmt.Printf("Are you sure you want to delete storage with id [%d]? [y/N]: ", id)
var confirm string
fmt.Scanln(&confirm)
if confirm != "y" && confirm != "Y" {
fmt.Println("Delete operation cancelled.")
return nil
}
}
Init()
defer Release()
err = db.DeleteStorageById(uint(id))
if err != nil {
return fmt.Errorf("failed to delete storage by id: %+v", err)
}
utils.Log.Infof("Storage with id [%d] have been deleted from CLI", id)
fmt.Printf("Storage with id [%d] have been deleted\n", id)
return nil
},
}
@ -88,14 +122,14 @@ var storageTableHeight int
var listStorageCmd = &cobra.Command{
Use: "list",
Short: "List all storages",
Run: func(cmd *cobra.Command, args []string) {
RunE: func(cmd *cobra.Command, args []string) error {
Init()
defer Release()
storages, _, err := db.GetStorages(1, -1)
if err != nil {
utils.Log.Errorf("failed to query storages: %+v", err)
return fmt.Errorf("failed to query storages: %+v", err)
} else {
utils.Log.Infof("Found %d storages", len(storages))
fmt.Printf("Found %d storages\n", len(storages))
columns := []table.Column{
{Title: "ID", Width: 4},
{Title: "Driver", Width: 16},
@ -138,10 +172,11 @@ var listStorageCmd = &cobra.Command{
m := model{t}
if _, err := tea.NewProgram(m).Run(); err != nil {
utils.Log.Errorf("failed to run program: %+v", err)
fmt.Printf("failed to run program: %+v\n", err)
os.Exit(1)
}
}
return nil
},
}
@ -151,6 +186,8 @@ func init() {
storageCmd.AddCommand(disableStorageCmd)
storageCmd.AddCommand(listStorageCmd)
storageCmd.PersistentFlags().IntVarP(&storageTableHeight, "height", "H", 10, "Table height")
storageCmd.AddCommand(deleteStorageCmd)
deleteStorageCmd.Flags().BoolP("force", "f", false, "Force delete without confirmation")
// Here you will define your flags and configuration settings.
// Cobra supports Persistent Flags which will work for this command

View File

@ -186,7 +186,7 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
preHash = strings.ToUpper(preHash)
fullHash := stream.GetHash().GetHash(utils.SHA1)
if len(fullHash) != utils.SHA1.Width {
_, fullHash, err = streamPkg.CacheFullInTempFileAndHash(stream, utils.SHA1)
_, fullHash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA1)
if err != nil {
return nil, err
}

View File

@ -18,7 +18,6 @@ var config = driver.Config{
Name: "115 Cloud",
DefaultRoot: "0",
// OnlyProxy: true,
// OnlyLocal: true,
// NoOverwriteUpload: true,
}

View File

@ -321,7 +321,7 @@ func (d *Pan115) UploadByMultipart(ctx context.Context, params *driver115.Upload
err error
)
tmpF, err := s.CacheFullInTempFile()
tmpF, err := s.CacheFullAndWriter(&up, nil)
if err != nil {
return nil, err
}

View File

@ -8,6 +8,7 @@ import (
"strings"
"time"
sdk "github.com/OpenListTeam/115-sdk-go"
"github.com/OpenListTeam/OpenList/v4/cmd/flags"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
@ -16,7 +17,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
sdk "github.com/OpenListTeam/115-sdk-go"
"golang.org/x/time/rate"
)
@ -131,6 +131,23 @@ func (d *Open115) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
}, nil
}
func (d *Open115) GetObjInfo(ctx context.Context, path string) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
resp, err := d.client.GetFolderInfoByPath(ctx, path)
if err != nil {
return nil, err
}
return &Obj{
Fid: resp.FileID,
Fn: resp.FileName,
Fc: resp.FileCategory,
Sha1: resp.Sha1,
Pc: resp.PickCode,
}, nil
}
func (d *Open115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
@ -222,7 +239,7 @@ func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
}
sha1 := file.GetHash().GetHash(utils.SHA1)
if len(sha1) != utils.SHA1.Width {
_, sha1, err = stream.CacheFullInTempFileAndHash(file, utils.SHA1)
_, sha1, err = stream.CacheFullAndHash(file, &up, utils.SHA1)
if err != nil {
return err
}
@ -252,6 +269,7 @@ func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
return err
}
if resp.Status == 2 {
up(100)
return nil
}
// 2. two way verify
@ -286,6 +304,7 @@ func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
return err
}
if resp.Status == 2 {
up(100)
return nil
}
}
@ -302,6 +321,22 @@ func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
return nil
}
func (d *Open115) OfflineDownload(ctx context.Context, uris []string, dstDir model.Obj) ([]string, error) {
return d.client.AddOfflineTaskURIs(ctx, uris, dstDir.GetID())
}
func (d *Open115) DeleteOfflineTask(ctx context.Context, infoHash string, deleteFiles bool) error {
return d.client.DeleteOfflineTask(ctx, infoHash, deleteFiles)
}
func (d *Open115) OfflineList(ctx context.Context) (*sdk.OfflineTaskListResp, error) {
resp, err := d.client.OfflineTaskList(ctx, 1)
if err != nil {
return nil, err
}
return resp, nil
}
// func (d *Open115) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
// // TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
// return nil, errs.NotImplement

View File

@ -11,23 +11,14 @@ type Addition struct {
// define other
OrderBy string `json:"order_by" type:"select" options:"file_name,file_size,user_utime,file_type"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc"`
LimitRate float64 `json:"limit_rate,string" type:"float" default:"1" help:"limit all api request rate ([limit]r/1s)"`
LimitRate float64 `json:"limit_rate" type:"float" default:"1" help:"limit all api request rate ([limit]r/1s)"`
AccessToken string `json:"access_token" required:"true"`
RefreshToken string `json:"refresh_token" required:"true"`
}
var config = driver.Config{
Name: "115 Open",
LocalSort: false,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: false,
NeedMs: false,
DefaultRoot: "0",
CheckStatus: false,
Alert: "",
NoOverwriteUpload: false,
}
func init() {

View File

@ -6,12 +6,13 @@ import (
"io"
"time"
sdk "github.com/OpenListTeam/115-sdk-go"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
"github.com/avast/retry-go"
sdk "github.com/OpenListTeam/115-sdk-go"
)
func calPartSize(fileSize int64) int64 {
@ -69,9 +70,6 @@ func (d *Open115) singleUpload(ctx context.Context, tempF model.File, tokenResp
// }
func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress, tokenResp *sdk.UploadGetTokenResp, initResp *sdk.UploadInitResp) error {
fileSize := stream.GetSize()
chunkSize := calPartSize(fileSize)
ossClient, err := oss.New(tokenResp.Endpoint, tokenResp.AccessKeyId, tokenResp.AccessKeySecret, oss.SecurityToken(tokenResp.SecurityToken))
if err != nil {
return err
@ -86,6 +84,13 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
return err
}
fileSize := stream.GetSize()
chunkSize := calPartSize(fileSize)
ss, err := streamPkg.NewStreamSectionReader(stream, int(chunkSize), &up)
if err != nil {
return err
}
partNum := (stream.GetSize() + chunkSize - 1) / chunkSize
parts := make([]oss.UploadPart, partNum)
offset := int64(0)
@ -98,10 +103,13 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
if i == partNum {
partSize = fileSize - (i-1)*chunkSize
}
rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
err = retry.Do(func() error {
_ = rd.Reset()
rd, err := ss.GetSectionReader(offset, partSize)
if err != nil {
return err
}
rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
err = retry.Do(func() error {
rd.Seek(0, io.SeekStart)
part, err := bucket.UploadPart(imur, rateLimitedRd, partSize, int(i))
if err != nil {
return err
@ -112,6 +120,7 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second))
ss.FreeSectionReader(rd)
if err != nil {
return err
}
@ -121,7 +130,7 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
} else {
offset += partSize
}
up(float64(offset) / float64(fileSize))
up(float64(offset) * 100 / float64(fileSize))
}
// callbackRespBytes := make([]byte, 1024)

View File

@ -19,11 +19,6 @@ type Addition struct {
var config = driver.Config{
Name: "115 Share",
DefaultRoot: "0",
// OnlyProxy: true,
// OnlyLocal: true,
CheckStatus: false,
Alert: "",
NoOverwriteUpload: true,
NoUpload: true,
}

View File

@ -64,14 +64,6 @@ func (d *Pan123) List(ctx context.Context, dir model.Obj, args model.ListArgs) (
func (d *Pan123) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if f, ok := file.(File); ok {
//var resp DownResp
var headers map[string]string
if !utils.IsLocalIPAddr(args.IP) {
headers = map[string]string{
//"X-Real-IP": "1.1.1.1",
"X-Forwarded-For": args.IP,
}
}
data := base.Json{
"driveId": 0,
"etag": f.Etag,
@ -83,25 +75,27 @@ func (d *Pan123) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
}
resp, err := d.Request(DownloadInfo, http.MethodPost, func(req *resty.Request) {
req.SetBody(data).SetHeaders(headers)
req.SetBody(data)
}, nil)
if err != nil {
return nil, err
}
downloadUrl := utils.Json.Get(resp, "data", "DownloadUrl").ToString()
u, err := url.Parse(downloadUrl)
ou, err := url.Parse(downloadUrl)
if err != nil {
return nil, err
}
nu := u.Query().Get("params")
u_ := ou.String()
nu := ou.Query().Get("params")
if nu != "" {
du, _ := base64.StdEncoding.DecodeString(nu)
u, err = url.Parse(string(du))
u, err := url.Parse(string(du))
if err != nil {
return nil, err
}
u_ = u.String()
}
u_ := u.String()
log.Debug("download url: ", u_)
res, err := base.NoRedirectClient.R().SetHeader("Referer", "https://www.123pan.com/").Get(u_)
if err != nil {
@ -118,7 +112,7 @@ func (d *Pan123) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
link.URL = utils.Json.Get(res.Body(), "data", "redirect_url").ToString()
}
link.Header = http.Header{
"Referer": []string{"https://www.123pan.com/"},
"Referer": []string{fmt.Sprintf("%s://%s/", ou.Scheme, ou.Host)},
}
return &link, nil
} else {
@ -188,7 +182,7 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, file model.FileStrea
etag := file.GetHash().GetHash(utils.MD5)
var err error
if len(etag) < utils.MD5.Width {
_, etag, err = stream.CacheFullInTempFileAndHash(file, utils.MD5)
_, etag, err = stream.CacheFullAndHash(file, &up, utils.MD5)
if err != nil {
return err
}

View File

@ -12,6 +12,7 @@ type Addition struct {
//OrderBy string `json:"order_by" type:"select" options:"file_id,file_name,size,update_at" default:"file_name"`
//OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
AccessToken string
UploadThread int `json:"UploadThread" type:"number" default:"3" help:"the number of upload threads"`
}
var config = driver.Config{
@ -22,6 +23,11 @@ var config = driver.Config{
func init() {
op.RegisterDriver(func() driver.Driver {
return &Pan123{}
// Newly added default options must be set here, during RegisterDriver initialization, to take effect for users already using the driver
return &Pan123{
Addition: Addition{
UploadThread: 3,
},
}
})
}

View File

@ -6,11 +6,16 @@ import (
"io"
"net/http"
"strconv"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
)
@ -69,18 +74,21 @@ func (d *Pan123) completeS3(ctx context.Context, upReq *UploadResp, file model.F
}
func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, up driver.UpdateProgress) error {
tmpF, err := file.CacheFullInTempFile()
// fetch s3 pre signed urls
size := file.GetSize()
chunkSize := int64(16 * utils.MB)
chunkCount := 1
if size > chunkSize {
chunkCount = int((size + chunkSize - 1) / chunkSize)
}
ss, err := stream.NewStreamSectionReader(file, int(chunkSize), &up)
if err != nil {
return err
}
// fetch s3 pre signed urls
size := file.GetSize()
chunkSize := min(size, 16*utils.MB)
chunkCount := int(size / chunkSize)
lastChunkSize := size % chunkSize
if lastChunkSize > 0 {
chunkCount++
} else {
if lastChunkSize == 0 {
lastChunkSize = chunkSize
}
// only 1 batch is allowed
@ -90,46 +98,57 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
batchSize = 10
getS3UploadUrl = d.getS3PreSignedUrls
}
thread := min(int(chunkCount), d.UploadThread)
threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
for i := 1; i <= chunkCount; i += batchSize {
if utils.IsCanceled(ctx) {
return ctx.Err()
if utils.IsCanceled(uploadCtx) {
break
}
start := i
end := min(i+batchSize, chunkCount+1)
s3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, start, end)
s3PreSignedUrls, err := getS3UploadUrl(uploadCtx, upReq, start, end)
if err != nil {
return err
}
// upload each chunk
for j := start; j < end; j++ {
if utils.IsCanceled(ctx) {
return ctx.Err()
for cur := start; cur < end; cur++ {
if utils.IsCanceled(uploadCtx) {
break
}
offset := int64(cur-1) * chunkSize
curSize := chunkSize
if j == chunkCount {
if cur == chunkCount {
curSize = lastChunkSize
}
err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.NewSectionReader(tmpF, chunkSize*int64(j-1), curSize), curSize, false, getS3UploadUrl)
var reader *stream.SectionReader
var rateLimitedRd io.Reader
threadG.GoWithLifecycle(errgroup.Lifecycle{
Before: func(ctx context.Context) error {
if reader == nil {
var err error
reader, err = ss.GetSectionReader(offset, curSize)
if err != nil {
return err
}
up(float64(j) * 100 / float64(chunkCount))
rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
}
}
// complete s3 upload
return d.completeS3(ctx, upReq, file, chunkCount > 1)
}
func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSignedUrls *S3PreSignedURLs, cur, end int, reader *io.SectionReader, curSize int64, retry bool, getS3UploadUrl func(ctx context.Context, upReq *UploadResp, start int, end int) (*S3PreSignedURLs, error)) error {
return nil
},
Do: func(ctx context.Context) error {
reader.Seek(0, io.SeekStart)
uploadUrl := s3PreSignedUrls.Data.PreSignedUrls[strconv.Itoa(cur)]
if uploadUrl == "" {
return fmt.Errorf("upload url is empty, s3PreSignedUrls: %+v", s3PreSignedUrls)
}
req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, reader))
reader.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, rateLimitedRd)
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = curSize
//req.Header.Set("Content-Length", strconv.FormatInt(curSize, 10))
res, err := base.HttpClient.Do(req)
@ -138,18 +157,18 @@ func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSign
}
defer res.Body.Close()
if res.StatusCode == http.StatusForbidden {
if retry {
return fmt.Errorf("upload s3 chunk %d failed, status code: %d", cur, res.StatusCode)
}
// refresh s3 pre signed urls
singleflight.AnyGroup.Do(fmt.Sprintf("Pan123.newUpload_%p", threadG), func() (any, error) {
newS3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, cur, end)
if err != nil {
return nil, err
}
s3PreSignedUrls.Data.PreSignedUrls = newS3PreSignedUrls.Data.PreSignedUrls
return nil, nil
})
if err != nil {
return err
}
s3PreSignedUrls.Data.PreSignedUrls = newS3PreSignedUrls.Data.PreSignedUrls
// retry
reader.Seek(0, io.SeekStart)
return d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, cur, end, reader, curSize, true, getS3UploadUrl)
return fmt.Errorf("upload s3 chunk %d failed, status code: %d", cur, res.StatusCode)
}
if res.StatusCode != http.StatusOK {
body, err := io.ReadAll(res.Body)
@ -158,5 +177,20 @@ func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSign
}
return fmt.Errorf("upload s3 chunk %d failed, status code: %d, body: %s", cur, res.StatusCode, body)
}
progress := 10.0 + 85.0*float64(threadG.Success())/float64(chunkCount)
up(progress)
return nil
},
After: func(err error) {
ss.FreeSectionReader(reader)
},
})
}
}
if err := threadG.Wait(); err != nil {
return err
}
defer up(100)
// complete s3 upload
return d.completeS3(ctx, upReq, file, chunkCount > 1)
}

View File

@ -2,7 +2,9 @@ package _123_open
import (
"context"
"fmt"
"strconv"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
@ -95,6 +97,22 @@ func (d *Open123) Rename(ctx context.Context, srcObj model.Obj, newName string)
}
func (d *Open123) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
// Try to implement copy via upload + MD5 instant transfer (rapid upload)
// 1. Create the file
// parentFileID is the parent directory id; use 0 when uploading to the root directory
parentFileId, err := strconv.ParseInt(dstDir.GetID(), 10, 64)
if err != nil {
return fmt.Errorf("parse parentFileID error: %v", err)
}
etag := srcObj.(File).Etag
createResp, err := d.create(parentFileId, srcObj.GetName(), etag, srcObj.GetSize(), 2, false)
if err != nil {
return err
}
// Did instant transfer succeed?
if createResp.Data.Reuse {
return nil
}
return errs.NotSupport
}
@ -104,26 +122,64 @@ func (d *Open123) Remove(ctx context.Context, obj model.Obj) error {
return d.trash(fileId)
}
func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
// 1. Create the file
// parentFileID is the parent directory id; use 0 when uploading to the root directory
parentFileId, err := strconv.ParseInt(dstDir.GetID(), 10, 64)
etag := file.GetHash().GetHash(utils.MD5)
if len(etag) < utils.MD5.Width {
_, etag, err = stream.CacheFullInTempFileAndHash(file, utils.MD5)
if err != nil {
return err
return nil, fmt.Errorf("parse parentFileID error: %v", err)
}
// etag is the file's MD5
etag := file.GetHash().GetHash(utils.MD5)
if len(etag) < utils.MD5.Width {
_, etag, err = stream.CacheFullAndHash(file, &up, utils.MD5)
if err != nil {
return nil, err
}
}
createResp, err := d.create(parentFileId, file.GetName(), etag, file.GetSize(), 2, false)
if err != nil {
return err
return nil, err
}
// Did instant transfer succeed?
if createResp.Data.Reuse {
return nil
// A valid FileID is returned only when instant transfer succeeds; otherwise it is 0
if createResp.Data.FileID != 0 {
return File{
FileName: file.GetName(),
Size: file.GetSize(),
FileId: createResp.Data.FileID,
Type: 2,
Etag: etag,
}, nil
}
}
up(10)
return d.Upload(ctx, file, createResp, up)
// 2. Upload the file in parts
err = d.Upload(ctx, file, createResp, up)
if err != nil {
return nil, err
}
// 3. Finalize the upload
for range 60 {
uploadCompleteResp, err := d.complete(createResp.Data.PreuploadID)
// May return an unknown error code: 20103 (the docs do not explain it either)
if err == nil && uploadCompleteResp.Data.Completed && uploadCompleteResp.Data.FileID != 0 {
up(100)
return File{
FileName: file.GetName(),
Size: file.GetSize(),
FileId: uploadCompleteResp.Data.FileID,
Type: 2,
Etag: etag,
}, nil
}
// If the API returns completed == false, keep polling this endpoint at 1-second intervals until the final upload result is available.
time.Sleep(time.Second)
}
return nil, fmt.Errorf("upload complete timeout")
}
var _ driver.Driver = (*Open123)(nil)
var _ driver.PutResult = (*Open123)(nil)
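
As the comments in the hunk note, the new Put ends by polling `complete` up to 60 times at one-second intervals before giving up. The bounded-poll pattern in isolation, with `checkDone` standing in for the driver's `complete` call:

package main

import (
	"errors"
	"fmt"
	"time"
)

// pollUntilDone retries checkDone once per second, up to maxTries, mirroring
// the bounded polling loop that follows the slice upload.
func pollUntilDone(maxTries int, checkDone func() (bool, error)) error {
	for i := 0; i < maxTries; i++ {
		done, err := checkDone()
		if err == nil && done {
			return nil
		}
		// The remote merge may still be in progress; wait one second and retry.
		time.Sleep(time.Second)
	}
	return errors.New("upload complete timeout")
}

func main() {
	calls := 0
	err := pollUntilDone(60, func() (bool, error) {
		calls++
		return calls >= 3, nil // pretend the server finishes on the third poll
	})
	fmt.Println(err, "after", calls, "polls")
}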

View File

@ -73,7 +73,9 @@ func (f File) GetName() string {
}
func (f File) CreateTime() time.Time {
parsedTime, err := time.Parse("2006-01-02 15:04:05", f.CreateAt)
// The returned timestamp carries no timezone info; assume UTC+8
loc := time.FixedZone("UTC+8", 8*60*60)
parsedTime, err := time.ParseInLocation("2006-01-02 15:04:05", f.CreateAt, loc)
if err != nil {
return time.Now()
}
@ -81,7 +83,9 @@ func (f File) CreateTime() time.Time {
}
func (f File) ModTime() time.Time {
parsedTime, err := time.Parse("2006-01-02 15:04:05", f.UpdateAt)
// The returned timestamp carries no timezone info; assume UTC+8
loc := time.FixedZone("UTC+8", 8*60*60)
parsedTime, err := time.ParseInLocation("2006-01-02 15:04:05", f.UpdateAt, loc)
if err != nil {
return time.Now()
}
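
Both time hunks swap `time.Parse`, which reads a zone-less timestamp as UTC, for `time.ParseInLocation` with a fixed UTC+8 zone. A tiny demonstration of the eight-hour skew this fixes:

package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05"
	raw := "2025-08-09 12:00:00" // the API returns wall-clock time with no zone

	utc, _ := time.Parse(layout, raw) // old behavior: read as UTC
	cst, _ := time.ParseInLocation(layout, raw, time.FixedZone("UTC+8", 8*60*60))

	// Same wall-clock reading, eight hours apart on the absolute timeline.
	fmt.Println(utc.Sub(cst)) // 8h0m0s
}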
@ -154,6 +158,7 @@ type DownloadInfoResp struct {
} `json:"data"`
}
// Response of the Create File V2 API
type UploadCreateResp struct {
BaseResp
Data struct {
@ -161,45 +166,15 @@ type UploadCreateResp struct {
PreuploadID string `json:"preuploadID"`
Reuse bool `json:"reuse"`
SliceSize int64 `json:"sliceSize"`
Servers []string `json:"servers"`
} `json:"data"`
}
type UploadUrlResp struct {
BaseResp
Data struct {
PresignedURL string `json:"presignedURL"`
}
}
// Response of the Upload Complete V2 API
type UploadCompleteResp struct {
BaseResp
Data struct {
Async bool `json:"async"`
Completed bool `json:"completed"`
FileID int64 `json:"fileID"`
} `json:"data"`
}
type UploadAsyncResp struct {
BaseResp
Data struct {
Completed bool `json:"completed"`
FileID int64 `json:"fileID"`
} `json:"data"`
}
type UploadResp struct {
BaseResp
Data struct {
AccessKeyId string `json:"AccessKeyId"`
Bucket string `json:"Bucket"`
Key string `json:"Key"`
SecretAccessKey string `json:"SecretAccessKey"`
SessionToken string `json:"SessionToken"`
FileId int64 `json:"FileId"`
Reuse bool `json:"Reuse"`
EndPoint string `json:"EndPoint"`
StorageNode string `json:"StorageNode"`
UploadId string `json:"UploadId"`
} `json:"data"`
}

View File

@ -1,21 +1,28 @@
package _123_open
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"mime/multipart"
"net/http"
"strconv"
"strings"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
)
// Create file (V2)
func (d *Open123) create(parentFileID int64, filename string, etag string, size int64, duplicate int, containDir bool) (*UploadCreateResp, error) {
var resp UploadCreateResp
_, err := d.Request(UploadCreate, http.MethodPost, func(req *resty.Request) {
@ -34,21 +41,137 @@ func (d *Open123) create(parentFileID int64, filename string, etag string, size
return &resp, nil
}
func (d *Open123) url(preuploadID string, sliceNo int64) (string, error) {
// get upload url
var resp UploadUrlResp
_, err := d.Request(UploadUrl, http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"preuploadId": preuploadID,
"sliceNo": sliceNo,
})
}, &resp)
// Upload slices (V2)
func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createResp *UploadCreateResp, up driver.UpdateProgress) error {
uploadDomain := createResp.Data.Servers[0]
size := file.GetSize()
chunkSize := createResp.Data.SliceSize
ss, err := stream.NewStreamSectionReader(file, int(chunkSize), &up)
if err != nil {
return "", err
}
return resp.Data.PresignedURL, nil
return err
}
uploadNums := (size + chunkSize - 1) / chunkSize
thread := min(int(uploadNums), d.UploadThread)
threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
for partIndex := range uploadNums {
if utils.IsCanceled(uploadCtx) {
break
}
partIndex := partIndex
partNumber := partIndex + 1 // slice numbers start at 1
offset := partIndex * chunkSize
size := min(chunkSize, size-offset)
var reader *stream.SectionReader
var rateLimitedRd io.Reader
sliceMD5 := ""
threadG.GoWithLifecycle(errgroup.Lifecycle{
Before: func(ctx context.Context) error {
if reader == nil {
var err error
// one reader per slice
reader, err = ss.GetSectionReader(offset, size)
if err != nil {
return err
}
// compute the MD5 of the current slice
sliceMD5, err = utils.HashReader(utils.MD5, reader)
if err != nil {
return err
}
rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
}
return nil
},
Do: func(ctx context.Context) error {
// Reset the slice reader: HashReader (or a previous failed attempt) has already read it to EOF
reader.Seek(0, io.SeekStart)
// build the multipart form data
var b bytes.Buffer
w := multipart.NewWriter(&b)
// add the form fields
err = w.WriteField("preuploadID", createResp.Data.PreuploadID)
if err != nil {
return err
}
err = w.WriteField("sliceNo", strconv.FormatInt(partNumber, 10))
if err != nil {
return err
}
err = w.WriteField("sliceMD5", sliceMD5)
if err != nil {
return err
}
// write the file content
fw, err := w.CreateFormFile("slice", fmt.Sprintf("%s.part%d", file.GetName(), partNumber))
if err != nil {
return err
}
_, err = utils.CopyWithBuffer(fw, rateLimitedRd)
if err != nil {
return err
}
err = w.Close()
if err != nil {
return err
}
// create the request and set its headers
req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadDomain+"/upload/v2/file/slice", &b)
if err != nil {
return err
}
// set the request headers
req.Header.Add("Authorization", "Bearer "+d.AccessToken)
req.Header.Add("Content-Type", w.FormDataContentType())
req.Header.Add("Platform", "open_platform")
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != 200 {
return fmt.Errorf("slice %d upload failed, status code: %d", partNumber, res.StatusCode)
}
var resp BaseResp
respBody, err := io.ReadAll(res.Body)
if err != nil {
return err
}
err = json.Unmarshal(respBody, &resp)
if err != nil {
return err
}
if resp.Code != 0 {
return fmt.Errorf("slice %d upload failed: %s", partNumber, resp.Message)
}
progress := 10.0 + 85.0*float64(threadG.Success())/float64(uploadNums)
up(progress)
return nil
},
After: func(err error) {
ss.FreeSectionReader(reader)
},
})
}
if err := threadG.Wait(); err != nil {
return err
}
return nil
}
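
The rewritten Upload posts each slice as a multipart form (preuploadID, sliceNo, sliceMD5, then the slice body) to `/upload/v2/file/slice`, replacing the old presigned-URL PUT. A self-contained sketch of assembling one such request; the server URL and token are placeholders:

package main

import (
	"bytes"
	"fmt"
	"mime/multipart"
	"net/http"
)

// newSliceRequest assembles a multipart/form-data request for one slice,
// mirroring the fields written in the Upload hunk above.
func newSliceRequest(server, token, preuploadID string, sliceNo int64, sliceMD5 string, data []byte) (*http.Request, error) {
	var b bytes.Buffer
	w := multipart.NewWriter(&b)
	_ = w.WriteField("preuploadID", preuploadID)
	_ = w.WriteField("sliceNo", fmt.Sprint(sliceNo))
	_ = w.WriteField("sliceMD5", sliceMD5)
	fw, err := w.CreateFormFile("slice", fmt.Sprintf("part%d", sliceNo))
	if err != nil {
		return nil, err
	}
	if _, err := fw.Write(data); err != nil {
		return nil, err
	}
	if err := w.Close(); err != nil { // Close writes the trailing boundary
		return nil, err
	}
	req, err := http.NewRequest(http.MethodPost, server+"/upload/v2/file/slice", &b)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Authorization", "Bearer "+token)
	req.Header.Set("Content-Type", w.FormDataContentType())
	return req, nil
}

func main() {
	req, err := newSliceRequest("https://openapi-upload.example.invalid", "TOKEN", "pre-123", 1,
		"5d41402abc4b2a76b9719d911017c592", []byte("hello"))
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Method, req.URL, req.Header.Get("Content-Type"))
}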
// Complete the upload
func (d *Open123) complete(preuploadID string) (*UploadCompleteResp, error) {
var resp UploadCompleteResp
_, err := d.Request(UploadComplete, http.MethodPost, func(req *resty.Request) {
@ -61,91 +184,3 @@ func (d *Open123) complete(preuploadID string) (*UploadCompleteResp, error) {
}
return &resp, nil
}
func (d *Open123) async(preuploadID string) (*UploadAsyncResp, error) {
var resp UploadAsyncResp
_, err := d.Request(UploadAsync, http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"preuploadID": preuploadID,
})
}, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}
func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createResp *UploadCreateResp, up driver.UpdateProgress) error {
size := file.GetSize()
chunkSize := createResp.Data.SliceSize
uploadNums := (size + chunkSize - 1) / chunkSize
threadG, uploadCtx := errgroup.NewGroupWithContext(ctx, d.UploadThread,
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
for partIndex := int64(0); partIndex < uploadNums; partIndex++ {
if utils.IsCanceled(uploadCtx) {
return ctx.Err()
}
partIndex := partIndex
partNumber := partIndex + 1 // slice numbers start at 1
offset := partIndex * chunkSize
size := min(chunkSize, size-offset)
limitedReader, err := file.RangeRead(http_range.Range{
Start: offset,
Length: size})
if err != nil {
return err
}
limitedReader = driver.NewLimitedUploadStream(ctx, limitedReader)
threadG.Go(func(ctx context.Context) error {
uploadPartUrl, err := d.url(createResp.Data.PreuploadID, partNumber)
if err != nil {
return err
}
req, err := http.NewRequestWithContext(ctx, "PUT", uploadPartUrl, limitedReader)
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = size
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
_ = res.Body.Close()
progress := 10.0 + 85.0*float64(threadG.Success())/float64(uploadNums)
up(progress)
return nil
})
}
if err := threadG.Wait(); err != nil {
return err
}
uploadCompleteResp, err := d.complete(createResp.Data.PreuploadID)
if err != nil {
return err
}
if uploadCompleteResp.Data.Async == false || uploadCompleteResp.Data.Completed {
return nil
}
for {
uploadAsyncResp, err := d.async(createResp.Data.PreuploadID)
if err != nil {
return err
}
if uploadAsyncResp.Data.Completed {
break
}
}
up(100)
return nil
}

View File

@ -19,16 +19,14 @@ var ( // QPS limits differ depending on how the AccessToken was obtained; modularized as below for easier ...
AccessToken = InitApiInfo(Api+"/api/v1/access_token", 1)
RefreshToken = InitApiInfo(Api+"/api/v1/oauth2/access_token", 1)
UserInfo = InitApiInfo(Api+"/api/v1/user/info", 1)
FileList = InitApiInfo(Api+"/api/v2/file/list", 4)
FileList = InitApiInfo(Api+"/api/v2/file/list", 3)
DownloadInfo = InitApiInfo(Api+"/api/v1/file/download_info", 0)
Mkdir = InitApiInfo(Api+"/upload/v1/file/mkdir", 2)
Move = InitApiInfo(Api+"/api/v1/file/move", 1)
Rename = InitApiInfo(Api+"/api/v1/file/name", 1)
Trash = InitApiInfo(Api+"/api/v1/file/trash", 2)
UploadCreate = InitApiInfo(Api+"/upload/v1/file/create", 2)
UploadUrl = InitApiInfo(Api+"/upload/v1/file/get_upload_url", 0)
UploadComplete = InitApiInfo(Api+"/upload/v1/file/upload_complete", 0)
UploadAsync = InitApiInfo(Api+"/upload/v1/file/upload_async_result", 1)
UploadCreate = InitApiInfo(Api+"/upload/v2/file/create", 2)
UploadComplete = InitApiInfo(Api+"/upload/v2/file/upload_complete", 0)
)
func (d *Open123) Request(apiInfo *ApiInfo, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {

View File

@ -70,14 +70,6 @@ func (d *Pan123Share) List(ctx context.Context, dir model.Obj, args model.ListAr
func (d *Pan123Share) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
// TODO return link of file, required
if f, ok := file.(File); ok {
//var resp DownResp
var headers map[string]string
if !utils.IsLocalIPAddr(args.IP) {
headers = map[string]string{
//"X-Real-IP": "1.1.1.1",
"X-Forwarded-For": args.IP,
}
}
data := base.Json{
"shareKey": d.ShareKey,
"SharePwd": d.SharePwd,
@ -87,25 +79,27 @@ func (d *Pan123Share) Link(ctx context.Context, file model.Obj, args model.LinkA
"size": f.Size,
}
resp, err := d.request(DownloadInfo, http.MethodPost, func(req *resty.Request) {
req.SetBody(data).SetHeaders(headers)
req.SetBody(data)
}, nil)
if err != nil {
return nil, err
}
downloadUrl := utils.Json.Get(resp, "data", "DownloadURL").ToString()
u, err := url.Parse(downloadUrl)
ou, err := url.Parse(downloadUrl)
if err != nil {
return nil, err
}
nu := u.Query().Get("params")
u_ := ou.String()
nu := ou.Query().Get("params")
if nu != "" {
du, _ := base64.StdEncoding.DecodeString(nu)
u, err = url.Parse(string(du))
u, err := url.Parse(string(du))
if err != nil {
return nil, err
}
u_ = u.String()
}
u_ := u.String()
log.Debug("download url: ", u_)
res, err := base.NoRedirectClient.R().SetHeader("Referer", "https://www.123pan.com/").Get(u_)
if err != nil {
@ -122,7 +116,7 @@ func (d *Pan123Share) Link(ctx context.Context, file model.Obj, args model.LinkA
link.URL = utils.Json.Get(res.Body(), "data", "redirect_url").ToString()
}
link.Header = http.Header{
"Referer": []string{"https://www.123pan.com/"},
"Referer": []string{fmt.Sprintf("%s://%s/", ou.Scheme, ou.Host)},
}
return &link, nil
}
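
The Link hunk now keeps the original URL (`ou`) separate from the final target: when the `params` query value is present it is base64-decoded into the real download URL, and the Referer is later derived from the original scheme and host. A sketch of the decode step with made-up URLs:

package main

import (
	"encoding/base64"
	"fmt"
	"net/url"
)

// resolveDownloadURL keeps the URL as-is unless a base64-encoded "params"
// query value carries the real target, mirroring the hunk above.
func resolveDownloadURL(raw string) (string, error) {
	ou, err := url.Parse(raw)
	if err != nil {
		return "", err
	}
	target := ou.String()
	if p := ou.Query().Get("params"); p != "" {
		du, err := base64.StdEncoding.DecodeString(p)
		if err != nil {
			return "", err
		}
		u, err := url.Parse(string(du))
		if err != nil {
			return "", err
		}
		target = u.String()
	}
	return target, nil
}

func main() {
	inner := base64.StdEncoding.EncodeToString([]byte("https://dl.example.invalid/file?sig=abc"))
	out, _ := resolveDownloadURL("https://www.123pan.com/d?params=" + url.QueryEscape(inner))
	fmt.Println(out) // https://dl.example.invalid/file?sig=abc
}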

View File

@ -17,15 +17,8 @@ type Addition struct {
var config = driver.Config{
Name: "123PanShare",
LocalSort: true,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: true,
NeedMs: false,
DefaultRoot: "0",
CheckStatus: false,
Alert: "",
NoOverwriteUpload: false,
}
func init() {

View File

@ -522,19 +522,17 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
var err error
fullHash := stream.GetHash().GetHash(utils.SHA256)
if len(fullHash) != utils.SHA256.Width {
_, fullHash, err = streamPkg.CacheFullInTempFileAndHash(stream, utils.SHA256)
_, fullHash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA256)
if err != nil {
return err
}
}
size := stream.GetSize()
var partSize = d.getPartSize(size)
part := size / partSize
if size%partSize > 0 {
part++
} else if part == 0 {
part = 1
partSize := d.getPartSize(size)
part := int64(1)
if size > partSize {
part = (size + partSize - 1) / partSize
}
partInfos := make([]PartInfo, 0, part)
for i := int64(0); i < part; i++ {
@ -636,11 +634,10 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
// Update Progress
r := io.TeeReader(limitReader, p)
req, err := http.NewRequest("PUT", uploadPartInfo.UploadUrl, r)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadPartInfo.UploadUrl, r)
if err != nil {
return err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Content-Length", fmt.Sprint(partSize))
req.Header.Set("Origin", "https://yun.139.com")
@ -786,12 +783,10 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
size := stream.GetSize()
// Progress
p := driver.NewProgress(size, up)
var partSize = d.getPartSize(size)
part := size / partSize
if size%partSize > 0 {
part++
} else if part == 0 {
part = 1
partSize := d.getPartSize(size)
part := int64(1)
if size > partSize {
part = (size + partSize - 1) / partSize
}
rateLimited := driver.NewLimitedUploadStream(ctx, stream)
for i := int64(0); i < part; i++ {
@ -805,12 +800,10 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
limitReader := io.LimitReader(rateLimited, byteSize)
// Update Progress
r := io.TeeReader(limitReader, p)
req, err := http.NewRequest("POST", resp.Data.UploadResult.RedirectionURL, r)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, resp.Data.UploadResult.RedirectionURL, r)
if err != nil {
return err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "text/plain;name="+unicode(stream.GetName()))
req.Header.Set("contentSize", strconv.FormatInt(size, 10))
req.Header.Set("range", fmt.Sprintf("bytes=%d-%d", start, start+byteSize-1))

View File

@ -365,11 +365,10 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
log.Debugf("uploadData: %+v", uploadData)
requestURL := uploadData.RequestURL
uploadHeaders := strings.Split(decodeURIComponent(uploadData.RequestHeader), "&")
req, err := http.NewRequest(http.MethodPut, requestURL, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
req, err := http.NewRequestWithContext(ctx, http.MethodPut, requestURL, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
if err != nil {
return err
}
req = req.WithContext(ctx)
for _, v := range uploadHeaders {
i := strings.Index(v, "=")
req.Header.Set(v[0:i], v[i+1:])

View File

@ -5,17 +5,19 @@ import (
"encoding/base64"
"encoding/xml"
"fmt"
"github.com/skip2/go-qrcode"
"io"
"net/http"
"strconv"
"strings"
"time"
"github.com/skip2/go-qrcode"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/go-resty/resty/v2"
@ -311,11 +313,14 @@ func (y *Cloud189TV) RapidUpload(ctx context.Context, dstDir model.Obj, stream m
// Legacy upload; the family cloud does not support overwrite
func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
tempFile, err := file.CacheFullInTempFile()
if err != nil {
return nil, err
fileMd5 := file.GetHash().GetHash(utils.MD5)
var tempFile = file.GetFile()
var err error
if len(fileMd5) != utils.MD5.Width {
tempFile, fileMd5, err = stream.CacheFullAndHash(file, &up, utils.MD5)
} else if tempFile == nil {
tempFile, err = file.CacheFullAndWriter(&up, nil)
}
fileMd5, err := utils.HashFile(utils.MD5, tempFile)
if err != nil {
return nil, err
}
@ -345,7 +350,7 @@ func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model
header["Edrive-UploadFileId"] = fmt.Sprint(status.UploadFileId)
}
_, err := y.put(ctx, status.FileUploadUrl, header, true, io.NopCloser(tempFile), isFamily)
_, err := y.put(ctx, status.FileUploadUrl, header, true, tempFile, isFamily)
if err, ok := err.(*RespErr); ok && err.Code != "InputStreamReadError" {
return nil, err
}
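
OldUpload now reuses an MD5 already attached to the stream and only caches the full file (hashing it along the way) when the digest is missing. A rough standalone sketch of the same decision; the helper below is a simplification, not the repo's `CacheFullAndHash`:

package main

import (
	"bytes"
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

// ensureMD5 returns a seekable copy of r plus its MD5 hex, computing the
// digest only when `known` is not already a full-width MD5 string.
func ensureMD5(r io.Reader, known string) (io.ReadSeeker, string, error) {
	if len(known) == hex.EncodedLen(md5.Size) { // 32 hex chars: reuse, skip hashing
		data, err := io.ReadAll(r)
		if err != nil {
			return nil, "", err
		}
		return bytes.NewReader(data), known, nil
	}
	h := md5.New()
	data, err := io.ReadAll(io.TeeReader(r, h)) // cache and hash in one pass
	if err != nil {
		return nil, "", err
	}
	return bytes.NewReader(data), hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	_, sum, _ := ensureMD5(strings.NewReader("hello"), "")
	fmt.Println(sum) // 5d41402abc4b2a76b9719d911017c592
}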

View File

@ -7,6 +7,7 @@ import (
"encoding/hex"
"encoding/xml"
"fmt"
"hash"
"io"
"net/http"
"net/http/cookiejar"
@ -472,7 +473,7 @@ func (y *Cloud189PC) refreshSession() (err error) {
// Files of size 0 cannot be uploaded
func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
size := file.GetSize()
sliceSize := partSize(size)
sliceSize := min(size, partSize(size))
params := Params{
"parentFolderId": dstDir.GetID(),
@ -500,43 +501,71 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
return nil, err
}
threadG, upCtx := errgroup.NewGroupWithContext(ctx, y.uploadThread,
ss, err := stream.NewStreamSectionReader(file, int(sliceSize), &up)
if err != nil {
return nil, err
}
threadG, upCtx := errgroup.NewOrderedGroupWithContext(ctx, y.uploadThread,
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
count := int(size / sliceSize)
count := 1
if size > sliceSize {
count = int((size + sliceSize - 1) / sliceSize)
}
lastPartSize := size % sliceSize
if lastPartSize > 0 {
count++
} else {
if lastPartSize == 0 {
lastPartSize = sliceSize
}
fileMd5 := utils.MD5.NewFunc()
silceMd5 := utils.MD5.NewFunc()
silceMd5Hexs := make([]string, 0, count)
teeReader := io.TeeReader(file, io.MultiWriter(fileMd5, silceMd5))
byteSize := sliceSize
silceMd5 := utils.MD5.NewFunc()
var writers io.Writer = silceMd5
fileMd5Hex := file.GetHash().GetHash(utils.MD5)
var fileMd5 hash.Hash
if len(fileMd5Hex) != utils.MD5.Width {
fileMd5 = utils.MD5.NewFunc()
writers = io.MultiWriter(silceMd5, fileMd5)
}
for i := 1; i <= count; i++ {
if utils.IsCanceled(upCtx) {
break
}
offset := int64((i)-1) * sliceSize
size := sliceSize
if i == count {
byteSize = lastPartSize
size = lastPartSize
}
partInfo := ""
var reader *stream.SectionReader
var rateLimitedRd io.Reader
threadG.GoWithLifecycle(errgroup.Lifecycle{
Before: func(ctx context.Context) error {
if reader == nil {
var err error
reader, err = ss.GetSectionReader(offset, size)
if err != nil {
return err
}
byteData := make([]byte, byteSize)
// read the block
silceMd5.Reset()
if _, err := io.ReadFull(teeReader, byteData); err != io.EOF && err != nil {
return nil, err
w, err := utils.CopyWithBuffer(writers, reader)
if w != size {
return fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", size, w, err)
}
// compute the block MD5, then hex- and base64-encode it
md5Bytes := silceMd5.Sum(nil)
silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Bytes)))
partInfo := fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes))
partInfo = fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes))
threadG.Go(func(ctx context.Context) error {
rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
}
return nil
},
Do: func(ctx context.Context) error {
reader.Seek(0, io.SeekStart)
uploadUrls, err := y.GetMultiUploadUrls(ctx, isFamily, initMultiUpload.Data.UploadFileID, partInfo)
if err != nil {
return err
@ -545,19 +574,26 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
// step 4: upload the slice
uploadUrl := uploadUrls[0]
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false,
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)), isFamily)
driver.NewLimitedUploadStream(ctx, rateLimitedRd), isFamily)
if err != nil {
return err
}
up(float64(threadG.Success()) * 100 / float64(count))
return nil
})
},
After: func(err error) {
ss.FreeSectionReader(reader)
},
},
)
}
if err = threadG.Wait(); err != nil {
return nil, err
}
fileMd5Hex := strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
if fileMd5 != nil {
fileMd5Hex = strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
}
sliceMd5Hex := fileMd5Hex
if file.GetSize() > sliceSize {
sliceMd5Hex = strings.ToUpper(utils.GetMD5EncodeStr(strings.Join(silceMd5Hexs, "\n")))
@ -620,11 +656,12 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
cache = tmpF
}
sliceSize := partSize(size)
count := int(size / sliceSize)
count := 1
if size > sliceSize {
count = int((size + sliceSize - 1) / sliceSize)
}
lastSliceSize := size % sliceSize
if lastSliceSize > 0 {
count++
} else {
if lastSliceSize == 0 {
lastSliceSize = sliceSize
}
@ -738,7 +775,8 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
}
// step 4: upload the slice
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, io.NewSectionReader(cache, offset, byteSize), isFamily)
rateLimitedRd := driver.NewLimitedUploadStream(ctx, io.NewSectionReader(cache, offset, byteSize))
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, rateLimitedRd, isFamily)
if err != nil {
return err
}
@ -820,7 +858,7 @@ func (y *Cloud189PC) GetMultiUploadUrls(ctx context.Context, isFamily bool, uplo
// Legacy upload; the family cloud does not support overwrite
func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
tempFile, fileMd5, err := stream.CacheFullInTempFileAndHash(file, utils.MD5)
tempFile, fileMd5, err := stream.CacheFullAndHash(file, &up, utils.MD5)
if err != nil {
return nil, err
}

View File

@ -3,7 +3,9 @@ package alias
import (
"context"
"errors"
"fmt"
"io"
"net/url"
stdpath "path"
"strings"
@ -11,8 +13,11 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/fs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/sign"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/OpenList/v4/server/common"
)
type Alias struct {
@ -75,10 +80,18 @@ func (d *Alias) Get(ctx context.Context, path string) (model.Obj, error) {
return nil, errs.ObjectNotFound
}
for _, dst := range dsts {
obj, err := d.get(ctx, path, dst, sub)
if err == nil {
return obj, nil
obj, err := fs.Get(ctx, stdpath.Join(dst, sub), &fs.GetArgs{NoLog: true})
if err != nil {
continue
}
return &model.Object{
Path: path,
Name: obj.GetName(),
Size: obj.GetSize(),
Modified: obj.ModTime(),
IsFolder: obj.IsDir(),
HashInfo: obj.GetHash(),
}, nil
}
return nil, errs.ObjectNotFound
}
@ -96,7 +109,27 @@ func (d *Alias) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
var objs []model.Obj
fsArgs := &fs.ListArgs{NoLog: true, Refresh: args.Refresh}
for _, dst := range dsts {
tmp, err := d.list(ctx, dst, sub, fsArgs)
tmp, err := fs.List(ctx, stdpath.Join(dst, sub), fsArgs)
if err == nil {
tmp, err = utils.SliceConvert(tmp, func(obj model.Obj) (model.Obj, error) {
thumb, ok := model.GetThumb(obj)
objRes := model.Object{
Name: obj.GetName(),
Size: obj.GetSize(),
Modified: obj.ModTime(),
IsFolder: obj.IsDir(),
}
if !ok {
return &objRes, nil
}
return &model.ObjThumb{
Object: objRes,
Thumbnail: model.Thumbnail{
Thumbnail: thumb,
},
}, nil
})
}
if err == nil {
objs = append(objs, tmp...)
}
@ -110,21 +143,45 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
if !ok {
return nil, errs.ObjectNotFound
}
// proxy || ftp,s3
if common.GetApiUrl(ctx) == "" {
args.Redirect = false
}
for _, dst := range dsts {
link, err := d.link(ctx, dst, sub, args)
if err == nil {
if !args.Redirect && len(link.URL) > 0 {
// Normally, concurrency is only supported for drivers that return a URL
// Nesting an alias inside an alias lets drivers that return no URL (crypt, mega, etc.) support concurrency
reqPath := stdpath.Join(dst, sub)
link, fi, err := d.link(ctx, reqPath, args)
if err != nil {
continue
}
if link == nil {
// redirected, but it must go through the proxy
return &model.Link{
URL: fmt.Sprintf("%s/p%s?sign=%s",
common.GetApiUrl(ctx),
utils.EncodePath(reqPath, true),
sign.Sign(reqPath)),
}, nil
}
resultLink := *link
resultLink.SyncClosers = utils.NewSyncClosers(link)
if args.Redirect {
return &resultLink, nil
}
if resultLink.ContentLength == 0 {
resultLink.ContentLength = fi.GetSize()
}
if resultLink.MFile != nil {
return &resultLink, nil
}
if d.DownloadConcurrency > 0 {
link.Concurrency = d.DownloadConcurrency
resultLink.Concurrency = d.DownloadConcurrency
}
if d.DownloadPartSize > 0 {
link.PartSize = d.DownloadPartSize * utils.KB
}
}
return link, nil
resultLink.PartSize = d.DownloadPartSize * utils.KB
}
return &resultLink, nil
}
return nil, errs.ObjectNotFound
}
@ -166,7 +223,8 @@ func (d *Alias) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
}
if len(srcPath) == len(dstPath) {
for i := range srcPath {
err = errors.Join(err, fs.Move(ctx, *srcPath[i], *dstPath[i]))
_, e := fs.Move(ctx, *srcPath[i], *dstPath[i])
err = errors.Join(err, e)
}
return err
} else {
@ -250,20 +308,29 @@ func (d *Alias) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer,
reqPath, err := d.getReqPath(ctx, dstDir, true)
if err == nil {
if len(reqPath) == 1 {
return fs.PutDirectly(ctx, *reqPath[0], s)
} else {
defer s.Close()
file, err := s.CacheFullInTempFile()
storage, reqActualPath, err := op.GetStorageAndActualPath(*reqPath[0])
if err != nil {
return err
}
for _, path := range reqPath {
return op.Put(ctx, storage, reqActualPath, &stream.FileStream{
Obj: s,
Mimetype: s.GetMimetype(),
Reader: s,
}, up)
} else {
file, err := s.CacheFullAndWriter(nil, nil)
if err != nil {
return err
}
count := float64(len(reqPath) + 1)
up(100 / count)
for i, path := range reqPath {
err = errors.Join(err, fs.PutDirectly(ctx, *path, &stream.FileStream{
Obj: s,
Mimetype: s.GetMimetype(),
WebPutAsTask: s.NeedStore(),
Reader: file,
}))
up(float64(i+2) / float64(count) * 100)
_, e := file.Seek(0, io.SeekStart)
if e != nil {
return errors.Join(err, e)
@ -335,18 +402,24 @@ func (d *Alias) Extract(ctx context.Context, obj model.Obj, args model.ArchiveIn
return nil, errs.ObjectNotFound
}
for _, dst := range dsts {
link, err := d.extract(ctx, dst, sub, args)
if err == nil {
if !args.Redirect && len(link.URL) > 0 {
if d.DownloadConcurrency > 0 {
link.Concurrency = d.DownloadConcurrency
reqPath := stdpath.Join(dst, sub)
link, err := d.extract(ctx, reqPath, args)
if err != nil {
continue
}
if d.DownloadPartSize > 0 {
link.PartSize = d.DownloadPartSize * utils.KB
}
}
return link, nil
if link == nil {
return &model.Link{
URL: fmt.Sprintf("%s/ap%s?inner=%s&pass=%s&sign=%s",
common.GetApiUrl(ctx),
utils.EncodePath(reqPath, true),
utils.EncodePath(args.InnerPath, true),
url.QueryEscape(args.Password),
sign.SignArchive(reqPath)),
}, nil
}
resultLink := *link
resultLink.SyncClosers = utils.NewSyncClosers(link)
return &resultLink, nil
}
return nil, errs.NotImplement
}
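
When the target storage must be proxied, Link and Extract now return a signed `/p...` or `/ap...` URL built from the API base instead of the storage's own link. A sketch of the URL assembly, where `signFn` stands in for the project's `sign.Sign` and the escaping only approximates `utils.EncodePath`:

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// proxyURL builds the /p-style proxy link from the hunk above. apiURL and
// signFn are assumptions standing in for common.GetApiUrl and sign.Sign.
func proxyURL(apiURL, reqPath string, signFn func(string) string) string {
	// Escape each path segment but keep the slashes, roughly what
	// utils.EncodePath(reqPath, true) does.
	segs := strings.Split(reqPath, "/")
	for i, s := range segs {
		segs[i] = url.PathEscape(s)
	}
	return fmt.Sprintf("%s/p%s?sign=%s", apiURL, strings.Join(segs, "/"), signFn(reqPath))
}

func main() {
	link := proxyURL("https://pan.example.invalid", "/docs/My File.txt", func(p string) string {
		return "stub-signature" // the real signer HMACs the path with an expiry
	})
	fmt.Println(link) // https://pan.example.invalid/p/docs/My%20File.txt?sign=stub-signature
}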

View File

@ -2,8 +2,6 @@ package alias
import (
"context"
"fmt"
"net/url"
stdpath "path"
"strings"
@ -12,8 +10,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/fs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/sign"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/OpenList/v4/server/common"
)
@ -54,79 +50,22 @@ func (d *Alias) getRootAndPath(path string) (string, string) {
return parts[0], parts[1]
}
func (d *Alias) get(ctx context.Context, path string, dst, sub string) (model.Obj, error) {
obj, err := fs.Get(ctx, stdpath.Join(dst, sub), &fs.GetArgs{NoLog: true})
if err != nil {
return nil, err
}
return &model.Object{
Path: path,
Name: obj.GetName(),
Size: obj.GetSize(),
Modified: obj.ModTime(),
IsFolder: obj.IsDir(),
HashInfo: obj.GetHash(),
}, nil
}
func (d *Alias) list(ctx context.Context, dst, sub string, args *fs.ListArgs) ([]model.Obj, error) {
objs, err := fs.List(ctx, stdpath.Join(dst, sub), args)
// the obj must implement the model.SetPath interface
// return objs, err
if err != nil {
return nil, err
}
return utils.SliceConvert(objs, func(obj model.Obj) (model.Obj, error) {
thumb, ok := model.GetThumb(obj)
objRes := model.Object{
Name: obj.GetName(),
Size: obj.GetSize(),
Modified: obj.ModTime(),
IsFolder: obj.IsDir(),
}
if !ok {
return &objRes, nil
}
return &model.ObjThumb{
Object: objRes,
Thumbnail: model.Thumbnail{
Thumbnail: thumb,
},
}, nil
})
}
func (d *Alias) link(ctx context.Context, dst, sub string, args model.LinkArgs) (*model.Link, error) {
reqPath := stdpath.Join(dst, sub)
// modeled on the crypt driver
func (d *Alias) link(ctx context.Context, reqPath string, args model.LinkArgs) (*model.Link, model.Obj, error) {
storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
if err != nil {
return nil, err
return nil, nil, err
}
useRawLink := len(common.GetApiUrl(ctx)) == 0 // ftp、s3
if !useRawLink {
_, ok := storage.(*Alias)
useRawLink = !ok && !args.Redirect
if !args.Redirect {
return op.Link(ctx, storage, reqActualPath, args)
}
if useRawLink {
link, _, err := op.Link(ctx, storage, reqActualPath, args)
return link, err
}
_, err = fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
obj, err := fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
if err != nil {
return nil, err
return nil, nil, err
}
if common.ShouldProxy(storage, stdpath.Base(sub)) {
link := &model.Link{
URL: fmt.Sprintf("%s/p%s?sign=%s",
common.GetApiUrl(ctx),
utils.EncodePath(reqPath, true),
sign.Sign(reqPath)),
if common.ShouldProxy(storage, stdpath.Base(reqPath)) {
return nil, obj, nil
}
return link, nil
}
link, _, err := op.Link(ctx, storage, reqActualPath, args)
return link, err
return op.Link(ctx, storage, reqActualPath, args)
}
func (d *Alias) getReqPath(ctx context.Context, obj model.Obj, isParent bool) ([]*string, error) {
@ -197,8 +136,7 @@ func (d *Alias) listArchive(ctx context.Context, dst, sub string, args model.Arc
return nil, errs.NotImplement
}
func (d *Alias) extract(ctx context.Context, dst, sub string, args model.ArchiveInnerArgs) (*model.Link, error) {
reqPath := stdpath.Join(dst, sub)
func (d *Alias) extract(ctx context.Context, reqPath string, args model.ArchiveInnerArgs) (*model.Link, error) {
storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
if err != nil {
return nil, err
@ -206,20 +144,12 @@ func (d *Alias) extract(ctx context.Context, dst, sub string, args model.Archive
if _, ok := storage.(driver.ArchiveReader); !ok {
return nil, errs.NotImplement
}
if args.Redirect && common.ShouldProxy(storage, stdpath.Base(sub)) {
_, err = fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
if err != nil {
if args.Redirect && common.ShouldProxy(storage, stdpath.Base(reqPath)) {
_, err := fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
if err == nil {
return nil, err
}
link := &model.Link{
URL: fmt.Sprintf("%s/ap%s?inner=%s&pass=%s&sign=%s",
common.GetApiUrl(ctx),
utils.EncodePath(reqPath, true),
utils.EncodePath(args.InnerPath, true),
url.QueryEscape(args.Password),
sign.SignArchive(reqPath)),
}
return link, nil
return nil, nil
}
link, _, err := op.DriverExtract(ctx, storage, reqActualPath, args)
return link, err

View File

@ -165,7 +165,7 @@ func (d *AliDrive) Remove(ctx context.Context, obj model.Obj) error {
}
func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.FileStreamer, up driver.UpdateProgress) error {
file := stream.FileStream{
file := &stream.FileStream{
Obj: streamer,
Reader: streamer,
Mimetype: streamer.GetMimetype(),
@ -209,7 +209,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
io.Closer
}{
Reader: io.MultiReader(buf, file),
Closer: &file,
Closer: file,
}
}
} else {
@ -297,11 +297,10 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
if d.InternalUpload {
url = partInfo.InternalUploadUrl
}
req, err := http.NewRequest("PUT", url, io.LimitReader(rateLimited, DEFAULT))
req, err := http.NewRequestWithContext(ctx, http.MethodPut, url, io.LimitReader(rateLimited, DEFAULT))
if err != nil {
return err
}
req = req.WithContext(ctx)
res, err := base.HttpClient.Do(req)
if err != nil {
return err

View File

@ -3,7 +3,6 @@ package aliyundrive_open
import (
"context"
"errors"
"fmt"
"net/http"
"path/filepath"
"time"
@ -13,7 +12,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/rateg"
"github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
)
@ -24,8 +22,7 @@ type AliyundriveOpen struct {
DriveId string
limitList func(ctx context.Context, data base.Json) (*Files, error)
limitLink func(ctx context.Context, file model.Obj) (*model.Link, error)
limiter *limiter
ref *AliyundriveOpen
}
@ -38,25 +35,23 @@ func (d *AliyundriveOpen) GetAddition() driver.Additional {
}
func (d *AliyundriveOpen) Init(ctx context.Context) error {
d.limiter = getLimiterForUser(globalLimiterUserID) // First create a globally shared limiter to limit the initial requests.
if d.LIVPDownloadFormat == "" {
d.LIVPDownloadFormat = "jpeg"
}
if d.DriveType == "" {
d.DriveType = "default"
}
res, err := d.request("/adrive/v1.0/user/getDriveInfo", http.MethodPost, nil)
res, err := d.request(ctx, limiterOther, "/adrive/v1.0/user/getDriveInfo", http.MethodPost, nil)
if err != nil {
d.limiter.free()
d.limiter = nil
return err
}
d.DriveId = utils.Json.Get(res, d.DriveType+"_drive_id").ToString()
d.limitList = rateg.LimitFnCtx(d.list, rateg.LimitFnOption{
Limit: 4,
Bucket: 1,
})
d.limitLink = rateg.LimitFnCtx(d.link, rateg.LimitFnOption{
Limit: 1,
Bucket: 1,
})
userid := utils.Json.Get(res, "user_id").ToString()
d.limiter.free()
d.limiter = getLimiterForUser(userid) // Allocate a corresponding limiter for each user.
return nil
}
@ -70,6 +65,8 @@ func (d *AliyundriveOpen) InitReference(storage driver.Driver) error {
}
func (d *AliyundriveOpen) Drop(ctx context.Context) error {
d.limiter.free()
d.limiter = nil
d.ref = nil
return nil
}
@ -87,9 +84,6 @@ func (d *AliyundriveOpen) GetRoot(ctx context.Context) (model.Obj, error) {
}
func (d *AliyundriveOpen) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
if d.limitList == nil {
return nil, fmt.Errorf("driver not init")
}
files, err := d.getFiles(ctx, dir.GetID())
if err != nil {
return nil, err
@ -107,8 +101,8 @@ func (d *AliyundriveOpen) List(ctx context.Context, dir model.Obj, args model.Li
return objs, err
}
func (d *AliyundriveOpen) link(ctx context.Context, file model.Obj) (*model.Link, error) {
res, err := d.request("/adrive/v1.0/openFile/getDownloadUrl", http.MethodPost, func(req *resty.Request) {
func (d *AliyundriveOpen) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
res, err := d.request(ctx, limiterLink, "/adrive/v1.0/openFile/getDownloadUrl", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"file_id": file.GetID(),
@ -132,17 +126,10 @@ func (d *AliyundriveOpen) link(ctx context.Context, file model.Obj) (*model.Link
}, nil
}
func (d *AliyundriveOpen) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if d.limitLink == nil {
return nil, fmt.Errorf("driver not init")
}
return d.limitLink(ctx, file)
}
func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
nowTime, _ := getNowTime()
newDir := File{CreatedAt: nowTime, UpdatedAt: nowTime}
_, err := d.request("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"parent_file_id": parentDir.GetID(),
@ -168,7 +155,7 @@ func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirN
func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
var resp MoveOrCopyResp
_, err := d.request("/adrive/v1.0/openFile/move", http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/move", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"file_id": srcObj.GetID(),
@ -198,7 +185,7 @@ func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (m
func (d *AliyundriveOpen) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
var newFile File
_, err := d.request("/adrive/v1.0/openFile/update", http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/update", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"file_id": srcObj.GetID(),
@ -230,7 +217,7 @@ func (d *AliyundriveOpen) Rename(ctx context.Context, srcObj model.Obj, newName
func (d *AliyundriveOpen) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
var resp MoveOrCopyResp
_, err := d.request("/adrive/v1.0/openFile/copy", http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/copy", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"file_id": srcObj.GetID(),
@ -256,7 +243,7 @@ func (d *AliyundriveOpen) Remove(ctx context.Context, obj model.Obj) error {
if d.RemoveWay == "delete" {
uri = "/adrive/v1.0/openFile/delete"
}
_, err := d.request(uri, http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterOther, uri, http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"file_id": obj.GetID(),
@ -295,7 +282,7 @@ func (d *AliyundriveOpen) Other(ctx context.Context, args model.OtherArgs) (inte
default:
return nil, errs.NotSupport
}
_, err := d.request(uri, http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterOther, uri, http.MethodPost, func(req *resty.Request) {
req.SetBody(data).SetResult(&resp)
})
if err != nil {

View File

@ -0,0 +1,96 @@
package aliyundrive_open
import (
"context"
"fmt"
"sync"
"golang.org/x/time/rate"
)
// See the docs: https://www.yuque.com/aliyundrive/zpfszx/mqocg38hlxzc5vcd
// See the issue: https://github.com/OpenListTeam/OpenList/issues/724
// The limit is per user per app, so the limiter should be global.
type limiterType int
const (
limiterList limiterType = iota
limiterLink
limiterOther
)
const (
listRateLimit = 3.9 // 4 per second in document, but we use 3.9 per second to be safe
linkRateLimit = 0.9 // 1 per second in document, but we use 0.9 per second to be safe
otherRateLimit = 14.9 // 15 per second in document, but we use 14.9 per second to be safe
globalLimiterUserID = "" // Global limiter user ID, used to limit the initial requests.
)
type limiter struct {
usedBy int
list *rate.Limiter
link *rate.Limiter
other *rate.Limiter
}
var limiters = make(map[string]*limiter)
var limitersLock = &sync.Mutex{}
func getLimiterForUser(userid string) *limiter {
limitersLock.Lock()
defer limitersLock.Unlock()
defer func() {
// Clean up limiters that are no longer used.
for id, lim := range limiters {
if lim.usedBy <= 0 && id != globalLimiterUserID { // Do not delete the global limiter.
delete(limiters, id)
}
}
}()
if lim, ok := limiters[userid]; ok {
lim.usedBy++
return lim
}
lim := &limiter{
usedBy: 1,
list: rate.NewLimiter(rate.Limit(listRateLimit), 1),
link: rate.NewLimiter(rate.Limit(linkRateLimit), 1),
other: rate.NewLimiter(rate.Limit(otherRateLimit), 1),
}
limiters[userid] = lim
return lim
}
func (l *limiter) wait(ctx context.Context, typ limiterType) error {
if l == nil {
return fmt.Errorf("driver not init")
}
switch typ {
case limiterList:
return l.list.Wait(ctx)
case limiterLink:
return l.link.Wait(ctx)
case limiterOther:
return l.other.Wait(ctx)
default:
return fmt.Errorf("unknown limiter type")
}
}
func (l *limiter) free() {
if l == nil {
return
}
limitersLock.Lock()
defer limitersLock.Unlock()
l.usedBy--
}
func (d *AliyundriveOpen) wait(ctx context.Context, typ limiterType) error {
if d == nil {
return fmt.Errorf("driver not init")
}
if d.ref != nil {
return d.ref.wait(ctx, typ) // If this is a reference driver, wait on the reference driver.
}
return d.limiter.wait(ctx, typ)
}
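
The limiter above keeps one token-bucket set per user ID with a reference count, so driver instances sharing an account share one quota, and the empty user ID doubles as a global bucket for requests made before the user is known. A reduced sketch of the acquire/free lifecycle with `golang.org/x/time/rate`; the types and the 0.9 rps rate here are illustrative:

package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/time/rate"
)

type userLimiter struct {
	usedBy int
	link   *rate.Limiter
}

var (
	mu       sync.Mutex
	limiters = map[string]*userLimiter{}
)

// acquire hands out one shared limiter per user ID, bumping a refcount
// so several driver instances for the same account share one budget.
func acquire(userID string) *userLimiter {
	mu.Lock()
	defer mu.Unlock()
	if l, ok := limiters[userID]; ok {
		l.usedBy++
		return l
	}
	l := &userLimiter{usedBy: 1, link: rate.NewLimiter(rate.Limit(0.9), 1)}
	limiters[userID] = l
	return l
}

// release drops a reference; a real implementation would also sweep
// unused entries, as getLimiterForUser does in the hunk above.
func release(userID string) {
	mu.Lock()
	defer mu.Unlock()
	if l, ok := limiters[userID]; ok {
		l.usedBy--
	}
}

func main() {
	a, b := acquire("user-1"), acquire("user-1")
	fmt.Println(a == b, a.usedBy) // true 2: both instances share one bucket
	_ = a.link.Wait(context.Background())
	release("user-1")
	release("user-1")
}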

View File

@ -12,6 +12,7 @@ type Addition struct {
OrderBy string `json:"order_by" type:"select" options:"name,size,updated_at,created_at"`
OrderDirection string `json:"order_direction" type:"select" options:"ASC,DESC"`
UseOnlineAPI bool `json:"use_online_api" default:"true"`
AlipanType string `json:"alipan_type" required:"true" type:"select" default:"default" options:"default,alipanTV"`
APIAddress string `json:"api_url_address" default:"https://api.oplist.org/alicloud/renewapi"`
ClientID string `json:"client_id" help:"Keep it empty if you don't have one"`
ClientSecret string `json:"client_secret" help:"Keep it empty if you don't have one"`
@ -24,12 +25,6 @@ type Addition struct {
var config = driver.Config{
Name: "AliyundriveOpen",
LocalSort: false,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: false,
NeedMs: false,
DefaultRoot: "root",
NoOverwriteUpload: true,
}

View File

@ -50,10 +50,10 @@ func calPartSize(fileSize int64) int64 {
return partSize
}
func (d *AliyundriveOpen) getUploadUrl(count int, fileId, uploadId string) ([]PartInfo, error) {
func (d *AliyundriveOpen) getUploadUrl(ctx context.Context, count int, fileId, uploadId string) ([]PartInfo, error) {
partInfoList := makePartInfos(count)
var resp CreateResp
_, err := d.request("/adrive/v1.0/openFile/getUploadUrl", http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/getUploadUrl", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"file_id": fileId,
@ -69,7 +69,7 @@ func (d *AliyundriveOpen) uploadPart(ctx context.Context, r io.Reader, partInfo
if d.InternalUpload {
uploadUrl = strings.ReplaceAll(uploadUrl, "https://cn-beijing-data.aliyundrive.net/", "http://ccp-bj29-bj-1592982087.oss-cn-beijing-internal.aliyuncs.com/")
}
req, err := http.NewRequestWithContext(ctx, "PUT", uploadUrl, r)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, r)
if err != nil {
return err
}
@ -84,10 +84,10 @@ func (d *AliyundriveOpen) uploadPart(ctx context.Context, r io.Reader, partInfo
return nil
}
func (d *AliyundriveOpen) completeUpload(fileId, uploadId string) (model.Obj, error) {
func (d *AliyundriveOpen) completeUpload(ctx context.Context, fileId, uploadId string) (model.Obj, error) {
// 3. complete
var newFile File
_, err := d.request("/adrive/v1.0/openFile/complete", http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/complete", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"file_id": fileId,
@ -137,11 +137,8 @@ func (d *AliyundriveOpen) calProofCode(stream model.FileStreamer) (string, error
}
buf := make([]byte, length)
n, err := io.ReadFull(reader, buf)
if err == io.ErrUnexpectedEOF {
return "", fmt.Errorf("can't read data, expected=%d, got=%d", len(buf), n)
}
if err != nil {
return "", err
if n != int(length) {
return "", fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", length, n, err)
}
return base64.StdEncoding.EncodeToString(buf), nil
}
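
calProofCode now collapses its two error branches into a single length check: any short read, whatever the underlying error, is reported with both the expected and actual byte counts. The pattern in isolation:

package main

import (
	"fmt"
	"io"
	"strings"
)

// readExactly mirrors the new check: demand length bytes and surface a
// single error that reports both the expectation and the actual count.
func readExactly(r io.Reader, length int) ([]byte, error) {
	buf := make([]byte, length)
	n, err := io.ReadFull(r, buf)
	if n != length {
		return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", length, n, err)
	}
	return buf, nil
}

func main() {
	_, err := readExactly(strings.NewReader("abc"), 8)
	fmt.Println(err) // wraps io.ErrUnexpectedEOF together with the counts
}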
@ -183,7 +180,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
createData["pre_hash"] = hash
}
var createResp CreateResp
_, err, e := d.requestReturnErrResp("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
_, err, e := d.requestReturnErrResp(ctx, limiterOther, "/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
req.SetBody(createData).SetResult(&createResp)
})
if err != nil {
@ -194,7 +191,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
hash := stream.GetHash().GetHash(utils.SHA1)
if len(hash) != utils.SHA1.Width {
_, hash, err = streamPkg.CacheFullInTempFileAndHash(stream, utils.SHA1)
_, hash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA1)
if err != nil {
return nil, err
}
@ -208,7 +205,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
if err != nil {
return nil, fmt.Errorf("cal proof code error: %s", err.Error())
}
_, err = d.request("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
_, err = d.request(ctx, limiterOther, "/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
req.SetBody(createData).SetResult(&createResp)
})
if err != nil {
@ -219,17 +216,20 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
if !createResp.RapidUpload {
// 2. normal upload
log.Debugf("[aliyundive_open] normal upload")
ss, err := streamPkg.NewStreamSectionReader(stream, int(partSize), &up)
if err != nil {
return nil, err
}
preTime := time.Now()
var offset, length int64 = 0, partSize
//var length
for i := 0; i < len(createResp.PartInfoList); i++ {
if utils.IsCanceled(ctx) {
return nil, ctx.Err()
}
// refresh upload url if 50 minutes passed
if time.Since(preTime) > 50*time.Minute {
createResp.PartInfoList, err = d.getUploadUrl(count, createResp.FileId, createResp.UploadId)
createResp.PartInfoList, err = d.getUploadUrl(ctx, count, createResp.FileId, createResp.UploadId)
if err != nil {
return nil, err
}
@ -238,22 +238,19 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
if remain := stream.GetSize() - offset; length > remain {
length = remain
}
rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
if rapidUpload {
srd, err := stream.RangeRead(http_range.Range{Start: offset, Length: length})
rd, err := ss.GetSectionReader(offset, length)
if err != nil {
return nil, err
}
rd = utils.NewMultiReadable(srd)
}
err = retry.Do(func() error {
_ = rd.Reset()
rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
err = retry.Do(func() error {
rd.Seek(0, io.SeekStart)
return d.uploadPart(ctx, rateLimitedRd, createResp.PartInfoList[i])
},
retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second))
ss.FreeSectionReader(rd)
if err != nil {
return nil, err
}
@ -266,5 +263,5 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
log.Debugf("[aliyundrive_open] create file success, resp: %+v", createResp)
// 3. complete
return d.completeUpload(createResp.FileId, createResp.UploadId)
return d.completeUpload(ctx, createResp.FileId, createResp.UploadId)
}

View File

@ -19,7 +19,7 @@ import (
// do others that not defined in Driver interface
func (d *AliyundriveOpen) _refreshToken() (string, string, error) {
func (d *AliyundriveOpen) _refreshToken(ctx context.Context) (string, string, error) {
if d.UseOnlineAPI && d.APIAddress != "" {
u := d.APIAddress
var resp struct {
@ -27,13 +27,23 @@ func (d *AliyundriveOpen) _refreshToken() (string, string, error) {
AccessToken string `json:"access_token"`
ErrorMessage string `json:"text"`
}
_, err := base.RestyClient.R().
// Set driver_txt according to the AlipanType option
driverTxt := "alicloud_qr"
if d.AlipanType == "alipanTV" {
driverTxt = "alicloud_tv"
}
err := d.wait(ctx, limiterOther)
if err != nil {
return "", "", err
}
_, err = base.RestyClient.R().
SetHeader("User-Agent", "Mozilla/5.0 (Macintosh; Apple macOS 15_5) AppleWebKit/537.36 (KHTML, like Gecko) Safari/537.36 Chrome/138.0.0.0 Openlist/425.6.30").
SetResult(&resp).
SetQueryParams(map[string]string{
"refresh_ui": d.RefreshToken,
"server_use": "true",
"driver_txt": "alicloud_qr",
"driver_txt": driverTxt,
}).
Get(u)
if err != nil {
@ -47,11 +57,14 @@ func (d *AliyundriveOpen) _refreshToken() (string, string, error) {
}
return resp.RefreshToken, resp.AccessToken, nil
}
// Local refresh logic; client_id and client_secret are required
if d.ClientID == "" || d.ClientSecret == "" {
return "", "", fmt.Errorf("empty ClientID or ClientSecret")
}
err := d.wait(ctx, limiterOther)
if err != nil {
return "", "", err
}
url := API_URL + "/oauth/access_token"
//var resp base.TokenResp
var e ErrResp
@ -103,18 +116,18 @@ func getSub(token string) (string, error) {
return utils.Json.Get(bs, "sub").ToString(), nil
}
func (d *AliyundriveOpen) refreshToken() error {
func (d *AliyundriveOpen) refreshToken(ctx context.Context) error {
if d.ref != nil {
return d.ref.refreshToken()
return d.ref.refreshToken(ctx)
}
refresh, access, err := d._refreshToken()
refresh, access, err := d._refreshToken(ctx)
for i := 0; i < 3; i++ {
if err == nil {
break
} else {
log.Errorf("[ali_open] failed to refresh token: %s", err)
}
refresh, access, err = d._refreshToken()
refresh, access, err = d._refreshToken(ctx)
}
if err != nil {
return err
@ -125,12 +138,12 @@ func (d *AliyundriveOpen) refreshToken() error {
return nil
}
func (d *AliyundriveOpen) request(uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error) {
b, err, _ := d.requestReturnErrResp(uri, method, callback, retry...)
func (d *AliyundriveOpen) request(ctx context.Context, limitTy limiterType, uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error) {
b, err, _ := d.requestReturnErrResp(ctx, limitTy, uri, method, callback, retry...)
return b, err
}
func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error, *ErrResp) {
func (d *AliyundriveOpen) requestReturnErrResp(ctx context.Context, limitTy limiterType, uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error, *ErrResp) {
req := base.RestyClient.R()
// TODO check whether access_token is expired
req.SetHeader("Authorization", "Bearer "+d.getAccessToken())
@ -142,6 +155,10 @@ func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base
}
var e ErrResp
req.SetError(&e)
err := d.wait(ctx, limitTy)
if err != nil {
return nil, err, nil
}
res, err := req.Execute(method, API_URL+uri)
if err != nil {
if res != nil {
@ -152,11 +169,11 @@ func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base
isRetry := len(retry) > 0 && retry[0]
if e.Code != "" {
if !isRetry && (utils.SliceContains([]string{"AccessTokenInvalid", "AccessTokenExpired", "I400JD"}, e.Code) || d.getAccessToken() == "") {
err = d.refreshToken()
err = d.refreshToken(ctx)
if err != nil {
return nil, err, nil
}
return d.requestReturnErrResp(uri, method, callback, true)
return d.requestReturnErrResp(ctx, limitTy, uri, method, callback, true)
}
return nil, fmt.Errorf("%s:%s", e.Code, e.Message), &e
}
@ -165,7 +182,7 @@ func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base
func (d *AliyundriveOpen) list(ctx context.Context, data base.Json) (*Files, error) {
var resp Files
_, err := d.request("/adrive/v1.0/openFile/list", http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterList, "/adrive/v1.0/openFile/list", http.MethodPost, func(req *resty.Request) {
req.SetBody(data).SetResult(&resp)
})
if err != nil {
@ -194,7 +211,7 @@ func (d *AliyundriveOpen) getFiles(ctx context.Context, fileId string) ([]File,
//"video_thumbnail_width": 480,
//"image_thumbnail_width": 480,
}
resp, err := d.limitList(ctx, data)
resp, err := d.list(ctx, data)
if err != nil {
return nil, err
}

View File

@ -2,7 +2,6 @@ package aliyundrive_share
import (
"context"
"fmt"
"net/http"
"time"
@ -12,7 +11,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/cron"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/rateg"
"github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
)
@ -25,8 +23,7 @@ type AliyundriveShare struct {
DriveId string
cron *cron.Cron
limitList func(ctx context.Context, dir model.Obj) ([]model.Obj, error)
limitLink func(ctx context.Context, file model.Obj) (*model.Link, error)
limiter *limiter
}
func (d *AliyundriveShare) Config() driver.Config {
@ -38,29 +35,26 @@ func (d *AliyundriveShare) GetAddition() driver.Additional {
}
func (d *AliyundriveShare) Init(ctx context.Context) error {
err := d.refreshToken()
d.limiter = getLimiter()
err := d.refreshToken(ctx)
if err != nil {
d.limiter.free()
d.limiter = nil
return err
}
err = d.getShareToken()
err = d.getShareToken(ctx)
if err != nil {
d.limiter.free()
d.limiter = nil
return err
}
d.cron = cron.NewCron(time.Hour * 2)
d.cron.Do(func() {
err := d.refreshToken()
err := d.refreshToken(ctx)
if err != nil {
log.Errorf("%+v", err)
}
})
d.limitList = rateg.LimitFnCtx(d.list, rateg.LimitFnOption{
Limit: 4,
Bucket: 1,
})
d.limitLink = rateg.LimitFnCtx(d.link, rateg.LimitFnOption{
Limit: 1,
Bucket: 1,
})
return nil
}
@ -68,19 +62,14 @@ func (d *AliyundriveShare) Drop(ctx context.Context) error {
if d.cron != nil {
d.cron.Stop()
}
d.limiter.free()
d.limiter = nil
d.DriveId = ""
return nil
}
func (d *AliyundriveShare) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
if d.limitList == nil {
return nil, fmt.Errorf("driver not init")
}
return d.limitList(ctx, dir)
}
func (d *AliyundriveShare) list(ctx context.Context, dir model.Obj) ([]model.Obj, error) {
files, err := d.getFiles(dir.GetID())
files, err := d.getFiles(ctx, dir.GetID())
if err != nil {
return nil, err
}
@ -90,13 +79,6 @@ func (d *AliyundriveShare) list(ctx context.Context, dir model.Obj) ([]model.Obj
}
func (d *AliyundriveShare) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if d.limitLink == nil {
return nil, fmt.Errorf("driver not init")
}
return d.limitLink(ctx, file)
}
func (d *AliyundriveShare) link(ctx context.Context, file model.Obj) (*model.Link, error) {
data := base.Json{
"drive_id": d.DriveId,
"file_id": file.GetID(),
@ -105,7 +87,7 @@ func (d *AliyundriveShare) link(ctx context.Context, file model.Obj) (*model.Lin
"share_id": d.ShareId,
}
var resp ShareLinkResp
_, err := d.request("https://api.alipan.com/v2/file/get_share_link_download_url", http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterLink, "https://api.alipan.com/v2/file/get_share_link_download_url", http.MethodPost, func(req *resty.Request) {
req.SetHeader(CanaryHeaderKey, CanaryHeaderValue).SetBody(data).SetResult(&resp)
})
if err != nil {
@ -135,7 +117,7 @@ func (d *AliyundriveShare) Other(ctx context.Context, args model.OtherArgs) (int
default:
return nil, errs.NotSupport
}
_, err := d.request(url, http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterOther, url, http.MethodPost, func(req *resty.Request) {
req.SetBody(data).SetResult(&resp)
})
if err != nil {

View File

@ -0,0 +1,67 @@
package aliyundrive_share
import (
"context"
"fmt"
"golang.org/x/time/rate"
)
// See the issue: https://github.com/OpenListTeam/OpenList/issues/724
// There appears to be no per-user limit.
type limiterType int
const (
limiterList limiterType = iota
limiterLink
limiterOther
)
const (
listRateLimit = 3.9 // 4 per second in document, but we use 3.9 per second to be safe
linkRateLimit = 0.9 // 1 per second in document, but we use 0.9 per second to be safe
otherRateLimit = 14.9 // 15 per second in document, but we use 14.9 per second to be safe
)
type limiter struct {
list *rate.Limiter
link *rate.Limiter
other *rate.Limiter
}
func getLimiter() *limiter {
return &limiter{
list: rate.NewLimiter(rate.Limit(listRateLimit), 1),
link: rate.NewLimiter(rate.Limit(linkRateLimit), 1),
other: rate.NewLimiter(rate.Limit(otherRateLimit), 1),
}
}
func (l *limiter) wait(ctx context.Context, typ limiterType) error {
if l == nil {
return fmt.Errorf("driver not init")
}
switch typ {
case limiterList:
return l.list.Wait(ctx)
case limiterLink:
return l.link.Wait(ctx)
case limiterOther:
return l.other.Wait(ctx)
default:
return fmt.Errorf("unknown limiter type")
}
}
func (l *limiter) free() {
}
func (d *AliyundriveShare) wait(ctx context.Context, typ limiterType) error {
if d == nil {
return fmt.Errorf("driver not init")
}
//if d.ref != nil {
// return d.ref.wait(ctx, typ) // If this is a reference driver, wait on the reference driver.
//}
return d.limiter.wait(ctx, typ)
}

View File

@ -1,6 +1,7 @@
package aliyundrive_share
import (
"context"
"errors"
"fmt"
@ -15,11 +16,15 @@ const (
CanaryHeaderValue = "client=web,app=share,version=v2.3.1"
)
func (d *AliyundriveShare) refreshToken() error {
func (d *AliyundriveShare) refreshToken(ctx context.Context) error {
err := d.wait(ctx, limiterOther)
if err != nil {
return err
}
url := "https://auth.alipan.com/v2/account/token"
var resp base.TokenResp
var e ErrorResp
_, err := base.RestyClient.R().
_, err = base.RestyClient.R().
SetBody(base.Json{"refresh_token": d.RefreshToken, "grant_type": "refresh_token"}).
SetResult(&resp).
SetError(&e).
@@ -36,7 +41,11 @@ func (d *AliyundriveShare) refreshToken() error {
}
// do others that not defined in Driver interface
func (d *AliyundriveShare) getShareToken() error {
func (d *AliyundriveShare) getShareToken(ctx context.Context) error {
err := d.wait(ctx, limiterOther)
if err != nil {
return err
}
data := base.Json{
"share_id": d.ShareId,
}
@@ -45,7 +54,7 @@ func (d *AliyundriveShare) getShareToken() error {
}
var e ErrorResp
var resp ShareTokenResp
_, err := base.RestyClient.R().
_, err = base.RestyClient.R().
SetResult(&resp).SetError(&e).SetBody(data).
Post("https://api.alipan.com/v2/share_link/get_share_token")
if err != nil {
@@ -58,7 +67,7 @@ func (d *AliyundriveShare) getShareToken() error {
return nil
}
func (d *AliyundriveShare) request(url, method string, callback base.ReqCallback) ([]byte, error) {
func (d *AliyundriveShare) request(ctx context.Context, limitTy limiterType, url, method string, callback base.ReqCallback) ([]byte, error) {
var e ErrorResp
req := base.RestyClient.R().
SetError(&e).
@@ -71,6 +80,10 @@ func (d *AliyundriveShare) request(url, method string, callback base.ReqCallback
} else {
req.SetBody("{}")
}
err := d.wait(ctx, limitTy)
if err != nil {
return nil, err
}
resp, err := req.Execute(method, url)
if err != nil {
return nil, err
@@ -78,14 +91,14 @@ func (d *AliyundriveShare) request(url, method string, callback base.ReqCallback
if e.Code != "" {
if e.Code == "AccessTokenInvalid" || e.Code == "ShareLinkTokenInvalid" {
if e.Code == "AccessTokenInvalid" {
err = d.refreshToken()
err = d.refreshToken(ctx)
} else {
err = d.getShareToken()
err = d.getShareToken(ctx)
}
if err != nil {
return nil, err
}
return d.request(url, method, callback)
return d.request(ctx, limitTy, url, method, callback)
} else {
return nil, errors.New(e.Code + ": " + e.Message)
}
@@ -93,7 +106,7 @@ func (d *AliyundriveShare) request(url, method string, callback base.ReqCallback
return resp.Body(), nil
}
func (d *AliyundriveShare) getFiles(fileId string) ([]File, error) {
func (d *AliyundriveShare) getFiles(ctx context.Context, fileId string) ([]File, error) {
files := make([]File, 0)
data := base.Json{
"image_thumbnail_process": "image/resize,w_160/format,jpeg",
@@ -110,6 +123,10 @@ func (d *AliyundriveShare) getFiles(fileId string) ([]File, error) {
if data["marker"] == "first" {
data["marker"] = ""
}
err := d.wait(ctx, limiterList)
if err != nil {
return nil, err
}
var e ErrorResp
var resp ListResp
res, err := base.RestyClient.R().
@@ -123,11 +140,11 @@ func (d *AliyundriveShare) getFiles(fileId string) ([]File, error) {
log.Debugf("aliyundrive share get files: %s", res.String())
if e.Code != "" {
if e.Code == "AccessTokenInvalid" || e.Code == "ShareLinkTokenInvalid" {
err = d.getShareToken()
err = d.getShareToken(ctx)
if err != nil {
return nil, err
}
return d.getFiles(fileId)
return d.getFiles(ctx, fileId)
}
return nil, errors.New(e.Message)
}
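
Both request and getFiles recover the same way: refresh the relevant token on AccessTokenInvalid or ShareLinkTokenInvalid, then replay the original call. A condensed, runnable model of that shape; withReauth and errAuth are hypothetical names, and unlike the driver's unbounded recursion this sketch retries exactly once:

package main

import (
	"context"
	"errors"
	"fmt"
)

var errAuth = errors.New("AccessTokenInvalid") // hypothetical sentinel

// withReauth refreshes credentials and replays fn once when fn reports an
// auth failure, mirroring the inline pattern in request/getFiles.
func withReauth(ctx context.Context, refresh func(context.Context) error, fn func() error) error {
	err := fn()
	if err == nil || !errors.Is(err, errAuth) {
		return err
	}
	if rerr := refresh(ctx); rerr != nil {
		return rerr
	}
	return fn() // single replay with fresh credentials
}

func main() {
	calls := 0
	err := withReauth(context.Background(),
		func(context.Context) error { fmt.Println("refreshing token"); return nil },
		func() error {
			calls++
			if calls == 1 {
				return errAuth // first call fails with an expired token
			}
			return nil
		})
	fmt.Println("err:", err, "calls:", calls) // err: <nil> calls: 2
}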


@@ -63,7 +63,6 @@ import (
_ "github.com/OpenListTeam/OpenList/v4/drivers/thunder"
_ "github.com/OpenListTeam/OpenList/v4/drivers/thunder_browser"
_ "github.com/OpenListTeam/OpenList/v4/drivers/thunderx"
_ "github.com/OpenListTeam/OpenList/v4/drivers/trainbit"
_ "github.com/OpenListTeam/OpenList/v4/drivers/url_tree"
_ "github.com/OpenListTeam/OpenList/v4/drivers/uss"
_ "github.com/OpenListTeam/OpenList/v4/drivers/virtual"


@@ -203,11 +203,12 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
streamSize := stream.GetSize()
sliceSize := d.getSliceSize(streamSize)
count := int(streamSize / sliceSize)
count := 1
if streamSize > sliceSize {
count = int((streamSize + sliceSize - 1) / sliceSize)
}
lastBlockSize := streamSize % sliceSize
if lastBlockSize > 0 {
count++
} else {
if lastBlockSize == 0 {
lastBlockSize = sliceSize
}
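
The replacement arithmetic is plain ceiling division: count is the ceiling of streamSize/sliceSize with a floor of one chunk, and the last block is the remainder (a full slice when the size divides evenly). One nuance: a zero-byte stream now counts as a single empty chunk, where the old expression produced zero. A quick self-contained check:

package main

import "fmt"

// chunkLayout reproduces the new count/lastBlockSize logic.
func chunkLayout(streamSize, sliceSize int64) (count, lastBlockSize int64) {
	count = 1
	if streamSize > sliceSize {
		count = (streamSize + sliceSize - 1) / sliceSize // ceiling division
	}
	lastBlockSize = streamSize % sliceSize
	if lastBlockSize == 0 {
		lastBlockSize = sliceSize
	}
	return count, lastBlockSize
}

func main() {
	fmt.Println(chunkLayout(10<<20, 4<<20)) // 3 2097152: three chunks, last is 2 MiB
	fmt.Println(chunkLayout(8<<20, 4<<20))  // 2 4194304: even split, last is a full slice
}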


@@ -262,11 +262,12 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
// calculate the needed data
streamSize := stream.GetSize()
count := int(streamSize / DEFAULT)
count := 1
if streamSize > DEFAULT {
count = int((streamSize + DEFAULT - 1) / DEFAULT)
}
lastBlockSize := streamSize % DEFAULT
if lastBlockSize > 0 {
count++
} else {
if lastBlockSize == 0 {
lastBlockSize = DEFAULT
}


@@ -255,7 +255,7 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStr
},
UpdateProgress: up,
})
req, err := http.NewRequestWithContext(ctx, "POST", "https://pan-yz.chaoxing.com/upload", r)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, "https://pan-yz.chaoxing.com/upload", r)
if err != nil {
return err
}


@@ -32,7 +32,6 @@ func init() {
config: driver.Config{
Name: "ChaoXingGroupDrive",
OnlyProxy: true,
OnlyLocal: false,
DefaultRoot: "-1",
NoOverwriteUpload: true,
},


@@ -167,7 +167,7 @@ func (d *ChaoXing) Login() (string, error) {
return "", err
}
// Create the request
req, err := http.NewRequest("POST", "https://passport2.chaoxing.com/fanyalogin", body)
req, err := http.NewRequest(http.MethodPost, "https://passport2.chaoxing.com/fanyalogin", body)
if err != nil {
return "", err
}


@@ -20,6 +20,7 @@ type Addition struct {
var config = driver.Config{
Name: "Cloudreve",
DefaultRoot: "/",
LocalSort: true,
}
func init() {


@@ -18,8 +18,10 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/setting"
streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/cookie"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
jsoniter "github.com/json-iterator/go"
)
@@ -235,13 +237,16 @@ func (d *Cloudreve) upLocal(ctx context.Context, stream model.FileStreamer, u Up
}
func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
DEFAULT := int64(u.ChunkSize)
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
if err != nil {
return err
}
uploadUrl := u.UploadURLs[0]
credential := u.Credential
var finish int64 = 0
var chunk int = 0
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
@@ -249,30 +254,28 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
left := stream.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[Cloudreve-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
rd, err := ss.GetSectionReader(finish, byteSize)
if err != nil {
return err
}
req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk),
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
err = retry.Do(
func() error {
rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadUrl+"?chunk="+strconv.Itoa(chunk),
driver.NewLimitedUploadStream(ctx, rd))
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = byteSize
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Authorization", fmt.Sprint(credential))
req.Header.Set("User-Agent", d.getUA())
err = func() error {
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != 200 {
return errors.New(res.Status)
return fmt.Errorf("server error: %d", res.StatusCode)
}
body, err := io.ReadAll(res.Body)
if err != nil {
@@ -287,31 +290,31 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
return errors.New(up.Msg)
}
return nil
}()
if err == nil {
retryCount = 0
},
retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.FreeSectionReader(rd)
if err != nil {
return err
}
finish += byteSize
up(float64(finish) * 100 / float64(stream.GetSize()))
chunk++
} else {
retryCount++
if retryCount > maxRetries {
return fmt.Errorf("upload failed after %d retries due to server errors, error: %s", maxRetries, err)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("[Cloudreve-Remote] server errors while uploading, retrying after %v...", backoff)
time.Sleep(backoff)
}
}
return nil
}
func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
DEFAULT := int64(u.ChunkSize)
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
if err != nil {
return err
}
uploadUrl := u.UploadURLs[0]
var finish int64 = 0
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
@@ -319,47 +322,46 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
left := stream.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[Cloudreve-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
rd, err := ss.GetSectionReader(finish, byteSize)
if err != nil {
return err
}
req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
err = retry.Do(
func() error {
rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, rd))
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = byteSize
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()))
req.Header.Set("User-Agent", d.getUA())
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
switch {
case res.StatusCode >= 500 && res.StatusCode <= 504:
retryCount++
if retryCount > maxRetries {
res.Body.Close()
return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("[Cloudreve-OneDrive] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
time.Sleep(backoff)
return fmt.Errorf("server error: %d", res.StatusCode)
case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200:
data, _ := io.ReadAll(res.Body)
res.Body.Close()
return errors.New(string(data))
default:
res.Body.Close()
retryCount = 0
return nil
}
}, retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.FreeSectionReader(rd)
if err != nil {
return err
}
finish += byteSize
up(float64(finish) * 100 / float64(stream.GetSize()))
}
}
// on success, send the upload callback request
return d.request(http.MethodPost, "/callback/onedrive/finish/"+u.SessionID, func(req *resty.Request) {
req.SetBody("{}")
@@ -367,12 +369,15 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
}
func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
DEFAULT := int64(u.ChunkSize)
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
if err != nil {
return err
}
var finish int64 = 0
var chunk int = 0
var etags []string
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
@@ -380,19 +385,20 @@ func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u Uploa
left := stream.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[Cloudreve-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
rd, err := ss.GetSectionReader(finish, byteSize)
if err != nil {
return err
}
req, err := http.NewRequest("PUT", u.UploadURLs[chunk],
driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
err = retry.Do(
func() error {
rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, u.UploadURLs[chunk],
driver.NewLimitedUploadStream(ctx, rd))
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = byteSize
req.Header.Set("User-Agent", d.getUA())
res, err := base.HttpClient.Do(req)
if err != nil {
return err
@@ -401,24 +407,25 @@ func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u Uploa
res.Body.Close()
switch {
case res.StatusCode != 200:
retryCount++
if retryCount > maxRetries {
return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("[Cloudreve-S3] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
time.Sleep(backoff)
return fmt.Errorf("server error: %d", res.StatusCode)
case etag == "":
return errors.New("failed to get ETag from header")
default:
retryCount = 0
etags = append(etags, etag)
return nil
}
}, retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.FreeSectionReader(rd)
if err != nil {
return err
}
finish += byteSize
up(float64(finish) * 100 / float64(stream.GetSize()))
chunk++
}
}
// s3LikeFinishUpload
// https://github.com/cloudreve/frontend/blob/b485bf297974cbe4834d2e8e744ae7b7e5b2ad39/src/component/Uploader/core/api/index.ts#L204-L252
bodyBuilder := &strings.Builder{}
@@ -431,8 +438,8 @@ func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u Uploa
))
}
bodyBuilder.WriteString("</CompleteMultipartUpload>")
req, err := http.NewRequest(
"POST",
req, err := http.NewRequestWithContext(ctx,
http.MethodPost,
u.CompleteURL,
strings.NewReader(bodyBuilder.String()),
)
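
All three Cloudreve uploaders now share one skeleton: lease a reusable section reader, rewind it at the top of every retry attempt (a failed attempt may have partially consumed it), and free it whether or not the chunk succeeded. A runnable toy of that skeleton, in which io.NewSectionReader and a plain loop stand in for internal/stream's StreamSectionReader and retry.Do:

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

// uploadChunked is the shared skeleton: cut the stream into sections,
// rewind before every attempt, free the section afterwards. attempt
// stands in for the HTTP round-trip.
func uploadChunked(src *strings.Reader, chunkSize int64, attempt func(io.ReadSeeker, int64) error) error {
	size := src.Size()
	var finish int64
	for finish < size {
		byteSize := min(size-finish, chunkSize)
		rd := io.NewSectionReader(src, finish, byteSize) // "GetSectionReader"
		var err error
		for try := 0; try < 3; try++ { // retry.Do(..., retry.Attempts(3), ...)
			rd.Seek(0, io.SeekStart) // a failed attempt may have consumed the reader
			if err = attempt(rd, finish); err == nil {
				break
			}
		}
		// in the real code: ss.FreeSectionReader(rd), success or not
		if err != nil {
			return err
		}
		finish += byteSize
	}
	return nil
}

func main() {
	fails := 2
	err := uploadChunked(strings.NewReader("0123456789"), 4, func(r io.ReadSeeker, off int64) error {
		if fails > 0 { // first chunk fails twice, succeeds on the third try
			fails--
			return errors.New("server error: 500")
		}
		b, _ := io.ReadAll(r)
		fmt.Printf("uploaded %q at offset %d\n", b, off)
		return nil
	})
	fmt.Println("err:", err)
}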


@@ -26,15 +26,8 @@ type Addition struct {
var config = driver.Config{
Name: "Cloudreve V4",
LocalSort: false,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: false,
NeedMs: false,
DefaultRoot: "cloudreve://my",
CheckStatus: true,
Alert: "",
NoOverwriteUpload: true,
}


@@ -47,7 +47,13 @@ type BasicConfigResp struct {
type SiteLoginConfigResp struct {
LoginCaptcha bool `json:"login_captcha"`
Authn bool `json:"authn"`
// RegCaptcha bool `json:"reg_captcha"`
// ForgetCaptcha bool `json:"forget_captcha"`
// RegisterEnabled bool `json:"register_enabled"`
// TosURL string `json:"tos_url"`
// PrivacyPolicyURL string `json:"privacy_policy_url"`
// SsoDisplayName string `json:"sso_display_name"`
// OidcDisplayName string `json:"oidc_display_name"`
}
type PrepareLoginResp struct {


@@ -19,7 +19,9 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/setting"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
jsoniter "github.com/json-iterator/go"
)
@@ -95,9 +97,6 @@ func (d *CloudreveV4) login() error {
if err != nil {
return err
}
if !siteConfig.Authn {
return errors.New("authn not support")
}
var prepareLogin PrepareLoginResp
err = d.request(http.MethodGet, "/session/prepare?email="+d.Addition.Username, nil, &prepareLogin)
if err != nil {
@@ -253,13 +252,16 @@ func (d *CloudreveV4) upLocal(ctx context.Context, file model.FileStreamer, u Fi
}
func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
DEFAULT := int64(u.ChunkSize)
ss, err := stream.NewStreamSectionReader(file, int(DEFAULT), &up)
if err != nil {
return err
}
uploadUrl := u.UploadUrls[0]
credential := u.Credential
var finish int64 = 0
var chunk int = 0
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < file.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
@@ -267,30 +269,29 @@ func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u F
left := file.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[CloudreveV4-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(file, byteData)
utils.Log.Debug(err, n)
rd, err := ss.GetSectionReader(finish, byteSize)
if err != nil {
return err
}
req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk),
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
err = retry.Do(
func() error {
rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadUrl+"?chunk="+strconv.Itoa(chunk),
driver.NewLimitedUploadStream(ctx, rd))
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = byteSize
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Authorization", fmt.Sprint(credential))
req.Header.Set("User-Agent", d.getUA())
err = func() error {
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != 200 {
return errors.New(res.Status)
return fmt.Errorf("server error: %d", res.StatusCode)
}
body, err := io.ReadAll(res.Body)
if err != nil {
@@ -305,31 +306,30 @@ func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u F
return errors.New(up.Msg)
}
return nil
}()
if err == nil {
retryCount = 0
}, retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.FreeSectionReader(rd)
if err != nil {
return err
}
finish += byteSize
up(float64(finish) * 100 / float64(file.GetSize()))
chunk++
} else {
retryCount++
if retryCount > maxRetries {
return fmt.Errorf("upload failed after %d retries due to server errors, error: %s", maxRetries, err)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("[Cloudreve-Remote] server errors while uploading, retrying after %v...", backoff)
time.Sleep(backoff)
}
}
return nil
}
func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
DEFAULT := int64(u.ChunkSize)
ss, err := stream.NewStreamSectionReader(file, int(DEFAULT), &up)
if err != nil {
return err
}
uploadUrl := u.UploadUrls[0]
var finish int64 = 0
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < file.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
@@ -337,47 +337,47 @@ func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u
left := file.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[CloudreveV4-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(file, byteData)
utils.Log.Debug(err, n)
rd, err := ss.GetSectionReader(finish, byteSize)
if err != nil {
return err
}
req, err := http.NewRequest(http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
err = retry.Do(
func() error {
rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, rd))
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = byteSize
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, file.GetSize()))
req.Header.Set("User-Agent", d.getUA())
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
switch {
case res.StatusCode >= 500 && res.StatusCode <= 504:
retryCount++
if retryCount > maxRetries {
res.Body.Close()
return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("[CloudreveV4-OneDrive] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
time.Sleep(backoff)
return fmt.Errorf("server error: %d", res.StatusCode)
case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200:
data, _ := io.ReadAll(res.Body)
res.Body.Close()
return errors.New(string(data))
default:
res.Body.Close()
retryCount = 0
return nil
}
}, retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.FreeSectionReader(rd)
if err != nil {
return err
}
finish += byteSize
up(float64(finish) * 100 / float64(file.GetSize()))
}
}
// on success, send the upload callback request
return d.request(http.MethodPost, "/callback/onedrive/"+u.SessionID+"/"+u.CallbackSecret, func(req *resty.Request) {
req.SetBody("{}")
@@ -385,12 +385,15 @@ func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u
}
func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
DEFAULT := int64(u.ChunkSize)
ss, err := stream.NewStreamSectionReader(file, int(DEFAULT), &up)
if err != nil {
return err
}
var finish int64 = 0
var chunk int = 0
var etags []string
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < file.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
@@ -398,19 +401,20 @@ func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileU
left := file.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[CloudreveV4-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(file, byteData)
utils.Log.Debug(err, n)
rd, err := ss.GetSectionReader(finish, byteSize)
if err != nil {
return err
}
req, err := http.NewRequest(http.MethodPut, u.UploadUrls[chunk],
driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
err = retry.Do(
func() error {
rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, u.UploadUrls[chunk],
driver.NewLimitedUploadStream(ctx, rd))
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = byteSize
req.Header.Set("User-Agent", d.getUA())
res, err := base.HttpClient.Do(req)
if err != nil {
return err
@@ -419,23 +423,26 @@ func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileU
res.Body.Close()
switch {
case res.StatusCode != 200:
retryCount++
if retryCount > maxRetries {
return fmt.Errorf("upload failed after %d retries due to server errors", maxRetries)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("server error %d, retrying after %v...", res.StatusCode, backoff)
time.Sleep(backoff)
return fmt.Errorf("server error: %d", res.StatusCode)
case etag == "":
return errors.New("failed to get ETag from header")
default:
retryCount = 0
etags = append(etags, etag)
return nil
}
},
retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.FreeSectionReader(rd)
if err != nil {
return err
}
finish += byteSize
up(float64(finish) * 100 / float64(file.GetSize()))
chunk++
}
}
// s3LikeFinishUpload
bodyBuilder := &strings.Builder{}
@@ -448,8 +455,8 @@ func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileU
))
}
bodyBuilder.WriteString("</CompleteMultipartUpload>")
req, err := http.NewRequest(
"POST",
req, err := http.NewRequestWithContext(ctx,
http.MethodPost,
u.CompleteURL,
strings.NewReader(bodyBuilder.String()),
)


@@ -1,12 +1,14 @@
package crypt
import (
"bytes"
"context"
"fmt"
"io"
stdpath "path"
"regexp"
"strings"
"sync"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
@@ -110,7 +112,7 @@ func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
//return d.list(ctx, d.RemotePath, path)
//remoteFull
objs, err := fs.List(ctx, d.getPathForRemote(path, true), &fs.ListArgs{NoLog: true})
objs, err := fs.List(ctx, d.getPathForRemote(path, true), &fs.ListArgs{NoLog: true, Refresh: args.Refresh})
// the obj must implement the model.SetPath interface
// return objs, err
if err != nil {
@@ -241,6 +243,9 @@ func (d *Crypt) Get(ctx context.Context, path string) (model.Obj, error) {
//return nil, errs.ObjectNotFound
}
// https://github.com/rclone/rclone/blob/v1.67.0/backend/crypt/cipher.go#L37
const fileHeaderSize = 32
func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
dstDirActualPath, err := d.getActualPathForRemote(file.GetPath(), false)
if err != nil {
@@ -251,61 +256,69 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
return nil, err
}
if remoteLink.RangeReadCloser == nil && remoteLink.MFile == nil && len(remoteLink.URL) == 0 {
remoteSize := remoteLink.ContentLength
if remoteSize <= 0 {
remoteSize = remoteFile.GetSize()
}
rrf, err := stream.GetRangeReaderFromLink(remoteSize, remoteLink)
if err != nil {
_ = remoteLink.Close()
return nil, fmt.Errorf("the remote storage driver need to be enhanced to support encrytion")
}
remoteFileSize := remoteFile.GetSize()
remoteClosers := utils.EmptyClosers()
rangeReaderFunc := func(ctx context.Context, underlyingOffset, underlyingLength int64) (io.ReadCloser, error) {
length := underlyingLength
if underlyingLength >= 0 && underlyingOffset+underlyingLength >= remoteFileSize {
length = -1
mu := &sync.Mutex{}
var fileHeader []byte
rangeReaderFunc := func(ctx context.Context, offset, limit int64) (io.ReadCloser, error) {
length := limit
if offset == 0 && limit > 0 {
mu.Lock()
if limit <= fileHeaderSize {
defer mu.Unlock()
if fileHeader != nil {
return io.NopCloser(bytes.NewReader(fileHeader[:limit])), nil
}
rrc := remoteLink.RangeReadCloser
if len(remoteLink.URL) > 0 {
var converted, err = stream.GetRangeReadCloserFromLink(remoteFileSize, remoteLink)
length = fileHeaderSize
} else if fileHeader == nil {
defer mu.Unlock()
} else {
mu.Unlock()
}
}
remoteReader, err := rrf.RangeRead(ctx, http_range.Range{Start: offset, Length: length})
if err != nil {
return nil, err
}
rrc = converted
if offset == 0 && limit > 0 {
fileHeader = make([]byte, fileHeaderSize)
n, err := io.ReadFull(remoteReader, fileHeader)
if n != fileHeaderSize {
fileHeader = nil
return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", fileHeaderSize, n, err)
}
if limit <= fileHeaderSize {
remoteReader.Close()
return io.NopCloser(bytes.NewReader(fileHeader[:limit])), nil
} else {
remoteReader = utils.ReadCloser{
Reader: io.MultiReader(bytes.NewReader(fileHeader), remoteReader),
Closer: remoteReader,
}
}
if rrc != nil {
remoteReader, err := rrc.RangeRead(ctx, http_range.Range{Start: underlyingOffset, Length: length})
remoteClosers.AddClosers(rrc.GetClosers())
if err != nil {
return nil, err
}
return remoteReader, nil
}
if remoteLink.MFile != nil {
_, err := remoteLink.MFile.Seek(underlyingOffset, io.SeekStart)
if err != nil {
return nil, err
}
//keep reuse same MFile and close at last.
remoteClosers.Add(remoteLink.MFile)
return io.NopCloser(remoteLink.MFile), nil
}
return nil, errs.NotSupport
}
resultRangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
return &model.Link{
RangeReader: stream.RangeReaderFunc(func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
readSeeker, err := d.cipher.DecryptDataSeek(ctx, rangeReaderFunc, httpRange.Start, httpRange.Length)
if err != nil {
return nil, err
}
return readSeeker, nil
}
resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: remoteClosers}
resultLink := &model.Link{
RangeReadCloser: resultRangeReadCloser,
Expiration: remoteLink.Expiration,
}
return resultLink, nil
}),
SyncClosers: utils.NewSyncClosers(remoteLink),
}, nil
}
func (d *Crypt) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
@@ -388,7 +401,6 @@ func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, streamer model.FileSt
},
Reader: wrappedIn,
Mimetype: "application/octet-stream",
WebPutAsTask: streamer.NeedStore(),
ForceStreamUpload: true,
Exist: streamer.GetExist(),
}
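
The heart of the new Link is the fileHeader cache: an rclone-crypt object starts with a fixed 32-byte header (per the cipher layout linked in the comment above), so the first ranged read fetches and memoizes it, and later reads that fall inside the header are served from memory rather than another remote round-trip. A toy model of just the memoization, with fetch standing in for the remote RangeRead:

package main

import (
	"bytes"
	"fmt"
	"io"
	"sync"
)

const fileHeaderSize = 32 // rclone crypt: 8-byte magic plus 24-byte nonce

type headerCache struct {
	mu     sync.Mutex
	header []byte
}

// read serves header-sized reads at offset 0 from cache, fetching and
// memoizing the header on first use; fetch stands in for the remote
// RangeRead of the real driver.
func (c *headerCache) read(offset, limit int64, fetch func(off, n int64) []byte) io.Reader {
	if offset == 0 && limit > 0 && limit <= fileHeaderSize {
		c.mu.Lock()
		defer c.mu.Unlock()
		if c.header == nil {
			c.header = fetch(0, fileHeaderSize)
		}
		return bytes.NewReader(c.header[:limit])
	}
	return bytes.NewReader(fetch(offset, limit))
}

func main() {
	remote := bytes.Repeat([]byte{0xAB}, 64)
	calls := 0
	fetch := func(off, n int64) []byte { calls++; return remote[off : off+n] }
	c := &headerCache{}
	for i := 0; i < 3; i++ {
		io.ReadAll(c.read(0, 16, fetch))
	}
	fmt.Println("remote calls:", calls) // 1: repeat header reads hit the cache
}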


@@ -28,15 +28,10 @@ type Addition struct {
var config = driver.Config{
Name: "Crypt",
LocalSort: true,
OnlyLocal: true,
OnlyProxy: true,
NoCache: true,
NoUpload: false,
NeedMs: false,
DefaultRoot: "/",
CheckStatus: false,
Alert: "",
NoOverwriteUpload: false,
NoLinkURL: true,
}
func init() {


@@ -236,7 +236,7 @@ func (d *Doubao) Put(ctx context.Context, dstDir model.Obj, file model.FileStrea
// choose the upload method based on file size
if file.GetSize() <= 1*utils.MB { // under 1MB, use the normal upload mode
return d.Upload(&uploadConfig, dstDir, file, up, dataType)
return d.Upload(ctx, &uploadConfig, dstDir, file, up, dataType)
}
// large files use multipart upload
return d.UploadByMultipart(ctx, &uploadConfig, file.GetSize(), dstDir, file, up, dataType)


@@ -18,15 +18,7 @@ type Addition struct {
var config = driver.Config{
Name: "Doubao",
LocalSort: true,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: false,
NeedMs: false,
DefaultRoot: "0",
CheckStatus: false,
Alert: "",
NoOverwriteUpload: false,
}
func init() {


@@ -129,7 +129,7 @@ type BuiAuditInfo struct {
AuditInfo AuditInfo `json:"audit_info"`
IsAuditing bool `json:"is_auditing"`
AuditStatus int `json:"audit_status"`
LastUpdateTime int `json:"last_update_time"`
LastUpdateTime int64 `json:"last_update_time"`
UnpassReason string `json:"unpass_reason"`
Details Details `json:"details"`
}
@@ -184,7 +184,7 @@ type UserInfo struct {
SecUserID string `json:"sec_user_id"`
SessionKey string `json:"session_key"`
UseHmRegion bool `json:"use_hm_region"`
UserCreateTime int `json:"user_create_time"`
UserCreateTime int64 `json:"user_create_time"`
UserID int64 `json:"user_id"`
UserIDStr string `json:"user_id_str"`
UserVerified bool `json:"user_verified"`


@@ -14,7 +14,7 @@ import (
"math/rand"
"net/http"
"net/url"
"path/filepath"
stdpath "path"
"sort"
"strconv"
"strings"
@@ -24,6 +24,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
@@ -353,7 +354,7 @@ func (d *Doubao) getUploadConfig(upConfig *UploadConfig, dataType string, file m
"ServiceId": d.UploadToken.Alice[dataType].ServiceID,
"NeedFallback": "true",
"FileSize": strconv.FormatInt(file.GetSize(), 10),
"FileExtension": filepath.Ext(file.GetName()),
"FileExtension": stdpath.Ext(file.GetName()),
"s": randomString(),
}
}
@@ -447,39 +448,65 @@ func (d *Doubao) uploadNode(uploadConfig *UploadConfig, dir model.Obj, file mode
}
// Upload implements the normal (non-multipart) upload
func (d *Doubao) Upload(config *UploadConfig, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, dataType string) (model.Obj, error) {
data, err := io.ReadAll(file)
func (d *Doubao) Upload(ctx context.Context, config *UploadConfig, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, dataType string) (model.Obj, error) {
ss, err := stream.NewStreamSectionReader(file, int(file.GetSize()), &up)
if err != nil {
return nil, err
}
reader, err := ss.GetSectionReader(0, file.GetSize())
if err != nil {
return nil, err
}
// calculate the CRC32
crc32Hash := crc32.NewIEEE()
crc32Hash.Write(data)
w, err := utils.CopyWithBuffer(crc32Hash, reader)
if w != file.GetSize() {
return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", file.GetSize(), w, err)
}
crc32Value := hex.EncodeToString(crc32Hash.Sum(nil))
// build the request path
uploadNode := config.InnerUploadAddress.UploadNodes[0]
storeInfo := uploadNode.StoreInfos[0]
uploadUrl := fmt.Sprintf("https://%s/upload/v1/%s", uploadNode.UploadHost, storeInfo.StoreURI)
uploadResp := UploadResp{}
if _, err = d.uploadRequest(uploadUrl, http.MethodPost, storeInfo, func(req *resty.Request) {
req.SetHeaders(map[string]string{
"Content-Type": "application/octet-stream",
"Content-Crc32": crc32Value,
"Content-Length": fmt.Sprintf("%d", len(data)),
"Content-Disposition": fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI)),
})
req.SetBody(data)
}, &uploadResp); err != nil {
return nil, err
rateLimitedRd := driver.NewLimitedUploadStream(ctx, reader)
err = d._retryOperation("Upload", func() error {
reader.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadUrl, rateLimitedRd)
if err != nil {
return err
}
if uploadResp.Code != 2000 {
return nil, fmt.Errorf("upload failed: %s", uploadResp.Message)
req.Header = map[string][]string{
"Referer": {BaseURL + "/"},
"Origin": {BaseURL},
"User-Agent": {UserAgent},
"X-Storage-U": {d.UserId},
"Authorization": {storeInfo.Auth},
"Content-Type": {"application/octet-stream"},
"Content-Crc32": {crc32Value},
"Content-Length": {fmt.Sprintf("%d", file.GetSize())},
"Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))},
}
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
bytes, _ := io.ReadAll(res.Body)
resp := UploadResp{}
utils.Json.Unmarshal(bytes, &resp)
if resp.Code != 2000 {
return fmt.Errorf("upload part failed: %s", resp.Message)
} else if resp.Data.Crc32 != crc32Value {
return fmt.Errorf("upload part failed: crc32 mismatch, expected %s, got %s", crc32Value, resp.Data.Crc32)
}
return nil
})
ss.FreeSectionReader(reader)
if err != nil {
return nil, err
}
uploadNodeResp, err := d.uploadNode(config, dstDir, file, dataType)
@@ -516,69 +543,107 @@ func (d *Doubao) UploadByMultipart(ctx context.Context, config *UploadConfig, fi
if config.InnerUploadAddress.AdvanceOption.SliceSize > 0 {
chunkSize = int64(config.InnerUploadAddress.AdvanceOption.SliceSize)
}
ss, err := stream.NewStreamSectionReader(file, int(chunkSize), &up)
if err != nil {
return nil, err
}
totalParts := (fileSize + chunkSize - 1) / chunkSize
// create the list of part records
parts := make([]UploadPart, totalParts)
// cache the file
tempFile, err := file.CacheFullInTempFile()
if err != nil {
return nil, fmt.Errorf("failed to cache file: %w", err)
}
defer tempFile.Close()
up(10.0) // update progress
// set up parallel uploading
threadG, uploadCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
retry.Attempts(1),
thread := min(int(totalParts), d.uploadThread)
threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
retry.Attempts(MaxRetryAttempts),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
retry.DelayType(retry.BackOffDelay),
retry.MaxJitter(200*time.Millisecond),
)
var partsMutex sync.Mutex
// upload all parts in parallel
for partIndex := int64(0); partIndex < totalParts; partIndex++ {
hash := crc32.NewIEEE()
for partIndex := range totalParts {
if utils.IsCanceled(uploadCtx) {
break
}
partIndex := partIndex
partNumber := partIndex + 1 // 分片编号从1开始
threadG.Go(func(ctx context.Context) error {
// calculate this part's size and offset
offset := partIndex * chunkSize
size := chunkSize
if partIndex == totalParts-1 {
size = fileSize - offset
}
limitedReader := driver.NewLimitedUploadStream(ctx, io.NewSectionReader(tempFile, offset, size))
// read the data into memory
data, err := io.ReadAll(limitedReader)
if err != nil {
return fmt.Errorf("failed to read part %d: %w", partNumber, err)
}
// calculate the CRC32
crc32Value := calculateCRC32(data)
// upload the part using _retryOperation
var uploadPart UploadPart
if err = d._retryOperation(fmt.Sprintf("Upload part %d", partNumber), func() error {
var reader *stream.SectionReader
var rateLimitedRd io.Reader
crc32Value := ""
threadG.GoWithLifecycle(errgroup.Lifecycle{
Before: func(ctx context.Context) error {
if reader == nil {
var err error
uploadPart, err = d.uploadPart(config, uploadUrl, uploadID, partNumber, data, crc32Value)
reader, err = ss.GetSectionReader(offset, size)
if err != nil {
return err
}); err != nil {
return fmt.Errorf("part %d upload failed: %w", partNumber, err)
}
hash.Reset()
w, err := utils.CopyWithBuffer(hash, reader)
if w != size {
return fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", size, w, err)
}
crc32Value = hex.EncodeToString(hash.Sum(nil))
rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
}
return nil
},
Do: func(ctx context.Context) error {
reader.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("%s?uploadid=%s&part_number=%d&phase=transfer", uploadUrl, uploadID, partNumber), rateLimitedRd)
if err != nil {
return err
}
req.Header = map[string][]string{
"Referer": {BaseURL + "/"},
"Origin": {BaseURL},
"User-Agent": {UserAgent},
"X-Storage-U": {d.UserId},
"Authorization": {storeInfo.Auth},
"Content-Type": {"application/octet-stream"},
"Content-Crc32": {crc32Value},
"Content-Length": {fmt.Sprintf("%d", size)},
"Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))},
}
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
bytes, _ := io.ReadAll(res.Body)
uploadResp := UploadResp{}
utils.Json.Unmarshal(bytes, &uploadResp)
if uploadResp.Code != 2000 {
return fmt.Errorf("upload part failed: %s", uploadResp.Message)
} else if uploadResp.Data.Crc32 != crc32Value {
return fmt.Errorf("upload part failed: crc32 mismatch, expected %s, got %s", crc32Value, uploadResp.Data.Crc32)
}
// record the successfully uploaded part
partsMutex.Lock()
parts[partIndex] = UploadPart{
PartNumber: strconv.FormatInt(partNumber, 10),
Etag: uploadPart.Etag,
Etag: uploadResp.Data.Etag,
Crc32: crc32Value,
}
partsMutex.Unlock()
// update progress
progress := 10.0 + 90.0*float64(threadG.Success()+1)/float64(totalParts)
up(math.Min(progress, 95.0))
return nil
},
After: func(err error) {
ss.FreeSectionReader(reader)
},
})
}
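
UploadByMultipart now pushes each part through the ordered errgroup's three-phase lifecycle: Before does one-time setup (lease a section reader, hash it), Do is the only phase that reruns under retry, and After always releases the buffer. A stripped-down, runnable model of why that split matters; lifecycle and runWithRetry here are toys, not the real errgroup API:

package main

import (
	"errors"
	"fmt"
)

// lifecycle mirrors the errgroup.Lifecycle split used above: Before runs
// once per part, Do may run several times under retry, After always runs.
type lifecycle struct {
	Before func() error
	Do     func() error
	After  func(error)
}

func runWithRetry(l lifecycle, attempts int) error {
	if err := l.Before(); err != nil {
		return err
	}
	var err error
	for i := 0; i < attempts; i++ {
		if err = l.Do(); err == nil {
			break
		}
	}
	l.After(err)
	return err
}

func main() {
	tries := 0
	err := runWithRetry(lifecycle{
		Before: func() error { fmt.Println("lease section reader, hash CRC32 once"); return nil },
		Do: func() error {
			tries++
			if tries < 2 {
				return errors.New("transient 5xx") // retried without re-hashing
			}
			fmt.Println("part uploaded on try", tries)
			return nil
		},
		After: func(error) { fmt.Println("free section reader") },
	}, 3)
	fmt.Println("err:", err)
}
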
@@ -681,42 +746,6 @@ func (d *Doubao) initMultipartUpload(config *UploadConfig, uploadUrl string, sto
return uploadResp.Data.UploadId, nil
}
// part upload implementation
func (d *Doubao) uploadPart(config *UploadConfig, uploadUrl, uploadID string, partNumber int64, data []byte, crc32Value string) (resp UploadPart, err error) {
uploadResp := UploadResp{}
storeInfo := config.InnerUploadAddress.UploadNodes[0].StoreInfos[0]
_, err = d.uploadRequest(uploadUrl, http.MethodPost, storeInfo, func(req *resty.Request) {
req.SetHeaders(map[string]string{
"Content-Type": "application/octet-stream",
"Content-Crc32": crc32Value,
"Content-Length": fmt.Sprintf("%d", len(data)),
"Content-Disposition": fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI)),
})
req.SetQueryParams(map[string]string{
"uploadid": uploadID,
"part_number": strconv.FormatInt(partNumber, 10),
"phase": "transfer",
})
req.SetBody(data)
req.SetContentLength(true)
}, &uploadResp)
if err != nil {
return resp, err
}
if uploadResp.Code != 2000 {
return resp, fmt.Errorf("upload part failed: %s", uploadResp.Message)
} else if uploadResp.Data.Crc32 != crc32Value {
return resp, fmt.Errorf("upload part failed: crc32 mismatch, expected %s, got %s", crc32Value, uploadResp.Data.Crc32)
}
return uploadResp.Data, nil
}
// complete the multipart upload
func (d *Doubao) completeMultipartUpload(config *UploadConfig, uploadUrl, uploadID string, parts []UploadPart) error {
uploadResp := UploadResp{}
@@ -785,13 +814,6 @@ func (d *Doubao) commitMultipartUpload(uploadConfig *UploadConfig) error {
return nil
}
// calculate the CRC32
func calculateCRC32(data []byte) string {
hash := crc32.NewIEEE()
hash.Write(data)
return hex.EncodeToString(hash.Sum(nil))
}
// _retryOperation retries an operation
func (d *Doubao) _retryOperation(operation string, fn func() error) error {
return retry.Do(


@@ -14,15 +14,8 @@ type Addition struct {
var config = driver.Config{
Name: "DoubaoShare",
LocalSort: true,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: true,
NeedMs: false,
DefaultRoot: "/",
CheckStatus: false,
Alert: "",
NoOverwriteUpload: false,
}
func init() {


@@ -79,11 +79,11 @@ type ShareInfo struct {
RiskReviewStatus int `json:"risk_review_status"`
ConversationID string `json:"conversation_id"`
ParentID string `json:"parent_id"`
CreateTime int `json:"create_time"`
UpdateTime int `json:"update_time"`
CreateTime int64 `json:"create_time"`
UpdateTime int64 `json:"update_time"`
} `json:"first_node"`
NodeCount int `json:"node_count"`
CreateTime int `json:"create_time"`
CreateTime int64 `json:"create_time"`
Channel string `json:"channel"`
InfluencerType int `json:"influencer_type"`
}
@@ -111,8 +111,8 @@ type FilePath []struct {
RiskReviewStatus int `json:"risk_review_status"`
ConversationID string `json:"conversation_id"`
ParentID string `json:"parent_id"`
CreateTime int `json:"create_time"`
UpdateTime int `json:"update_time"`
CreateTime int64 `json:"create_time"`
UpdateTime int64 `json:"update_time"`
}
type GetFileUrlResp struct {


@@ -192,12 +192,11 @@ func (d *Dropbox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
url := d.contentBase + "/2/files/upload_session/append_v2"
reader := driver.NewLimitedUploadStream(ctx, io.LimitReader(stream, PartSize))
req, err := http.NewRequest(http.MethodPost, url, reader)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, reader)
if err != nil {
log.Errorf("failed to update file when append to upload session, err: %+v", err)
return err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Authorization", "Bearer "+d.AccessToken)


@@ -18,13 +18,6 @@ type Addition struct {
var config = driver.Config{
Name: "Dropbox",
LocalSort: false,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: false,
NeedMs: false,
DefaultRoot: "",
NoOverwriteUpload: true,
}


@@ -169,11 +169,10 @@ func (d *Dropbox) getFiles(ctx context.Context, path string) ([]File, error) {
func (d *Dropbox) finishUploadSession(ctx context.Context, toPath string, offset int64, sessionId string) error {
url := d.contentBase + "/2/files/upload_session/finish"
req, err := http.NewRequest(http.MethodPost, url, nil)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil)
if err != nil {
return err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Authorization", "Bearer "+d.AccessToken)
@@ -214,11 +213,10 @@ func (d *Dropbox) finishUploadSession(ctx context.Context, toPath string, offset
func (d *Dropbox) startUploadSession(ctx context.Context) (string, error) {
url := d.contentBase + "/2/files/upload_session/start"
req, err := http.NewRequest(http.MethodPost, url, nil)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil)
if err != nil {
return "", err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Authorization", "Bearer "+d.AccessToken)
req.Header.Set("Dropbox-API-Arg", "{\"close\":false}")


@@ -17,16 +17,8 @@ type Addition struct {
var config = driver.Config{
Name: "FebBox",
LocalSort: false,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: true,
NeedMs: false,
DefaultRoot: "0",
CheckStatus: false,
Alert: "",
NoOverwriteUpload: false,
}
func init() {


@@ -31,13 +31,13 @@ func (c *customTokenSource) Token() (*oauth2.Token, error) {
v.Set("client_id", c.config.ClientID)
v.Set("client_secret", c.config.ClientSecret)
req, err := http.NewRequest("POST", c.config.TokenURL, strings.NewReader(v.Encode()))
req, err := http.NewRequestWithContext(c.ctx, http.MethodPost, c.config.TokenURL, strings.NewReader(v.Encode()))
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
resp, err := http.DefaultClient.Do(req.WithContext(c.ctx))
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}
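
The same substitution recurs throughout this diff (Dropbox, ChaoXing, the Cloudreve and Google Drive uploaders): http.NewRequestWithContext attaches the context when the request is built, so no context-free request ever exists and the shallow copy made by WithContext is avoided. The two forms side by side:

package main

import (
	"context"
	"net/http"
	"strings"
)

func main() {
	ctx := context.Background()

	// Before: build the request, then copy it to attach the context.
	r1, _ := http.NewRequest(http.MethodPost, "https://example.com", strings.NewReader("{}"))
	r1 = r1.WithContext(ctx)

	// After: one call, context attached from the start.
	r2, _ := http.NewRequestWithContext(ctx, http.MethodPost, "https://example.com", strings.NewReader("{}"))

	_, _ = r1, r2
}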


@@ -2,11 +2,16 @@ package ftp
import (
"context"
"errors"
"io"
stdpath "path"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/jlaffaye/ftp"
)
@@ -14,6 +19,9 @@ type FTP struct {
model.Storage
Addition
conn *ftp.ServerConn
ctx context.Context
cancel context.CancelFunc
}
func (d *FTP) Config() driver.Config {
@@ -25,12 +33,16 @@ func (d *FTP) GetAddition() driver.Additional {
}
func (d *FTP) Init(ctx context.Context) error {
return d.login()
d.ctx, d.cancel = context.WithCancel(context.Background())
var err error
d.conn, err = d._login(ctx)
return err
}
func (d *FTP) Drop(ctx context.Context) error {
if d.conn != nil {
_ = d.conn.Logout()
_ = d.conn.Quit()
d.cancel()
}
return nil
}
@@ -60,15 +72,52 @@ func (d *FTP) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]m
}
func (d *FTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if err := d.login(); err != nil {
conn, err := d._login(ctx)
if err != nil {
return nil, err
}
r := NewFileReader(d.conn, encode(file.GetPath(), d.Encoding), file.GetSize())
link := &model.Link{
MFile: r,
path := encode(file.GetPath(), d.Encoding)
size := file.GetSize()
resultRangeReader := func(context context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
length := httpRange.Length
if length < 0 || httpRange.Start+length > size {
length = size - httpRange.Start
}
return link, nil
var c *ftp.ServerConn
if ctx == context {
c = conn
} else {
var err error
c, err = d._login(context)
if err != nil {
return nil, err
}
}
resp, err := c.RetrFrom(path, uint64(httpRange.Start))
if err != nil {
return nil, err
}
var close utils.CloseFunc
if context == ctx {
close = resp.Close
} else {
close = func() error {
return errors.Join(resp.Close(), c.Quit())
}
}
return utils.ReadCloser{
Reader: io.LimitReader(resp, length),
Closer: close,
}, nil
}
return &model.Link{
RangeReader: &model.FileRangeReader{
RangeReaderIF: stream.RateLimitRangeReaderFunc(resultRangeReader),
},
SyncClosers: utils.NewSyncClosers(utils.CloseFunc(conn.Quit)),
}, nil
}
func (d *FTP) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {

View File

@@ -33,8 +33,9 @@ type Addition struct {
var config = driver.Config{
Name: "FTP",
LocalSort: true,
OnlyLocal: true,
OnlyLinkMFile: false,
DefaultRoot: "/",
NoLinkURL: true,
}
func init() {


@@ -1,116 +1,43 @@
package ftp
import (
"io"
"os"
"sync"
"sync/atomic"
"context"
"fmt"
"time"
"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
"github.com/jlaffaye/ftp"
)
// do others that not defined in Driver interface
func (d *FTP) login() error {
_, err, _ := singleflight.AnyGroup.Do(fmt.Sprintf("FTP.login:%p", d), func() (any, error) {
var err error
if d.conn != nil {
_, err := d.conn.CurrentDir()
if err == nil {
return nil
}
}
conn, err := ftp.Dial(d.Address, ftp.DialWithShutTimeout(10*time.Second))
err = d.conn.NoOp()
if err != nil {
d.conn.Quit()
d.conn = nil
}
}
if d.conn == nil {
d.conn, err = d._login(d.ctx)
}
return nil, err
})
return err
}
func (d *FTP) _login(ctx context.Context) (*ftp.ServerConn, error) {
conn, err := ftp.Dial(d.Address, ftp.DialWithShutTimeout(10*time.Second), ftp.DialWithContext(ctx))
if err != nil {
return nil, err
}
err = conn.Login(d.Username, d.Password)
if err != nil {
return err
conn.Quit()
return nil, err
}
d.conn = conn
return nil
}
// FileReader An FTP file reader that implements io.MFile for seeking.
type FileReader struct {
conn *ftp.ServerConn
resp *ftp.Response
offset atomic.Int64
readAtOffset int64
mu sync.Mutex
path string
size int64
}
func NewFileReader(conn *ftp.ServerConn, path string, size int64) *FileReader {
return &FileReader{
conn: conn,
path: path,
size: size,
}
}
func (r *FileReader) Read(buf []byte) (n int, err error) {
n, err = r.ReadAt(buf, r.offset.Load())
r.offset.Add(int64(n))
return
}
func (r *FileReader) ReadAt(buf []byte, off int64) (n int, err error) {
if off < 0 {
return -1, os.ErrInvalid
}
r.mu.Lock()
defer r.mu.Unlock()
if off != r.readAtOffset {
//have to restart the connection, to correct offset
_ = r.resp.Close()
r.resp = nil
}
if r.resp == nil {
r.resp, err = r.conn.RetrFrom(r.path, uint64(off))
r.readAtOffset = off
if err != nil {
return 0, err
}
}
n, err = r.resp.Read(buf)
r.readAtOffset += int64(n)
return
}
func (r *FileReader) Seek(offset int64, whence int) (int64, error) {
oldOffset := r.offset.Load()
var newOffset int64
switch whence {
case io.SeekStart:
newOffset = offset
case io.SeekCurrent:
newOffset = oldOffset + offset
case io.SeekEnd:
return r.size, nil
default:
return -1, os.ErrInvalid
}
if newOffset < 0 {
// offset out of range
return oldOffset, os.ErrInvalid
}
if newOffset == oldOffset {
// offset not changed, so return directly
return oldOffset, nil
}
r.offset.Store(newOffset)
return newOffset, nil
}
func (r *FileReader) Close() error {
if r.resp != nil {
return r.resp.Close()
}
return nil
return conn, nil
}
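
The net effect of the FTP rewrite: one long-lived control connection serves directory operations (revalidated with NoOp in login), while every ranged read dials a short-lived connection of its own and uses RetrFrom so the server starts the transfer at the requested offset. A sketch of that per-read strategy built on the same jlaffaye/ftp calls; the address, credentials, and closerFunc helper are illustrative:

package main

import (
	"fmt"
	"io"
	"time"

	"github.com/jlaffaye/ftp"
)

type closerFunc func() error

func (f closerFunc) Close() error { return f() }

// rangeRead opens a fresh connection, starts the transfer at offset with
// RetrFrom, and ties the connection's lifetime to the returned reader,
// which is the pattern Link now uses for out-of-context reads.
func rangeRead(addr, user, pass, path string, offset uint64, length int64) (io.ReadCloser, error) {
	c, err := ftp.Dial(addr, ftp.DialWithShutTimeout(10*time.Second))
	if err != nil {
		return nil, err
	}
	if err := c.Login(user, pass); err != nil {
		c.Quit()
		return nil, err
	}
	resp, err := c.RetrFrom(path, offset) // server seeks to offset for us
	if err != nil {
		c.Quit()
		return nil, err
	}
	return struct {
		io.Reader
		io.Closer
	}{
		Reader: io.LimitReader(resp, length),
		Closer: closerFunc(func() error {
			rerr := resp.Close()
			qerr := c.Quit() // the throwaway connection dies with the reader
			if rerr != nil {
				return rerr
			}
			return qerr
		}),
	}, nil
}

func main() {
	rc, err := rangeRead("127.0.0.1:21", "anonymous", "anonymous", "file.bin", 1024, 4096)
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer rc.Close()
	n, _ := io.Copy(io.Discard, rc)
	fmt.Println("read", n, "bytes")
}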


@@ -9,6 +9,7 @@ import (
"text/template"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/ProtonMail/go-crypto/openpgp"
@@ -96,7 +97,7 @@ func getPathCommonAncestor(a, b string) (ancestor, aChildName, bChildName, aRest
}
func getUsername(ctx context.Context) string {
user, ok := ctx.Value("user").(*model.User)
user, ok := ctx.Value(conf.UserKey).(*model.User)
if !ok {
return "<system>"
}
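
Replacing the string literal "user" with conf.UserKey is the standard typed-context-key pattern: a key of a package-owned type cannot collide with another package that stores something under the same string. A minimal illustration, with ctxKey and userKey as hypothetical stand-ins for conf.UserKey:

package main

import (
	"context"
	"fmt"
)

// ctxKey is package-owned, so no other package can construct the same key;
// the bare string "user" offers no such guarantee.
type ctxKey int

const userKey ctxKey = 0

func main() {
	ctx := context.WithValue(context.Background(), userKey, "admin")
	fmt.Println(ctx.Value("user"))  // <nil>: string key does not match
	fmt.Println(ctx.Value(userKey)) // admin
}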


@@ -16,16 +16,7 @@ type Addition struct {
var config = driver.Config{
Name: "GitHub Releases",
LocalSort: false,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: false,
NeedMs: false,
DefaultRoot: "",
CheckStatus: false,
Alert: "",
NoOverwriteUpload: false,
NoUpload: true,
}
func init() {


@@ -162,7 +162,7 @@ func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
SetBody(driver.NewLimitedUploadStream(ctx, stream))
}, nil)
} else {
err = d.chunkUpload(ctx, stream, putUrl)
err = d.chunkUpload(ctx, stream, putUrl, up)
}
return err
}


@@ -5,17 +5,20 @@ import (
"crypto/x509"
"encoding/pem"
"fmt"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"io"
"net/http"
"os"
"regexp"
"strconv"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/avast/retry-go"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/go-resty/resty/v2"
"github.com/golang-jwt/jwt/v4"
@@ -251,28 +254,60 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {
return res, nil
}
func (d *GoogleDrive) chunkUpload(ctx context.Context, stream model.FileStreamer, url string) error {
func (d *GoogleDrive) chunkUpload(ctx context.Context, file model.FileStreamer, url string, up driver.UpdateProgress) error {
var defaultChunkSize = d.ChunkSize * 1024 * 1024
var offset int64 = 0
for offset < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
chunkSize := stream.GetSize() - offset
if chunkSize > defaultChunkSize {
chunkSize = defaultChunkSize
}
reader, err := stream.RangeRead(http_range.Range{Start: offset, Length: chunkSize})
ss, err := stream.NewStreamSectionReader(file, int(defaultChunkSize), &up)
if err != nil {
return err
}
reader = driver.NewLimitedUploadStream(ctx, reader)
_, err = d.request(url, http.MethodPut, func(req *resty.Request) {
req.SetHeaders(map[string]string{
"Content-Length": strconv.FormatInt(chunkSize, 10),
"Content-Range": fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, stream.GetSize()),
}).SetBody(reader).SetContext(ctx)
}, nil)
var offset int64 = 0
url += "?includeItemsFromAllDrives=true&supportsAllDrives=true"
for offset < file.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
chunkSize := min(file.GetSize()-offset, defaultChunkSize)
reader, err := ss.GetSectionReader(offset, chunkSize)
if err != nil {
return err
}
limitedReader := driver.NewLimitedUploadStream(ctx, reader)
err = retry.Do(func() error {
reader.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, url, limitedReader)
if err != nil {
return err
}
req.Header = map[string][]string{
"Authorization": {"Bearer " + d.AccessToken},
"Content-Length": {strconv.FormatInt(chunkSize, 10)},
"Content-Range": {fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, file.GetSize())},
}
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
bytes, _ := io.ReadAll(res.Body)
var e Error
utils.Json.Unmarshal(bytes, &e)
if e.Error.Code != 0 {
if e.Error.Code == 401 {
err = d.refreshToken()
if err != nil {
return err
}
}
return fmt.Errorf("%s: %v", e.Error.Message, e.Error.Errors)
}
up(float64(offset+chunkSize) / float64(file.GetSize()) * 100)
return nil
},
retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second))
ss.FreeSectionReader(reader)
if err != nil {
return err
}
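
Every PUT issued by the rewritten chunkUpload declares its position with a Content-Range header; Google's resumable-upload protocol acknowledges intermediate chunks with 308 and the final one with 200 or 201, keyed entirely off that header. The header arithmetic, worked through for a 25 MiB file in 10 MiB chunks:

package main

import "fmt"

// contentRange builds the header exactly as the fmt.Sprintf in chunkUpload does.
func contentRange(offset, chunkSize, total int64) string {
	return fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, total)
}

func main() {
	total := int64(25 << 20) // 25 MiB file, 10 MiB chunks
	for off := int64(0); off < total; off += 10 << 20 {
		chunk := min(total-off, int64(10<<20))
		fmt.Println(contentRange(off, chunk, total))
	}
	// bytes 0-10485759/26214400
	// bytes 10485760-20971519/26214400
	// bytes 20971520-26214399/26214400
}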


@@ -14,6 +14,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
@@ -253,11 +254,8 @@ func (d *HalalCloud) getLink(ctx context.Context, file model.Obj, args model.Lin
chunks := getChunkSizes(result.Sizes)
resultRangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
length := httpRange.Length
if httpRange.Length >= 0 && httpRange.Start+httpRange.Length >= size {
length = -1
}
if err != nil {
return nil, fmt.Errorf("open download file failed: %w", err)
if httpRange.Length < 0 || httpRange.Start+httpRange.Length >= size {
length = size - httpRange.Start
}
oo := &openObject{
ctx: ctx,
@@ -279,9 +277,8 @@ func (d *HalalCloud) getLink(ctx context.Context, file model.Obj, args model.Lin
duration = time.Until(time.Now().Add(time.Hour))
}
resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader}
return &model.Link{
RangeReadCloser: resultRangeReadCloser,
RangeReader: stream.RateLimitRangeReaderFunc(resultRangeReader),
Expiration: &duration,
}, nil
}
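
The corrected clamp treats a negative Length as read-to-EOF and trims any range that would overrun the object, so the chunked reader downstream is never asked for bytes that do not exist. Worked through with a 100-byte object:

package main

import "fmt"

// clampLength mirrors the corrected logic in getLink.
func clampLength(start, length, size int64) int64 {
	if length < 0 || start+length >= size {
		return size - start
	}
	return length
}

func main() {
	const size = 100
	fmt.Println(clampLength(90, -1, size)) // 10: open-ended tail read
	fmt.Println(clampLength(90, 50, size)) // 10: over-long range trimmed
	fmt.Println(clampLength(0, 40, size))  // 40: in-bounds range untouched
}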


@@ -19,16 +19,9 @@ type Addition struct {
var config = driver.Config{
Name: "HalalCloud",
LocalSort: false,
OnlyLocal: true,
OnlyProxy: true,
NoCache: false,
NoUpload: false,
NeedMs: false,
DefaultRoot: "/",
CheckStatus: false,
Alert: "",
NoOverwriteUpload: false,
NoLinkURL: true,
}
func init() {


@@ -96,7 +96,3 @@ type SteamFile struct {
func (s *SteamFile) Read(p []byte) (n int, err error) {
return s.file.Read(p)
}
func (s *SteamFile) Close() error {
return s.file.Close()
}


@@ -276,7 +276,7 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreame
etag := s.GetHash().GetHash(utils.MD5)
var err error
if len(etag) != utils.MD5.Width {
_, etag, err = stream.CacheFullInTempFileAndHash(s, utils.MD5)
_, etag, err = stream.CacheFullAndHash(s, &up, utils.MD5)
if err != nil {
return nil, err
}


@@ -30,16 +30,8 @@ func init() {
return &ILanZou{
config: driver.Config{
Name: "ILanZou",
LocalSort: false,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: false,
NeedMs: false,
DefaultRoot: "0",
CheckStatus: false,
Alert: "",
NoOverwriteUpload: false,
LocalSort: true,
},
conf: Conf{
base: "https://api.ilanzou.com",
@@ -56,16 +48,8 @@ func init() {
return &ILanZou{
config: driver.Config{
Name: "FeijiPan",
LocalSort: false,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: false,
NeedMs: false,
DefaultRoot: "0",
CheckStatus: false,
Alert: "",
NoOverwriteUpload: false,
LocalSort: true,
},
conf: Conf{
base: "https://api.feijipan.com",


@@ -17,7 +17,6 @@ var config = driver.Config{
Name: "IPFS API",
DefaultRoot: "/",
LocalSort: true,
OnlyProxy: false,
}
func init() {


@@ -15,7 +15,6 @@ type Addition struct {
var config = driver.Config{
Name: "KodBox",
DefaultRoot: "",
}
func init() {


@@ -3,6 +3,8 @@ package LenovoNasShare
import (
"context"
"net/http"
"net/url"
"strings"
"time"
"github.com/go-resty/resty/v2"
@@ -32,6 +34,10 @@ func (d *LenovoNasShare) Init(ctx context.Context) error {
if err := d.getStoken(); err != nil {
return err
}
if !d.ShowRootFolder && d.RootFolderPath == "" {
list, _ := d.List(ctx, File{}, model.ListArgs{})
d.RootFolderPath = list[0].GetPath()
}
return nil
}
@@ -43,23 +49,46 @@ func (d *LenovoNasShare) List(ctx context.Context, dir model.Obj, args model.Lis
d.checkStoken() // check whether the stoken has expired
files := make([]File, 0)
path := dir.GetPath()
if path == "" && !d.ShowRootFolder && d.RootFolderPath != "" {
path = d.RootFolderPath
}
var resp Files
query := map[string]string{
"code": d.ShareId,
"num": "5000",
"stoken": d.stoken,
"path": dir.GetPath(),
"path": path,
}
_, err := d.request(d.Host+"/oneproxy/api/share/v1/files", http.MethodGet, func(req *resty.Request) {
req.SetQueryParams(query)
}, &resp)
if err != nil {
return nil, err
}
files = append(files, resp.Data.List...)
return utils.SliceConvert(files, func(src File) (model.Obj, error) {
if src.IsDir() {
return src, nil
}
return &model.ObjThumb{
Object: model.Object{
Name: src.GetName(),
Size: src.GetSize(),
Modified: src.ModTime(),
IsFolder: src.IsDir(),
},
Thumbnail: model.Thumbnail{
Thumbnail: func() string {
thumbUrl := d.Host + "/oneproxy/api/share/v1/file/thumb?code=" + d.ShareId + "&stoken=" + d.stoken + "&path=" + url.QueryEscape(src.GetPath())
return thumbUrl
}(),
},
}, nil
})
}
@@ -73,6 +102,10 @@ func (d *LenovoNasShare) getStoken() error { // get the stoken
if d.Host == "" {
d.Host = "https://siot-share.lenovo.com.cn"
}
parts := strings.Split(d.ShareId, "/")
d.ShareId = parts[len(parts)-1]
query := map[string]string{
"code": d.ShareId,
"password": d.SharePwd,

Some files were not shown because too many files have changed in this diff.