Compare commits

90 Commits

Author SHA1 Message Date
3936e736e6 feat(drivers): add a driver that divides large files into multiple chunks (#1153) 2025-09-19 19:27:35 +08:00
68433d4f5b fix(local): cannot mkdir on specific platforms (#1304) 2025-09-19 15:34:58 +08:00
cc16cb35bf feat(style): add driver icons and disk usage (#1274)
* feat(style): add driver icons and disk usage

* feat(driver): add disk usage for 115_open, 123_open, aliyundrive_open and baidu_netdisk

* feat(driver): add disk usage for crypt, sftp and smb

* chore: clean unused variable

* feat(driver): add disk usage for cloudreve_v4

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(local): disk label check when getting disk usage

* feat(style): return details when accessing the manage page

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-09-19 11:59:11 +08:00
d3bc6321f4 chore(build): update Go version to 1.25.0 across workflows and build scripts (#1290)
build: update Go version to 1.25.0 across workflows and build scripts

fixes #1286
2025-09-16 18:44:29 +08:00
cbbb5ad231 fix(stream): http chunked upload issue (#1152)
* fix(stream): http chunked upload issue

* fix(stream): use MmapThreshold

* fix(stream): improve caching mechanism and handle size=0 case

* fix bug

* fix(buffer): optimize ReadAt method for improved performance

* fix(upload): handle Content-Length and File-Size headers for better size management

* fix(189pc): remove duplicate rate limiting

* fix(upload): handle negative file size during streaming uploads

* fix(upload): update header key from File-Size to X-File-Size for size retrieval

---------

Co-authored-by: j2rong4cn <j2rong@qq.com>
2025-09-15 19:36:16 +08:00
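The size resolution these messages describe — Content-Length first, then the `X-File-Size` header, with negative meaning unknown — can be sketched as follows. A minimal illustration assuming a plain net/http handler; only the header name comes from the commit messages:

```go
package stream

import (
	"net/http"
	"strconv"
)

// uploadSize resolves the expected size of a streaming (chunked) upload:
// prefer Content-Length, fall back to X-File-Size, return -1 if unknown.
func uploadSize(r *http.Request) int64 {
	if r.ContentLength > 0 {
		return r.ContentLength
	}
	if v := r.Header.Get("X-File-Size"); v != "" {
		if n, err := strconv.ParseInt(v, 10, 64); err == nil && n >= 0 {
			return n
		}
	}
	return -1 // unknown size; the caller must buffer or stream without it
}
```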
c1d03c5bcc fix(security): zip slip (#1228)
* fix(security): Zip Slip

* chore: remove repeated cleanup

* fix: archives,iso9660 and rardecode module

---------

Co-authored-by: ILoveScratch <ilovescratch@foxmail.com>
2025-09-15 13:25:21 +08:00
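Zip Slip is the archive-extraction flaw where an entry named `../../x` escapes the destination directory. A minimal sketch of the standard mitigation, assuming a cleaned, absolute `destDir` (not necessarily the exact check used in #1228):

```go
package archive

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// securePath joins an archive entry name onto destDir and rejects
// entries whose ".." segments would escape the destination.
func securePath(destDir, entryName string) (string, error) {
	p := filepath.Join(destDir, entryName) // Join cleans ".." segments
	if p != destDir && !strings.HasPrefix(p, destDir+string(os.PathSeparator)) {
		return "", fmt.Errorf("illegal entry path: %s", entryName)
	}
	return p, nil
}
```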
61a8ed515f fix(123): add get and list hash info (#1278) 2025-09-14 21:36:54 +08:00
bbb7c06504 feat(alias): support pass through provider (#1269) 2025-09-14 21:36:38 +08:00
8bbdb272d4 docs(readme): extend driver list with newest support (#1271) 2025-09-13 20:41:17 +08:00
c15ae94307 feat(189PC,189TV): add refreshToken and qrcode login (#1205)
### Key Changes
- **189PC**: Add QR code login and refresh token support
- **189TV**: Add session refresh mechanism and fix TempUuid persistence issue
- **Both**: Implement session keep-alive with cron jobs (5min interval)

### Features
- QR code authentication for 189PC as alternative to password login
- Automatic token refresh to avoid frequent re-authentication
- Session keep-alive to maintain long-term connections
- Retry logic with max attempts to prevent infinite loops

### Fixes
- Fixed 189TV TempUuid causing storage corruption on QR code reload
- Enhanced error handling for token expiration scenarios
2025-09-13 13:59:47 +08:00
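The keep-alive described above amounts to a periodic refresh with a bounded retry loop. A sketch under assumed names — `refresh` stands in for the driver's session call; only the 5-minute interval comes from the PR:

```go
package session

import (
	"context"
	"time"
)

// keepAlive refreshes the session on a fixed interval and caps retries
// per tick so a broken token cannot cause an infinite loop.
func keepAlive(ctx context.Context, refresh func() error) {
	const (
		interval   = 5 * time.Minute // interval stated in the PR
		maxRetries = 3               // illustrative cap
	)
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			for i := 0; i < maxRetries; i++ {
				if refresh() == nil {
					break // refreshed successfully
				}
			}
		}
	}
}
```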
f1a5048558 feat(drivers): add cnb_releases (#1033)
* feat(drivers): add cnb_releases

* feat(cnb_release): implement reference

* refactor(cnb_releases): get release info by ID instead of tag name

* feat(cnb_releases): add option to use tag name instead of release name

* fix(cnb_releases): set default root and improve release info retrieval

* feat(cnb_releases): implement Put

* perf(cnb_release): use io.Pipe to stream file upload

* perf(cnb_releases): add context timeout for file upload request

* feat(cnb_releases): implement Remove

* feat(cnb_releases): implement MakeDir

* feat(cnb_releases): implement Rename

* feat(cnb_releases): require repo and token in Addition

* chore(cnb_releases): remove unused code

* Revert 'perf(cnb_release): use io.Pipe to stream file upload'

* perf(cnb_releases): optimize upload with MultiReader

* feat(cnb_releases): add DefaultBranch

---------

Co-authored-by: ILoveScratch <ilovescratch@foxmail.com>
2025-09-11 18:11:32 +08:00
1fe26bff9a feat(local): auto create recycle dir if not exists (#1244) 2025-09-10 20:57:21 +08:00
433dcd156b fix(ci): add tag_name to upload assets step (#1234)
fix(release): add tag_name to upload assets step
2025-09-06 22:51:05 +08:00
e97f0a289e feat(cloudreve_v4): enhance token management (#1171)
* fix(cloudreve_v4): improve error handling in request method

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* feat(cloudreve_v4): enhance token management with expiration checks and refresh logic

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* feat(cloudreve_v4): add JWT structures for access and refresh tokens; validate access token on initialization

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(cloudreve_v4): improve error messages

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
2025-09-04 19:41:41 +08:00
89f35170b3 fix(fs): clear cache after directory rename to ensure consistency (#1193)
Clear cache after renaming the directory.
2025-09-01 18:47:54 +08:00
8188fb2d7d fix(123open): get direct link (#1185)
* fix(123open): correct query parameter name from 'fileId' to 'fileID' in getDirectLink function

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(123open): change SpaceTempExpr type from 'string' to 'int64' in UserInfoResp struct

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(123open): comment out unused fields in UserInfoResp struct

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(123open): add getUID method and cache UID

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-31 15:47:38 +08:00
87cf95f50b fix(139): refactor part upload logic (#1184)
* fix(139): refactor part upload logic

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(139): handle upload errors

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(139): sort upload parts by PartNumber before uploading

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(139): improve error handling

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(139): add validation for upload part index to prevent out of bounds errors

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-31 15:47:12 +08:00
8ab26cb823 fix(123open): change DirectLink type from 'boolean' to 'bool' (#1180)
Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-29 19:06:37 +08:00
5880c8e1af fix(189tv): use rate-limited upload stream in OldUpload function (#1176)
* fix(189tv): use rate-limited upload stream in OldUpload function

* fix(189tv): wrap tempFile with io.NopCloser to prevent premature closure in OldUpload function

* .
2025-08-29 16:01:50 +08:00
14bf4ecb4c fix(share): support custom proxy url (#1130)
* feat(share): support custom proxy url

* fix(share): count access

* fix: maybe a path traversal vulnerability?
2025-08-28 22:11:19 +08:00
04a5e58781 fix(server): can't edit .md source files (#1159)
* fix(server): can't edit .md source files

* chore

* add ignore direct link args
2025-08-28 16:19:57 +08:00
bbd4389345 fix(wopan): use fixed timezone for parsing time (#1170)
fix(wopan): update getTime function to use fixed timezone for parsing

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-28 13:02:02 +08:00
f350ccdf95 fix(189pc): sliceSize must not be equal to fileSize (#1169)
* fix(189pc): sliceSize not equal to fileSize

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* Update comment for sliceSize parameter

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-28 11:32:40 +08:00
4f2de9395e feat(degoo): token improvement (#1149)
* Update driver.go

Signed-off-by: Caspian <app@caspian.im>

* Update meta.go

Signed-off-by: Caspian <app@caspian.im>

* Update util.go

Signed-off-by: Caspian <app@caspian.im>

* Update util.go

Signed-off-by: Caspian <app@caspian.im>

* Update util.go

Signed-off-by: Caspian <app@caspian.im>

* Update util.go

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* make account optional

* ensure username and password

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

---------

Signed-off-by: Caspian <app@caspian.im>
Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-26 01:22:59 +08:00
b0dbbebfb0 feat(drivers): add Teldrive driver (#1116)
https://github.com/tgdrive/teldrive

https://teldrive-docs.pages.dev/docs/api

Implemented:
* copy
* move
* link (302 share and local proxy)
* chunked uploads
* rename

Not implemented:
- OpenList QR code login
- refresh token

https://github.com/OpenListTeam/OpenList-Docs/pull/155


* feat(Teldrive): Add driver Teldrive

* fix(teldrive): force webproxy and memory optimized

* chore(teldrive): go fmt

* chore(teldrive): remove TODO

* chore(teldrive): organize code

* feat(teldrive): add UseShareLink option and support 302

* fix(teldrive): standardize API path construction

* fix(teldrive): trim trailing slash from Address in Init method

* chore(teldrive): update help text for UseShareLink field in Addition struct

* fix(teldrive): set 10 MiB as default chunk size

---------

Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
Co-authored-by: ILoveScratch <ilovescratch@foxmail.com>
2025-08-25 01:34:08 +08:00
0c27b4bd47 docs(contributing): update guidelines (#983)
[skip ci]

* docs(contributing): update guidelines

* docs(contributing): clarify fork

* docs(contributing): sync translation

Co-authored-by: Yinan Qin <39023210+elysia-best@users.noreply.github.com>
Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* docs(contributing): add label and cc reminder

* docs(contributing): remove ensure new branch from checklist

* docs(contributing): replace generic GitHub URLs with user-specific ones

* docs(contributing): make branch deletion after PR merge optional

* docs(contributing): keep --recurse-submodules

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
Co-authored-by: Yinan Qin <39023210+elysia-best@users.noreply.github.com>
2025-08-24 20:13:11 +08:00
736cd9e5f2 fix(quark): fix getTranscodingLink (#1136)
The first video info may not contain url

* fix(quark): fix getTranscodingLink

* fix(quark_tv): fix getTranscodingLink
2025-08-24 19:55:10 +08:00
c7a603c926 fix(115): fix get 115 app version (#1137) 2025-08-24 19:50:21 +08:00
a28d6d5693 fix(123_open): fix token refresh (#1121) 2025-08-23 23:01:41 +08:00
e59d2233e2 feat(drivers): add Degoo driver (#1097)
* Create driver.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Create util.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Create types.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Create meta.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Update drivers/degoo/driver.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: CaspianGUAN <app@caspian.im>

* Update drivers/degoo/driver.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: CaspianGUAN <app@caspian.im>

* Update driver.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Update meta.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Update types.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Update util.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Update driver.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Update util.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Update drivers/degoo/util.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: CaspianGUAN <app@caspian.im>

* Update util.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* refactor(degoo): add Degoo driver integration and update API handling

* fix(degoo): apply suggestions

---------

Signed-off-by: CaspianGUAN <app@caspian.im>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-23 22:47:02 +08:00
01914a06ef refactor(ci): add permissions check at docker's entrypoint (#1128)
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-22 19:35:48 +08:00
6499374d1c fix(deps): update 115driver to v1.1.1 (close SheltonZhu/115driver#57) (#1115) 2025-08-20 21:33:21 +08:00
b054919d5c feat(ilanzou): add support for rapid upload and fix duplication handling (#1065)
* feat(ilanzou): add support for rapid upload token handling

* feat(ilanzou): add NoOverwriteUpload option
2025-08-19 19:19:44 +08:00
048ee9c2e5 feat(server): adapting #1099 to #991 (#1102) 2025-08-19 15:48:59 +08:00
23394548ca feat(123_open): add DirectLink option (#1045)
* feat(123_open): add `UseDirectLink` option

* feat(123_open): update rate limit rules

* fix(123_open): update api

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* feat(123_open): enhance direct link functionality with private key and expiration

* refactor(123_open): use UUID for random generation

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-19 15:23:10 +08:00
b04677b806 feat(server): add error page and status code (#1099) 2025-08-19 15:18:12 +08:00
e4c902dd93 feat(share): support more secure file sharing (#991)
Provides a file-sharing flow similar to most cloud drives: the source path can be hidden behind a forced Web proxy, a share code, a maximum access count, and an expiration time can be set, and the guest user does not need to be enabled.

Adjustable in the global settings:
- whether to force the Web proxy
- whether to allow preview
- whether to allow previewing archives
- what the "Copy link" button copies after a file is shared

Frontend: OpenListTeam/OpenList-Frontend#156
Docs: OpenListTeam/OpenList-Docs#130

Close #183
Close #526
Close #860
Close #892
Close #1079


* feat(share): support more secure file sharing

* feat(share): add archive preview

* fix(share): fix some bugs

* feat(openlist_share): add openlist share driver

* fix(share): lack unwrap when get virtual path

* fix: use unwrapPath instead of path for virtual file name comparison

* fix(share): change request method of /api/share/list from GET to Any

* fix(share): path traversal vulnerability in sharing path check

* fix shared files from the alias driver failing to get a URL when the proxy is disabled

* fix(sharing): update error message for sharing root link extraction

---------

Co-authored-by: Suyunmeng <69945917+Suyunmeng@users.noreply.github.com>
Co-authored-by: j2rong4cn <j2rong@qq.com>
2025-08-19 15:10:02 +08:00
5d8bd258c0 refactor(docker): reduce docker image size (#1091)
* fix(docker): reduce image size

* refactor(docker): update user and group creation

* Update Dockerfile

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-19 10:27:33 +08:00
08c5283c8c feat(docker): Update docker-compose configuration (#1081)
* feat(docker): Update docker-compose configuration

* Update docker-compose.yml

Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
Signed-off-by: huancun _- <huancun@hc26.org>

---------

Signed-off-by: huancun _- <huancun@hc26.org>
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-18 14:29:59 +08:00
10a14f10cd fix(docker): improve startup process and SIGTERM handling (#1089)
* fix(ci): Modify the way of starting OpenList.

* fix(ci): start runsvdir
2025-08-18 11:13:05 +08:00
f86ebc52a0 refactor(123_open): improve upload (#1076)
* refactor(123_open): improve upload

* optimize buffer initialization for multipart form

* generate a new form for each retry

* .
2025-08-17 14:25:23 +08:00
016ed90efa feat(stream): fast buffer freeing for large cache (#1053)
Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-16 17:19:52 +08:00
d76407b201 fix(dropbox): incorrect path error during upload (#1052)
* Fix incorrect path error during upload on Dropbox

* Add RootNamespaceId to the config for direct modification

* Refactor Dropbox header logic: extract JSON marshaling into helper method

* Fix Dropbox: replace marshalToJSONString with utils.Json.MarshalToString
2025-08-16 14:18:02 +08:00
5de6b660f2 fix(terabox): user not exists error (#1056)
* fix user location error when uploading files
2025-08-15 21:25:57 +08:00
71ada3b656 fix(ci-sync): fix workflow for syncing Repository (#1062) 2025-08-15 18:48:55 +08:00
dc42f0e226 [skip ci]fix(ci): update sync workflow (#1061) 2025-08-15 18:36:52 +08:00
74bf9f6467 [skip ci]feat(sync): add workflow to sync GitHub repository (#1060)
feat(sync): add workflow to sync GitHub repository to Gitee
2025-08-15 18:12:29 +08:00
d0c22a1ecb feat(ci): add the default user for docker image (#1036)
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-12 09:51:40 +08:00
57fceabcf4 perf(stream): improve file stream range reading and caching mechanism (#1001)
* perf(stream): improve file stream range reading and caching mechanism

* 。

* add bytes_test.go

* fix(stream): handle EOF and buffer reading more gracefully

* comments

* refactor: update CacheFullAndWriter to accept pointer for UpdateProgress

* update tests

* Update drivers/google_drive/util.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>

* clone Link more elegantly

* fix stream being cached but not re-readable

* rename the Bytes type to Reader

* fix stack overflow

* update tests

---------

Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-11 23:41:22 +08:00
8c244a984d refactor(assets): migrate to resource domain (#975)
* refactor(assets): migrate to resource domain

* feat(bootstrap): add migration value for logo and favicon settings
2025-08-10 09:57:33 +08:00
df479ba806 fix(aliyundrive_open): limit rate for every request (close #724) (#1011)
* fix(aliyundrive_open): limit rate for `Remove` and `MakeDir`; reduce limit for `List` and `Link` (close #724)

* Update drivers/aliyundrive_open/driver.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: 火星大王 <34576789+huoxingdawang@users.noreply.github.com>

* Update drivers/aliyundrive_open/driver.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: 火星大王 <34576789+huoxingdawang@users.noreply.github.com>

* fix(aliyundrive_open): limit rate for every request

* fix(aliyundrive_open): fix limiter not work on reference driver

* fix(aliyundrive_open): typo

* fix(aliyundrive_open): limiter not set to nil after free

* fix(aliyundrive_share): limit rate for every request

---------

Signed-off-by: 火星大王 <34576789+huoxingdawang@users.noreply.github.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-10 09:55:20 +08:00
5ae8e96237 feat(123_open): update Put method to return model.Obj (#1008)
* feat(123_open): update Put method to return model.Obj

* fix(123_open): declare time zones

* chore(123_open): fix typo

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(123_open): use fixed timezone

* fix(123_open): implement PutResult interface for Open123 driver

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: Suyunmeng <69945917+Suyunmeng@users.noreply.github.com>
2025-08-09 15:09:12 +08:00
aa0ced47b0 fix(webdav): Handle HEAD requests for directories with appropriate headers (#1015)
Implement handling of HEAD requests for directories by setting the correct Content-Type and Content-Length headers.
2025-08-09 13:57:09 +08:00
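Concretely, "appropriate headers" for a directory HEAD means an empty body with metadata matching what a GET would return. A minimal net/http sketch (handler shape assumed, not OpenList's actual routing):

```go
package webdav

import "net/http"

// headDirectory answers HEAD for a directory: HEAD responses carry no
// body, so Content-Length is 0, and Content-Type describes the listing
// a GET would have produced.
func headDirectory(w http.ResponseWriter, _ *http.Request) {
	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	w.Header().Set("Content-Length", "0")
	w.WriteHeader(http.StatusOK)
}
```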
ab747d9052 feat(config): Add PWA manifest.json endpoint for web app installation (#990)
* feat(config): Add PWA manifest.json endpoint for web app installation

* fix: Update comment to English in manifest handler

* fix: fix EOL

* fix: Remove unused fmt import from manifest handler

* feat: use site settings for manifest name and icon

* fix(manifest): Move manifest.json route to static handler for proper CDN handling

* feat: move manifest.json handler to static package and improve path handling

* feat: Add custom static file handler to prevent manifest.json conflicts

* fix: Integrate manifest.json handling into static file serving routes

* fix: Simplify PWA manifest scope handling and static file serving

- Remove CDN-specific logic for PWA manifest scope and start_url
- Always use base path for PWA scope regardless of CDN configuration
- Replace manual file serving logic with http.FileServer for static assets

* fix: Ensure consistent base path handling in site configuration and manifest path construction

* fix: Refactor trailing slash handling in site configuration

* feat(static): update manifest path handling and add route for manifest.json
2025-08-08 20:07:51 +08:00
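The final bullets pin the PWA scope to the base path regardless of CDN. A sketch of that rule (manifest fields follow the Web App Manifest spec; the handler wiring and `basePath` parameter are assumptions):

```go
package static

import (
	"encoding/json"
	"net/http"
)

// manifestHandler serves manifest.json with scope and start_url fixed
// to the site's base path, independent of any CDN configuration.
func manifestHandler(basePath string) http.HandlerFunc {
	return func(w http.ResponseWriter, _ *http.Request) {
		w.Header().Set("Content-Type", "application/manifest+json")
		_ = json.NewEncoder(w).Encode(map[string]any{
			"name":      "OpenList", // in practice read from site settings
			"display":   "standalone",
			"scope":     basePath,
			"start_url": basePath,
		})
	}
}
```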
93c06213d4 feat(local): add directory size support (#624)
* feat(local): add directory size support

* fix(local): fix and improve directory size calculation

* style(local): fix code style

* style(local): fix code style

* style(local): fix code style

* fix(local): refresh directory size when force refresh

Signed-off-by: 我怎么就不是一只猫呢? <26274059+dezhishen@users.noreply.github.com>

* fix(local): Avoid traversing the parent's parent, which leads to an endless loop

Signed-off-by: 我怎么就不是一只猫呢? <26274059+dezhishen@users.noreply.github.com>

* fix(local): refresh dir size only when enabled

Signed-off-by: 我怎么就不是一只猫呢? <26274059+dezhishen@users.noreply.github.com>

* fix(local): logical error && add RecalculateDirSize && cleaner code for int64

* feat(local): add Benchmark for CalculateDirSize

* refactor(local): improve error checking during move

---------

Signed-off-by: 我怎么就不是一只猫呢? <26274059+dezhishen@users.noreply.github.com>
Co-authored-by: 我怎么就不是一只猫呢? <26274059+dezhishen@users.noreply.github.com>
2025-08-08 16:59:16 +08:00
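For context, a directory-size pass of this kind only ever walks downward. A minimal sketch with `filepath.WalkDir`, which never ascends to a parent and so cannot hit the endless loop the fix above targets:

```go
package local

import (
	"io/fs"
	"path/filepath"
)

// dirSize sums the sizes of all regular files under root.
func dirSize(root string) (int64, error) {
	var total int64
	err := filepath.WalkDir(root, func(_ string, d fs.DirEntry, err error) error {
		if err != nil || d.IsDir() {
			return err // propagate errors, skip directory entries
		}
		info, err := d.Info()
		if err != nil {
			return err
		}
		total += info.Size()
		return nil
	})
	return total, err
}
```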
b9b8eed285 [skip ci]feat(ci): add FRONTEND_REPO variable to workflows and build script (#1006) 2025-08-08 16:36:22 +08:00
317d190b77 fix(ftp): create a new connection for each download (#989) 2025-08-06 20:35:01 +08:00
52d7d819ad feat(lenovonas_share): add thumb (#986) 2025-08-06 17:34:43 +08:00
0483e0f868 feat(driver_strm): also shown some files with strm (#969)
* feat(driver_strm): Also shown some files with strm

Allow users to set some file types that need to be shown with strm, usually subtitles

Most of the code was copied and adapted from drivers/alias

* optimize

* optimize

* 。

* add comments

---------

Co-authored-by: j2rong4cn <j2rong@qq.com>
Co-authored-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>
2025-08-06 15:40:48 +08:00
08dae4f55f feat(123_open): update upload api v2 (#976) 2025-08-06 15:27:13 +08:00
9ac0484bc0 perf(ftp): improve concurrent Link response; fix alias/local driver issues (#974) 2025-08-06 13:32:37 +08:00
8cf15183a0 perf: optimize upload (#554)
* perf(115,123): optimize upload

* chore

* aliyun_open, google_drive

* fix bug

* chore

* cloudreve, cloudreve_v4, onedrive, onedrive_app

* chore(conf): add `max_buffer_limit` option

* 123pan multithread upload

* doubao

* google_drive

* chore

* chore

* chore: code for computing the number of parts

* automatic MaxBufferLimit

* automatic MaxBufferLimit

* 189pc

* add Lifecycle to errorgroup

* fill in the gaps

* Conf.MaxBufferLimit is in MB

* 。

---------

Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-05 21:42:54 +08:00
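The part-count computation mentioned in the bullets is plain ceiling division; a sketch with illustrative names:

```go
package upload

// partCount returns how many chunks of chunkSize bytes are needed to
// cover fileSize bytes (ceiling division; chunkSize must be positive).
func partCount(fileSize, chunkSize int64) int64 {
	return (fileSize + chunkSize - 1) / chunkSize
}

// e.g. partCount(10<<20, 4<<20) == 3 for a 10 MiB file in 4 MiB parts
```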
c8f2aaaa55 feat(cmd): add delete command for storage (#952) 2025-08-04 17:30:43 +08:00
1208bd0a83 fix(fs): nil interface not equal to nil (#971)
https://go.dev/doc/faq#nil_error
2025-08-03 23:51:11 +08:00
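The linked FAQ entry is Go's typed-nil trap: an interface holding a nil *T compares unequal to nil, because the interface value still carries a type. A self-contained illustration:

```go
package main

import "fmt"

type myError struct{}

func (*myError) Error() string { return "boom" }

// mayFail returns a typed nil pointer through the error interface, so
// the interface value is (type *myError, value nil) — which is not nil.
func mayFail() error {
	var p *myError
	return p
}

func main() {
	fmt.Println(mayFail() == nil) // false
}
```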
6b096bcad4 fix(fs): deadlock when get link error (#963) 2025-08-02 17:49:53 +08:00
58dbf088f9 fix(fs): forget cache when get link error (#956) 2025-08-02 11:03:34 +08:00
05ff7908f2 fix(strm): encoded path is ineffective (#951) 2025-08-02 00:23:18 +08:00
a703b736c9 feat(offline_download): filter empty URLs in offline download requests (#948) 2025-08-01 16:12:21 +08:00
e458f2ab53 fix(bootstrap): add newline after initial admin password output (#943)
fix(bootstrap): add newline after initial admin password output
2025-08-01 13:43:41 +08:00
a5a22e7085 fix(local): Treat junction as directory in Windows. (#809)
Treat junction as directory in Windows.
2025-07-31 13:54:56 +08:00
9469c95b14 fix(security): potential XSS vulnerabilities (#896) 2025-07-31 12:57:20 +08:00
cf912dcf7a fix(cmd): output to console (#920)
fix(cmd): output to terminal
2025-07-31 11:44:00 +08:00
ccd4af26e5 feat(patch): add migration from Alist V3 driver to OpenList (#919)
* feat(patch): add migration from Alist V3 driver to OpenList

* chore(patch): improve logging
2025-07-31 11:43:21 +08:00
1682e873d6 feat(search): enhanced meilisearch search experience (#864)
* feat(search): enhanced `meilisearch` search experience
- upgrade `meilisearch` dependency
- support subdirectory search
- optimize searchDocument fields for subdirectory search
- specify full index uid instead of index prefix

* fix(search): more fixes to `meilisearch`
- make use of context where context was not used
- remove the wait-for-task code from the deletion process; tasks are queued and executed in order (provided they were submitted successfully), which improves `AutoUpdate` performance
2025-07-31 11:24:22 +08:00
54ae7e6d9b feat(115_open): Add GetObjInfo to accelerate getting link (#888)
* feat(115_open): Add GetObjInfo to accelerate getting link

* feat(fs): use cache directly when cache exist
2025-07-31 11:20:02 +08:00
991da7d87f feat(strm): add local mode (#885)
* feat(strm): add local mode

* Update drivers/strm/meta.go

Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
Signed-off-by: Seven <53081179+Seven66677731@users.noreply.github.com>

* feat(strm): local mode add sign

---------

Signed-off-by: Seven <53081179+Seven66677731@users.noreply.github.com>
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-07-31 11:18:59 +08:00
Dgs a498091aef fix(123&&123_share): fix link request header referer (#915) 2025-07-31 10:10:38 +08:00
976c82bb2b fix(drivers): update time-related fields to int64 (#913)
- In doubao/types.go:
  - Change LastUpdateTime from int to int64
  - Change UserCreateTime from int to int64
- In doubao_share/types.go:
  - Change CreateTime and UpdateTime from int to int64 in ShareInfo and FilePath
- In quark_uc/types.go:
  - Change UpdateTime from int to int64 in TranscodingResp

These changes ensure consistent and accurate representation of timestamp data across the project.
2025-07-31 10:10:32 +08:00
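Why int64 matters: Go's `int` is 32 bits on 32-bit platforms, and millisecond Unix timestamps (about 1.75×10^12 in 2025) overflow 32 bits. A minimal illustration with an assumed field name:

```go
package main

import "fmt"

// ShareInfo mirrors the shape of the fixed structs: the timestamp is a
// millisecond Unix epoch, which needs the full 64-bit range.
type ShareInfo struct {
	CreateTime int64 `json:"create_time"`
}

func main() {
	s := ShareInfo{CreateTime: 1753928400000} // fits in int64; overflows int32
	fmt.Println(s.CreateTime)
}
```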
5b41a3bdff feat(ci): Add support for LoongArch64 architecture builds (#907) 2025-07-31 10:10:19 +08:00
19d1a3b785 refactor(ci): Refactor Docker build to use base images and dynamic Dockerfile generation (#904) 2025-07-30 15:04:29 +08:00
3c7b0c4999 fix(qb): Configure HTTP client with connection pooling and fix resource leaks in qBittorrent client. (#898) 2025-07-29 21:56:36 +08:00
d6867b4ab6 fix(user): show admin password on first start (#883)
* fix: fix admin password not shown in first start
* chore: add time dependence

Co-authored-by: Yinan Qin <39023210+elysia-best@users.noreply.github.com>
Signed-off-by: ILoveScratch <ilovescratch@foxmail.com>

* fix: fix log format

Co-authored-by: Yinan Qin <39023210+elysia-best@users.noreply.github.com>
Signed-off-by: ILoveScratch <ilovescratch@foxmail.com>

---------

Signed-off-by: ILoveScratch <ilovescratch@foxmail.com>
Co-authored-by: Yinan Qin <39023210+elysia-best@users.noreply.github.com>
2025-07-29 21:36:27 +08:00
11cf561307 fix(security): potential XSS vulnerabilities (#880)
* fix(security): potential XSS vulnerabilities

* chore: replace alist identifier to openlist identifier

Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
Signed-off-by: ILoveScratch <ilovescratch@foxmail.com>

---------

Signed-off-by: ILoveScratch <ilovescratch@foxmail.com>
Co-authored-by: ILoveScratch <ilovescratch@foxmail.com>
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-07-29 20:17:11 +08:00
239b58f63e fix(ci):Disable linux/s390x Docker builds (#887) 2025-07-29 16:22:50 +08:00
7da06655cb feat(setting): add site version information (#859)
* feat(setting): add site version information

* feat(conf): update conf.WebVersion to rolling

* fix(static): update condition to check conf.Version instead of conf.WebVersion

* fix(build.sh): use rolling release for web frontend in dev and beta builds

* chore(build.sh): update GitAuthor to The OpenList Projects Contributors

* fix(static): update condition to check conf.WebVersion
2025-07-29 09:49:33 +08:00
e0b3a611ba feat(thunderx,pikpak): add offline download support for ThunderX; add ctx to specific PikPak functions (#879)
* feat(thunderx,pikpak): add offline download support for ThunderX; add ctx to specific PikPak functions

* Update internal/offline_download/tool/download.go

Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
Signed-off-by: 花月喵梦 <152958106+nekohy@users.noreply.github.com>

---------

Signed-off-by: 花月喵梦 <152958106+nekohy@users.noreply.github.com>
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-07-29 09:46:28 +08:00
be1ad08a83 feat(ci):Add Windows 7 and LoongArch Release build support (#857)
* feat:Add Windows 7 and LoongArch old world build support (#30)

* feat:Add Windows 7 and Loongson old world build support

- Add BuildWin7() function with patched Go compiler for Windows 7 compatibility
- Add BuildLoongOldWorld() function for linux-loong64-abi1.0 target
- Create Zig-based wrapper scripts for Windows 7 cross-compilation
- Integrate new build functions into existing release workflows

* fix(win7):Add MinGW-w64 toolchain and improve LoongArch ABI isolation

- Install MinGW-w64 cross-compilation toolchain for Win7 compatibility
- Replace Zig compiler wrappers with MinGW-w64 for Windows 7 builds
- Add Go build cache cleaning to prevent LoongArch ABI1.0/ABI2.0 cross-contamination
- Force clean rebuilds (-a flag) for LoongArch builds to ensure ABI compatibility

* feat: add Windows 7 build support to beta release workflow

* feat: add LoongArch ABI2.0 support alongside existing ABI1.0 build (#31)

- Add BuildWin7() function with patched Go compiler for Windows 7 compatibility
- Add BuildLoongOldWorld() function for linux-loong64-abi1.0 target
- Create Zig-based wrapper scripts for Windows 7 cross-compilation
- Integrate new build functions into existing release workflows
- Install MinGW-w64 cross-compilation toolchain for Win7 compatibility
- Replace Zig compiler wrappers with MinGW-w64 for Windows 7 builds
- Add Go build cache cleaning to prevent LoongArch ABI1.0/ABI2.0 cross-contamination
- Force clean rebuilds (-a flag) for LoongArch builds to ensure ABI compatibility

* [skip ci]refactor:Refactor LoongArch builds to separate glibc from musl compilation

* fix(go-cache):Improve error handling for Go module cache cleaning in LoongArch builds

* feat(build): Enhance LoongArch build process with improved toolchain setup and cache management

* fix(build): Update Windows 7 target naming in build scripts and workflows

* refactor(build): Replace MinGW-w64 with Zig for Windows 7 toolchain in build scripts

* chore(cgo): remove cgo-actions subproject
2025-07-27 00:27:31 +08:00
4e9c30f49d feat(fs): full support webdav cross-driver copy and move (#823)
* fix(fs): restore webdav cross-driver copy and move

* fix bug

* webdav supports copying and moving folders

* optimize

* 。
2025-07-26 00:27:46 +08:00
0ee31a3f36 fix(crypt): wrong ContentLength 2025-07-25 19:55:22 +08:00
23bddf991e feat(drivers): enable local sorting for cloudreve, ilanzou (#840)
* feat(cloudreve): enable local sorting

* feat(ilanzou): enable local sorting
2025-07-25 18:01:19 +08:00
246 changed files with 10913 additions and 2823 deletions

.github/PULL_REQUEST_TEMPLATE.md (new file)

@@ -0,0 +1,56 @@
<!--
Provide a general summary of your changes in the Title above.
The PR title must start with `feat(): `, `docs(): `, `fix(): `, `style(): `, or `refactor(): `, `chore(): `. For example: `feat(component): add new feature`.
If it spans multiple components, use the main component as the prefix and enumerate in the title, describe in the body.
-->
<!--
在上方标题中提供您更改的总体摘要。
PR 标题需以 `feat(): `, `docs(): `, `fix(): `, `style(): `, `refactor(): `, `chore(): ` 其中之一开头,例如:`feat(component): 新增功能`
如果跨多个组件,请使用主要组件作为前缀,并在标题中枚举、描述中说明。
-->
## Description / 描述
<!-- Describe your changes in detail -->
<!-- 详细描述您的更改 -->
## Motivation and Context / 背景
<!-- Why is this change required? What problem does it solve? -->
<!-- 为什么需要此更改?它解决了什么问题? -->
<!-- If it fixes an open issue, please link to the issue here. -->
<!-- 如果修复了一个打开的issue请在此处链接到该issue -->
Closes #XXXX
<!-- or -->
<!-- 或者 -->
Relates to #XXXX
## How Has This Been Tested? / 测试
<!-- Please describe in detail how you tested your changes. -->
<!-- 请详细描述您如何测试更改 -->
## Checklist / 检查清单
<!-- Go over all the following points, and put an `x` in all the boxes that apply. -->
<!-- 检查以下所有要点,并在所有适用的框中打`x` -->
<!-- If you're unsure about any of these, don't hesitate to ask. We're here to help! -->
<!-- 如果您对其中任何一项不确定,请不要犹豫提问。我们会帮助您! -->
- [ ] I have read the [CONTRIBUTING](https://github.com/OpenListTeam/OpenList/blob/main/CONTRIBUTING.md) document.
我已阅读 [CONTRIBUTING](https://github.com/OpenListTeam/OpenList/blob/main/CONTRIBUTING.md) 文档。
- [ ] I have formatted my code with `go fmt` or [prettier](https://prettier.io/).
我已使用 `go fmt` 或 [prettier](https://prettier.io/) 格式化提交的代码。
- [ ] I have added appropriate labels to this PR (or mentioned needed labels in the description if lacking permissions).
我已为此 PR 添加了适当的标签(如无权限或需要的标签不存在,请在描述中说明,管理员将后续处理)。
- [ ] I have requested review from relevant code authors using the "Request review" feature when applicable.
我已在适当情况下使用"Request review"功能请求相关代码作者进行审查。
- [ ] I have updated the repository accordingly (If its needed).
我已相应更新了相关仓库(若适用)。
- [ ] [OpenList-Frontend](https://github.com/OpenListTeam/OpenList-Frontend) #XXXX
- [ ] [OpenList-Docs](https://github.com/OpenListTeam/OpenList-Docs) #XXXX

@@ -61,7 +61,7 @@ jobs:
     strategy:
       matrix:
         include:
-          - target: "!(*musl*|*windows-arm64*|*android*|*freebsd*)" # xgo and loongarch
+          - target: "!(*musl*|*windows-arm64*|*windows7-*|*android*|*freebsd*)" # xgo and loongarch
            hash: "md5"
          - target: "linux-!(arm*)-musl*" #musl-not-arm
            hash: "md5-linux-musl"
@@ -69,6 +69,8 @@ jobs:
            hash: "md5-linux-musl-arm"
          - target: "windows-arm64" #win-arm64
            hash: "md5-windows-arm64"
+          - target: "windows7-*" #win7
+            hash: "md5-windows7"
          - target: "android-*" #android
            hash: "md5-android"
          - target: "freebsd-*" #freebsd
@@ -85,12 +87,13 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: "1.24.5"
+          go-version: "1.25.0"
       - name: Setup web
         run: bash build.sh dev web
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
       - name: Build
         uses: OpenListTeam/cgo-actions@v1.2.2
@@ -103,10 +106,10 @@ jobs:
           musl-base-url: "https://github.com/OpenListTeam/musl-compilers/releases/latest/download/"
           x-flags: |
             github.com/OpenListTeam/OpenList/v4/internal/conf.BuiltAt=$built_at
-            github.com/OpenListTeam/OpenList/v4/internal/conf.GitAuthor=OpenList
+            github.com/OpenListTeam/OpenList/v4/internal/conf.GitAuthor=The OpenList Projects Contributors <noreply@openlist.team>
             github.com/OpenListTeam/OpenList/v4/internal/conf.GitCommit=$git_commit
             github.com/OpenListTeam/OpenList/v4/internal/conf.Version=$tag
-            github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=dev
+            github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=rolling
       - name: Compress
         run: |

@@ -33,12 +33,13 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: "1.24.5"
+          go-version: "1.25.0"
       - name: Setup web
         run: bash build.sh dev web
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
       - name: Build
         uses: OpenListTeam/cgo-actions@v1.2.2
@@ -49,10 +50,10 @@ jobs:
           out-dir: build
           x-flags: |
             github.com/OpenListTeam/OpenList/v4/internal/conf.BuiltAt=$built_at
-            github.com/OpenListTeam/OpenList/v4/internal/conf.GitAuthor=OpenList
+            github.com/OpenListTeam/OpenList/v4/internal/conf.GitAuthor=The OpenList Projects Contributors <noreply@openlist.team>
             github.com/OpenListTeam/OpenList/v4/internal/conf.GitCommit=$git_commit
             github.com/OpenListTeam/OpenList/v4/internal/conf.Version=$tag
-            github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=dev
+            github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=rolling
           output: openlist$ext
       - name: Upload artifact

@@ -46,7 +46,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: '1.24'
+          go-version: '1.25.0'
       - name: Checkout
         uses: actions/checkout@v4
@@ -66,10 +66,12 @@ jobs:
           bash build.sh release ${{ matrix.build-type == 'lite' && 'lite' || '' }} ${{ matrix.target-platform }}
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
       - name: Upload assets
         uses: softprops/action-gh-release@v2
         with:
           files: build/compress/*
           prerelease: false
+          tag_name: ${{ github.event.release.tag_name }}

@@ -31,7 +31,7 @@ env:
   REGISTRY: ghcr.io
   ARTIFACT_NAME: 'binaries_docker_release'
   ARTIFACT_NAME_LITE: 'binaries_docker_release_lite'
-  RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64'
+  RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/ppc64le,linux/riscv64,linux/loong64' ### Temporarily disable Docker builds for linux/s390x architectures for unknown reasons.
   IMAGE_PUSH: ${{ github.event_name == 'push' || github.event_name == 'workflow_dispatch' }}

 permissions:
@@ -47,7 +47,7 @@ jobs:
       - uses: actions/setup-go@v5
         with:
-          go-version: 'stable'
+          go-version: '1.25.0'
       - name: Cache Musl
         id: cache-musl
@@ -66,6 +66,7 @@ jobs:
         run: bash build.sh release docker-multiplatform
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
       - name: Upload artifacts
         uses: actions/upload-artifact@v4
@@ -86,7 +87,7 @@ jobs:
       - uses: actions/setup-go@v5
         with:
-          go-version: 'stable'
+          go-version: '1.25.0'
       - name: Cache Musl
         id: cache-musl
@@ -105,6 +106,7 @@ jobs:
         run: bash build.sh release lite docker-multiplatform
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
       - name: Upload artifacts
         uses: actions/upload-artifact@v4
@@ -125,15 +127,19 @@ jobs:
         image: ["latest", "ffmpeg", "aria2", "aio"]
         include:
           - image: "latest"
+            base_image_tag: "base"
             build_arg: ""
             tag_favor: ""
           - image: "ffmpeg"
+            base_image_tag: "ffmpeg"
             build_arg: INSTALL_FFMPEG=true
             tag_favor: "suffix=-ffmpeg,onlatest=true"
           - image: "aria2"
+            base_image_tag: "aria2"
             build_arg: INSTALL_ARIA2=true
             tag_favor: "suffix=-aria2,onlatest=true"
           - image: "aio"
+            base_image_tag: "aio"
             build_arg: |
               INSTALL_FFMPEG=true
               INSTALL_ARIA2=true
@@ -189,7 +195,9 @@ jobs:
           context: .
           file: Dockerfile.ci
           push: ${{ env.IMAGE_PUSH == 'true' }}
-          build-args: ${{ matrix.build_arg }}
+          build-args: |
+            BASE_IMAGE_TAG=${{ matrix.base_image_tag }}
+            ${{ matrix.build_arg }}
           tags: ${{ steps.meta.outputs.tags }}
           labels: ${{ steps.meta.outputs.labels }}
           platforms: ${{ env.RELEASE_PLATFORMS }}
@@ -203,15 +211,19 @@ jobs:
         image: ["latest", "ffmpeg", "aria2", "aio"]
         include:
           - image: "latest"
+            base_image_tag: "base"
             build_arg: ""
             tag_favor: "suffix=-lite,onlatest=true"
           - image: "ffmpeg"
+            base_image_tag: "ffmpeg"
             build_arg: INSTALL_FFMPEG=true
             tag_favor: "suffix=-lite-ffmpeg,onlatest=true"
           - image: "aria2"
+            base_image_tag: "aria2"
             build_arg: INSTALL_ARIA2=true
             tag_favor: "suffix=-lite-aria2,onlatest=true"
           - image: "aio"
+            base_image_tag: "aio"
             build_arg: |
               INSTALL_FFMPEG=true
               INSTALL_ARIA2=true
@@ -267,7 +279,9 @@ jobs:
           context: .
           file: Dockerfile.ci
           push: ${{ env.IMAGE_PUSH == 'true' }}
-          build-args: ${{ matrix.build_arg }}
+          build-args: |
+            BASE_IMAGE_TAG=${{ matrix.base_image_tag }}
+            ${{ matrix.build_arg }}
           tags: ${{ steps.meta.outputs.tags }}
           labels: ${{ steps.meta.outputs.labels }}
           platforms: ${{ env.RELEASE_PLATFORMS }}

.github/workflows/sync_repo.yml (new file)

@@ -0,0 +1,38 @@
name: Sync to Gitee

on:
  push:
    branches:
      - main
  workflow_dispatch:

jobs:
  sync:
    runs-on: ubuntu-latest
    name: Sync GitHub to Gitee

    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Setup SSH
        run: |
          mkdir -p ~/.ssh
          echo "${{ secrets.GITEE_SSH_PRIVATE_KEY }}" > ~/.ssh/id_rsa
          chmod 600 ~/.ssh/id_rsa
          ssh-keyscan gitee.com >> ~/.ssh/known_hosts

      - name: Create single commit and push
        run: |
          git config user.name "GitHub Actions"
          git config user.email "actions@github.com"

          # Create a new branch
          git checkout --orphan new-main
          git add .
          git commit -m "Sync from GitHub: $(date)"

          # Add Gitee remote and force push
          git remote add gitee ${{ vars.GITEE_REPO_URL }}
          git push --force gitee new-main:main

@@ -20,7 +20,7 @@ env:
   IMAGE_NAME_DOCKERHUB: openlist
   REGISTRY: ghcr.io
   ARTIFACT_NAME: 'binaries_docker_release'
-  RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64'
+  RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/ppc64le,linux/riscv64,linux/loong64' ### Temporarily disable Docker builds for linux/s390x architectures for unknown reasons.
   IMAGE_PUSH: ${{ github.event_name == 'push' }}
   IMAGE_TAGS_BETA: |
     type=ref,event=pr
@@ -36,7 +36,7 @@ jobs:
       - uses: actions/setup-go@v5
         with:
-          go-version: 'stable'
+          go-version: '1.25.0'
       - name: Cache Musl
         id: cache-musl
@@ -55,6 +55,7 @@ jobs:
         run: bash build.sh beta docker-multiplatform
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
       - name: Upload artifacts
         uses: actions/upload-artifact@v4
@@ -77,15 +78,19 @@ jobs:
         image: ["latest", "ffmpeg", "aria2", "aio"]
         include:
           - image: "latest"
+            base_image_tag: "base"
             build_arg: ""
             tag_favor: ""
           - image: "ffmpeg"
+            base_image_tag: "ffmpeg"
             build_arg: INSTALL_FFMPEG=true
             tag_favor: "suffix=-ffmpeg,onlatest=true"
           - image: "aria2"
+            base_image_tag: "aria2"
             build_arg: INSTALL_ARIA2=true
             tag_favor: "suffix=-aria2,onlatest=true"
           - image: "aio"
+            base_image_tag: "aio"
             build_arg: |
               INSTALL_FFMPEG=true
               INSTALL_ARIA2=true
@@ -137,7 +142,9 @@ jobs:
           context: .
           file: Dockerfile.ci
           push: ${{ env.IMAGE_PUSH == 'true' }}
-          build-args: ${{ matrix.build_arg }}
+          build-args: |
+            BASE_IMAGE_TAG=${{ matrix.base_image_tag }}
+            ${{ matrix.build_arg }}
           tags: ${{ steps.meta.outputs.tags }}
           labels: ${{ steps.meta.outputs.labels }}
           platforms: ${{ env.RELEASE_PLATFORMS }}

@@ -2,106 +2,76 @@
 ## Setup your machine

-`OpenList` is written in [Go](https://golang.org/) and [React](https://reactjs.org/).
+`OpenList` is written in [Go](https://golang.org/) and [SolidJS](https://www.solidjs.com/).

 Prerequisites:

 - [git](https://git-scm.com)
-- [Go 1.20+](https://golang.org/doc/install)
+- [Go 1.24+](https://golang.org/doc/install)
 - [gcc](https://gcc.gnu.org/)
 - [nodejs](https://nodejs.org/)

-Clone `OpenList` and `OpenList-Frontend` anywhere:
+## Cloning a fork
+
+Fork and clone `OpenList` and `OpenList-Frontend` anywhere:

 ```shell
-$ git clone https://github.com/OpenListTeam/OpenList.git
-$ git clone --recurse-submodules https://github.com/OpenListTeam/OpenList-Frontend.git
+$ git clone https://github.com/<your-username>/OpenList.git
+$ git clone --recurse-submodules https://github.com/<your-username>/OpenList-Frontend.git
 ```

-You should switch to the `main` branch for development.
+## Creating a branch
+
+Create a new branch from the `main` branch, with an appropriate name.
+
+```shell
+$ git checkout -b <branch-name>
+```

 ## Preview your change

 ### backend

 ```shell
 $ go run main.go
 ```

 ### frontend

 ```shell
 $ pnpm dev
 ```

 ## Add a new driver

 Copy `drivers/template` folder and rename it, and follow the comments in it.

 ## Create a commit

 Commit messages should be well formatted, and to make that "standardized".

-### Commit Message Format
-Each commit message consists of a **header**, a **body** and a **footer**. The header has a special
-format that includes a **type**, a **scope** and a **subject**:
-```
-<type>(<scope>): <subject>
-<BLANK LINE>
-<body>
-<BLANK LINE>
-<footer>
-```
-The **header** is mandatory and the **scope** of the header is optional.
-Any line of the commit message cannot be longer than 100 characters! This allows the message to be easier
-to read on GitHub as well as in various git tools.
-### Revert
-If the commit reverts a previous commit, it should begin with `revert: `, followed by the header
-of the reverted commit.
-In the body it should say: `This reverts commit <hash>.`, where the hash is the SHA of the commit
-being reverted.
-### Type
-Must be one of the following:
-* **feat**: A new feature
-* **fix**: A bug fix
-* **docs**: Documentation only changes
-* **style**: Changes that do not affect the meaning of the code (white-space, formatting, missing
-  semi-colons, etc)
-* **refactor**: A code change that neither fixes a bug nor adds a feature
-* **perf**: A code change that improves performance
-* **test**: Adding missing or correcting existing tests
-* **build**: Affects project builds or dependency modifications
-* **revert**: Restore the previous commit
-* **ci**: Continuous integration of related file modifications
-* **chore**: Changes to the build process or auxiliary tools and libraries such as documentation
-  generation
-* **release**: Release a new version
-### Scope
-The scope could be anything specifying place of the commit change. For example `$location`,
-`$browser`, `$compile`, `$rootScope`, `ngHref`, `ngClick`, `ngView`, etc...
-You can use `*` when the change affects more than a single scope.
-### Subject
-The subject contains succinct description of the change:
-* use the imperative, present tense: "change" not "changed" nor "changes"
-* don't capitalize first letter
-* no dot (.) at the end
-### Body
-Just as in the **subject**, use the imperative, present tense: "change" not "changed" nor "changes".
-The body should include the motivation for the change and contrast this with previous behavior.
-### Footer
-The footer should contain any information about **Breaking Changes** and is also the place to
-[reference GitHub issues that this commit closes](https://help.github.com/articles/closing-issues-via-commit-messages/).
-**Breaking Changes** should start with the word `BREAKING CHANGE:` with a space or two newlines.
-The rest of the commit message is then used for this.
+Submit your pull request. For PR titles, follow [Conventional Commits](https://www.conventionalcommits.org).
+
+https://github.com/OpenListTeam/OpenList/issues/376
+
+It's suggested to sign your commits. See: [How to sign commits](https://docs.github.com/en/authentication/managing-commit-signature-verification/signing-commits)

 ## Submit a pull request

-Push your branch to your `openlist` fork and open a pull request against the
-`main` branch.
+Please make sure your code has been formatted with `go fmt` or [prettier](https://prettier.io/) before submitting.
+
+Push your branch to your `openlist` fork and open a pull request against the `main` branch.
+
+## Merge your pull request

-Your pull request will be merged after review.
+Please wait for the maintainer to merge your pull request after review.
+At least 1 approving review is required by reviewers with write access. You can also request a review from maintainers.
+
+## Delete your branch
+
+(Optional) After your pull request is merged, you can delete your branch.
+
+---
+
+Thank you for your contribution! Let's make OpenList better together!

@@ -1,4 +1,7 @@
-FROM docker.io/library/alpine:edge AS builder
+### Default image is base. You can add other support by modifying BASE_IMAGE_TAG. The following parameters are supported: base (default), aria2, ffmpeg, aio
+ARG BASE_IMAGE_TAG=base
+FROM alpine:edge AS builder
 LABEL stage=go-builder
 WORKDIR /app/
 RUN apk add --no-cache bash curl jq gcc git go musl-dev
@@ -7,51 +10,27 @@ RUN go mod download
 COPY ./ ./
 RUN bash build.sh release docker

-FROM alpine:edge
+FROM openlistteam/openlist-base-image:${BASE_IMAGE_TAG}
+LABEL MAINTAINER="OpenList"
+
 ARG INSTALL_FFMPEG=false
 ARG INSTALL_ARIA2=false
-LABEL MAINTAINER="OpenList"
+ARG USER=openlist
+ARG UID=1001
+ARG GID=1001

 WORKDIR /opt/openlist/

-RUN apk update && \
-    apk upgrade --no-cache && \
-    apk add --no-cache bash ca-certificates su-exec tzdata runit; \
-    [ "$INSTALL_FFMPEG" = "true" ] && apk add --no-cache ffmpeg; \
-    [ "$INSTALL_ARIA2" = "true" ] && apk add --no-cache curl aria2 && \
-        mkdir -p /opt/aria2/.aria2 && \
-        wget https://github.com/P3TERX/aria2.conf/archive/refs/heads/master.tar.gz -O /tmp/aria-conf.tar.gz && \
-        tar -zxvf /tmp/aria-conf.tar.gz -C /opt/aria2/.aria2 --strip-components=1 && rm -f /tmp/aria-conf.tar.gz && \
-        sed -i 's|rpc-secret|#rpc-secret|g' /opt/aria2/.aria2/aria2.conf && \
-        sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/aria2.conf && \
-        sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/script.conf && \
-        sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/aria2.conf && \
-        sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/script.conf && \
-        mkdir -p /opt/service/stop/aria2/log && \
-        echo '#!/bin/sh' > /opt/service/stop/aria2/run && \
-        echo 'exec 2>&1' >> /opt/service/stop/aria2/run && \
-        echo 'exec aria2c --enable-rpc --rpc-allow-origin-all --conf-path=/opt/aria2/.aria2/aria2.conf' >> /opt/service/stop/aria2/run && \
-        echo '#!/bin/sh' > /opt/service/stop/aria2/log/run && \
-        echo 'mkdir -p /opt/openlist/data/log/aria2 2>/dev/null' >> /opt/service/stop/aria2/log/run && \
-        echo 'exec svlogd /opt/openlist/data/log/aria2' >> /opt/service/stop/aria2/log/run && \
-        chmod +x /opt/service/stop/aria2/run /opt/service/stop/aria2/log/run && \
-        touch /opt/aria2/.aria2/aria2.session && \
-        /opt/aria2/.aria2/tracker.sh ; \
-    rm -rf /var/cache/apk/*
-
-RUN mkdir -p /opt/service/start/openlist && \
-    echo '#!/bin/sh' > /opt/service/start/openlist/run && \
-    echo 'exec 2>&1' >> /opt/service/start/openlist/run && \
-    echo 'cd /opt/openlist' >> /opt/service/start/openlist/run && \
-    echo 'exec ./openlist server --no-prefix' >> /opt/service/start/openlist/run && \
-    chmod +x /opt/service/start/openlist/run
+RUN addgroup -g ${GID} ${USER} && \
+    adduser -D -u ${UID} -G ${USER} ${USER} && \
+    mkdir -p /opt/openlist/data

-COPY --chmod=755 --from=builder /app/bin/openlist ./
-COPY --chmod=755 entrypoint.sh /entrypoint.sh
+COPY --from=builder --chmod=755 --chown=${UID}:${GID} /app/bin/openlist ./
+COPY --chmod=755 --chown=${UID}:${GID} entrypoint.sh /entrypoint.sh
+
+USER ${USER}
 RUN /entrypoint.sh version

-ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
+ENV UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}

 VOLUME /opt/openlist/data/
 EXPOSE 5244 5245
 CMD [ "/entrypoint.sh" ]

View File

@ -1,49 +1,26 @@
-FROM docker.io/library/alpine:edge
+ARG BASE_IMAGE_TAG=base
+FROM ghcr.io/openlistteam/openlist-base-image:${BASE_IMAGE_TAG}
+LABEL MAINTAINER="OpenList"
ARG TARGETPLATFORM
ARG INSTALL_FFMPEG=false
ARG INSTALL_ARIA2=false
-LABEL MAINTAINER="OpenList"
+ARG USER=openlist
+ARG UID=1001
+ARG GID=1001
WORKDIR /opt/openlist/
-RUN apk update && \
-    apk upgrade --no-cache && \
-    apk add --no-cache bash ca-certificates su-exec tzdata runit; \
-    [ "$INSTALL_FFMPEG" = "true" ] && apk add --no-cache ffmpeg; \
-    [ "$INSTALL_ARIA2" = "true" ] && apk add --no-cache curl aria2 && \
-    mkdir -p /opt/aria2/.aria2 && \
-    wget https://github.com/P3TERX/aria2.conf/archive/refs/heads/master.tar.gz -O /tmp/aria-conf.tar.gz && \
-    tar -zxvf /tmp/aria-conf.tar.gz -C /opt/aria2/.aria2 --strip-components=1 && rm -f /tmp/aria-conf.tar.gz && \
-    sed -i 's|rpc-secret|#rpc-secret|g' /opt/aria2/.aria2/aria2.conf && \
-    sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/aria2.conf && \
-    sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/script.conf && \
-    sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/aria2.conf && \
-    sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/script.conf && \
-    mkdir -p /opt/service/stop/aria2/log && \
-    echo '#!/bin/sh' > /opt/service/stop/aria2/run && \
-    echo 'exec 2>&1' >> /opt/service/stop/aria2/run && \
-    echo 'exec aria2c --enable-rpc --rpc-allow-origin-all --conf-path=/opt/aria2/.aria2/aria2.conf' >> /opt/service/stop/aria2/run && \
-    echo '#!/bin/sh' > /opt/service/stop/aria2/log/run && \
-    echo 'mkdir -p /opt/openlist/data/log/aria2 2>/dev/null' >> /opt/service/stop/aria2/log/run && \
-    echo 'exec svlogd /opt/openlist/data/log/aria2' >> /opt/service/stop/aria2/log/run && \
-    chmod +x /opt/service/stop/aria2/run /opt/service/stop/aria2/log/run && \
-    touch /opt/aria2/.aria2/aria2.session && \
-    /opt/aria2/.aria2/tracker.sh ; \
-    rm -rf /var/cache/apk/*
-RUN mkdir -p /opt/service/start/openlist && \
-    echo '#!/bin/sh' > /opt/service/start/openlist/run && \
-    echo 'exec 2>&1' >> /opt/service/start/openlist/run && \
-    echo 'cd /opt/openlist' >> /opt/service/start/openlist/run && \
-    echo 'exec ./openlist server --no-prefix' >> /opt/service/start/openlist/run && \
-    chmod +x /opt/service/start/openlist/run
-COPY --chmod=755 /build/${TARGETPLATFORM}/openlist ./
-COPY --chmod=755 entrypoint.sh /entrypoint.sh
+RUN addgroup -g ${GID} ${USER} && \
+    adduser -D -u ${UID} -G ${USER} ${USER} && \
+    mkdir -p /opt/openlist/data
+COPY --chmod=755 --chown=${UID}:${GID} /build/${TARGETPLATFORM}/openlist ./
+COPY --chmod=755 --chown=${UID}:${GID} entrypoint.sh /entrypoint.sh
+USER ${USER}
RUN /entrypoint.sh version
-ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
+ENV UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
VOLUME /opt/openlist/data/
EXPOSE 5244 5245
CMD [ "/entrypoint.sh" ]

View File

@ -74,7 +74,6 @@ Thank you for your support and understanding of the OpenList project.
- [x] [Thunder](https://pan.xunlei.com)
- [x] [Lanzou](https://www.lanzou.com)
- [x] [ILanzou](https://www.ilanzou.com)
-- [x] [Aliyundrive share](https://www.alipan.com)
- [x] [Google photo](https://photos.google.com)
- [x] [Mega.nz](https://mega.nz)
- [x] [Baidu photo](https://photo.baidu.com)
@ -85,6 +84,16 @@ Thank you for your support and understanding of the OpenList project.
- [x] [FeijiPan](https://www.feijipan.com)
- [x] [dogecloud](https://www.dogecloud.com/product/oss)
- [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
+- [x] [Chaoxing](https://www.chaoxing.com)
+- [x] [CNB](https://cnb.cool/)
+- [x] [Degoo](https://degoo.com)
+- [x] [Doubao](https://www.doubao.com)
+- [x] [Febbox](https://www.febbox.com)
+- [x] [GitHub](https://github.com)
+- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
+- [x] [Teldrive](https://github.com/tgdrive/teldrive)
+- [x] [Weiyun](https://www.weiyun.com)
- [x] Easy to deploy and out-of-the-box
- [x] File preview (PDF, markdown, code, plain text, ...)
- [x] Image preview in gallery mode

View File

@ -74,7 +74,6 @@ OpenList is an open-source project maintained independently by the OpenList team under the AGPL-3
- [x] [迅雷网盘](https://pan.xunlei.com)
- [x] [蓝奏云](https://www.lanzou.com)
- [x] [蓝奏云优享版](https://www.ilanzou.com)
-- [x] [阿里云盘分享](https://www.alipan.com)
- [x] [Google 相册](https://photos.google.com)
- [x] [Mega.nz](https://mega.nz)
- [x] [百度相册](https://photo.baidu.com)
@ -85,6 +84,15 @@ OpenList is an open-source project maintained independently by the OpenList team under the AGPL-3
- [x] [飞机盘](https://www.feijipan.com)
- [x] [多吉云](https://www.dogecloud.com/product/oss)
- [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
+- [x] [超星](https://www.chaoxing.com)
+- [x] [CNB](https://cnb.cool/)
+- [x] [Degoo](https://degoo.com)
+- [x] [豆包](https://www.doubao.com)
+- [x] [Febbox](https://www.febbox.com)
+- [x] [GitHub](https://github.com)
+- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
+- [x] [Teldrive](https://github.com/tgdrive/teldrive)
+- [x] [微云](https://www.weiyun.com)
- [x] Easy to deploy and out-of-the-box
- [x] File preview (PDF, markdown, code, plain text, etc.)
- [x] Image preview in gallery mode

View File

@ -74,7 +74,6 @@ Thank you for your support and understanding of the OpenList project.
- [x] [Thunder](https://pan.xunlei.com)
- [x] [Lanzou](https://www.lanzou.com)
- [x] [ILanzou](https://www.ilanzou.com)
-- [x] [Aliyundrive share](https://www.alipan.com)
- [x] [Google photo](https://photos.google.com)
- [x] [Mega.nz](https://mega.nz)
- [x] [Baidu photo](https://photo.baidu.com)
@ -85,6 +84,15 @@ Thank you for your support and understanding of the OpenList project.
- [x] [FeijiPan](https://www.feijipan.com)
- [x] [dogecloud](https://www.dogecloud.com/product/oss)
- [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
+- [x] [Chaoxing](https://www.chaoxing.com)
+- [x] [CNB](https://cnb.cool/)
+- [x] [Degoo](https://degoo.com)
+- [x] [Doubao](https://www.doubao.com)
+- [x] [Febbox](https://www.febbox.com)
+- [x] [GitHub](https://github.com)
+- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
+- [x] [Teldrive](https://github.com/tgdrive/teldrive)
+- [x] [Weiyun](https://www.weiyun.com)
- [x] Easy to deploy and ready to use right away
- [x] File preview (PDF, markdown, code, plain text, etc.)
- [x] Image preview in gallery mode

View File

@ -74,7 +74,6 @@ Thank you for your support and understanding
- [x] [Thunder](https://pan.xunlei.com)
- [x] [Lanzou](https://www.lanzou.com)
- [x] [ILanzou](https://www.ilanzou.com)
-- [x] [Aliyundrive share](https://www.alipan.com)
- [x] [Google photo](https://photos.google.com)
- [x] [Mega.nz](https://mega.nz)
- [x] [Baidu photo](https://photo.baidu.com)
@ -85,6 +84,15 @@ Thank you for your support and understanding
- [x] [FeijiPan](https://www.feijipan.com)
- [x] [dogecloud](https://www.dogecloud.com/product/oss)
- [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
+- [x] [Chaoxing](https://www.chaoxing.com)
+- [x] [CNB](https://cnb.cool/)
+- [x] [Degoo](https://degoo.com)
+- [x] [Doubao](https://www.doubao.com)
+- [x] [Febbox](https://www.febbox.com)
+- [x] [GitHub](https://github.com)
+- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
+- [x] [Teldrive](https://github.com/tgdrive/teldrive)
+- [x] [Weiyun](https://www.weiyun.com)
- [x] Easy to deploy and ready to use
- [x] File preview (PDF, markdown, code, plain text, ...)
- [x] Image preview in gallery view

build.sh
View File

@ -4,6 +4,9 @@ builtAt="$(date +'%F %T %z')"
gitAuthor="The OpenList Projects Contributors <noreply@openlist.team>"
gitCommit=$(git log --pretty=format:"%h" -1)

+# Set frontend repository, default to OpenListTeam/OpenList-Frontend
+frontendRepo="${FRONTEND_REPO:-OpenListTeam/OpenList-Frontend}"
+
githubAuthArgs=""
if [ -n "$GITHUB_TOKEN" ]; then
    githubAuthArgs="--header \"Authorization: Bearer $GITHUB_TOKEN\""

@ -17,15 +20,15 @@ fi

if [ "$1" = "dev" ]; then
    version="dev"
-    webVersion="dev"
+    webVersion="rolling"
elif [ "$1" = "beta" ]; then
    version="beta"
-    webVersion="dev"
+    webVersion="rolling"
else
    git tag -d beta || true
    # Always true if there's no tag
    version=$(git describe --abbrev=0 --tags 2>/dev/null || echo "v0.0.0")
-    webVersion=$(eval "curl -fsSL --max-time 2 $githubAuthArgs \"https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases/latest\"" | grep "tag_name" | head -n 1 | awk -F ":" '{print $2}' | sed 's/\"//g;s/,//g;s/ //g')
+    webVersion=$(eval "curl -fsSL --max-time 2 $githubAuthArgs \"https://api.github.com/repos/$frontendRepo/releases/latest\"" | grep "tag_name" | head -n 1 | awk -F ":" '{print $2}' | sed 's/\"//g;s/,//g;s/ //g')
fi

echo "backend version: $version"
@ -45,30 +48,21 @@ ldflags="\
    -X 'github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=$webVersion' \
"

-FetchWebDev() {
-    pre_release_tag=$(eval "curl -fsSL --max-time 2 $githubAuthArgs https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases" | jq -r 'map(select(.prerelease)) | first | .tag_name')
-    if [ -z "$pre_release_tag" ] || [ "$pre_release_tag" == "null" ]; then
-        # fall back to latest release
-        pre_release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases/latest\"")
-    else
-        pre_release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases/tags/$pre_release_tag\"")
-    fi
+FetchWebRolling() {
+    pre_release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/$frontendRepo/releases/tags/rolling\"")
    pre_release_assets=$(echo "$pre_release_json" | jq -r '.assets[].browser_download_url')
-    if [ "$useLite" = true ]; then
-        pre_release_tar_url=$(echo "$pre_release_assets" | grep "openlist-frontend-dist-lite" | grep "\.tar\.gz$")
-    else
-        pre_release_tar_url=$(echo "$pre_release_assets" | grep "openlist-frontend-dist" | grep -v "lite" | grep "\.tar\.gz$")
-    fi
+    # There is no lite for rolling
+    pre_release_tar_url=$(echo "$pre_release_assets" | grep "openlist-frontend-dist" | grep -v "lite" | grep "\.tar\.gz$")

-    curl -fsSL "$pre_release_tar_url" -o web-dist-dev.tar.gz
+    curl -fsSL "$pre_release_tar_url" -o dist.tar.gz
    rm -rf public/dist && mkdir -p public/dist
-    tar -zxvf web-dist-dev.tar.gz -C public/dist
-    rm -rf web-dist-dev.tar.gz
+    tar -zxvf dist.tar.gz -C public/dist
+    rm -rf dist.tar.gz
}

FetchWebRelease() {
-    release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases/latest\"")
+    release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/$frontendRepo/releases/latest\"")
    release_assets=$(echo "$release_json" | jq -r '.assets[].browser_download_url')

    if [ "$useLite" = true ]; then

@ -95,6 +89,45 @@ BuildWinArm64() {
    go build -o "$1" -ldflags="$ldflags" -tags=jsoniter .
}
BuildWin7() {
# Setup Win7 Go compiler (patched version that supports Windows 7)
go_version=$(go version | grep -o 'go[0-9]\+\.[0-9]\+\.[0-9]\+' | sed 's/go//')
echo "Detected Go version: $go_version"
curl -fsSL --retry 3 -o go-win7.zip -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/XTLS/go-win7/releases/download/patched-${go_version}/go-for-win7-linux-amd64.zip"
rm -rf go-win7
unzip go-win7.zip -d go-win7
rm go-win7.zip
# Set permissions for all wrapper files
chmod +x ./wrapper/zcc-win7
chmod +x ./wrapper/zcxx-win7
chmod +x ./wrapper/zcc-win7-386
chmod +x ./wrapper/zcxx-win7-386
# Build for both 386 and amd64 architectures
for arch in "386" "amd64"; do
echo "building for windows7-${arch}"
export GOOS=windows
export GOARCH=${arch}
export CGO_ENABLED=1
# Use architecture-specific wrapper files
if [ "$arch" = "386" ]; then
export CC=$(pwd)/wrapper/zcc-win7-386
export CXX=$(pwd)/wrapper/zcxx-win7-386
else
export CC=$(pwd)/wrapper/zcc-win7
export CXX=$(pwd)/wrapper/zcxx-win7
fi
# Use the patched Go compiler for Win7 compatibility
$(pwd)/go-win7/bin/go build -o "${1}-${arch}.exe" -ldflags="$ldflags" -tags=jsoniter .
done
}
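
As wired into BuildRelease further down, BuildWin7 is invoked once with an output prefix and emits one patched-toolchain binary per architecture (assuming appName=openlist):

BuildWin7 ./build/"$appName"-windows7
# produces ./build/openlist-windows7-386.exe and ./build/openlist-windows7-amd64.exe,
# which MakeRelease later picks up via its $appName-windows7-* find pattern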
BuildDev() {
    rm -rf .git/
    mkdir -p "dist"

@ -134,7 +167,7 @@ BuildDocker() {

PrepareBuildDockerMusl() {
    mkdir -p build/musl-libs
    BASE="https://github.com/OpenListTeam/musl-compilers/releases/latest/download/"
-    FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross i486-linux-musl-cross s390x-linux-musl-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross riscv64-linux-musl-cross powerpc64le-linux-musl-cross)
+    FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross i486-linux-musl-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross riscv64-linux-musl-cross powerpc64le-linux-musl-cross loongarch64-linux-musl-cross) ## Disable s390x-linux-musl-cross builds
    for i in "${FILES[@]}"; do
        url="${BASE}${i}.tgz"
        lib_tgz="build/${i}.tgz"

@ -153,8 +186,8 @@ BuildDockerMultiplatform() {
    docker_lflags="--extldflags '-static -fpic' $ldflags"
    export CGO_ENABLED=1
-    OS_ARCHES=(linux-amd64 linux-arm64 linux-386 linux-s390x linux-riscv64 linux-ppc64le)
-    CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc i486-linux-musl-gcc s390x-linux-musl-gcc riscv64-linux-musl-gcc powerpc64le-linux-musl-gcc)
+    OS_ARCHES=(linux-amd64 linux-arm64 linux-386 linux-riscv64 linux-ppc64le linux-loong64) ## Disable linux-s390x builds
+    CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc i486-linux-musl-gcc riscv64-linux-musl-gcc powerpc64le-linux-musl-gcc loongarch64-linux-musl-gcc) ## Disable s390x-linux-musl-gcc builds
    for i in "${!OS_ARCHES[@]}"; do
        os_arch=${OS_ARCHES[$i]}
        cgo_cc=${CGO_ARGS[$i]}

@ -186,12 +219,171 @@ BuildRelease() {
    rm -rf .git/
    mkdir -p "build"
    BuildWinArm64 ./build/"$appName"-windows-arm64.exe
+    BuildWin7 ./build/"$appName"-windows7
    xgo -out "$appName" -ldflags="$ldflags" -tags=jsoniter .
    # why? Because some target platforms seem to have issues with upx compression
    # upx -9 ./"$appName"-linux-amd64
    # cp ./"$appName"-windows-amd64.exe ./"$appName"-windows-amd64-upx.exe
    # upx -9 ./"$appName"-windows-amd64-upx.exe
    mv "$appName"-* build
# Build LoongArch with glibc (both old world abi1.0 and new world abi2.0)
# Separate from musl builds to avoid cache conflicts
BuildLoongGLIBC ./build/$appName-linux-loong64-abi1.0 abi1.0
BuildLoongGLIBC ./build/$appName-linux-loong64 abi2.0
}
BuildLoongGLIBC() {
local target_abi="$2"
local output_file="$1"
local oldWorldGoVersion="1.25.0"
if [ "$target_abi" = "abi1.0" ]; then
echo building for linux-loong64-abi1.0
else
echo building for linux-loong64-abi2.0
target_abi="abi2.0" # Default to abi2.0 if not specified
fi
# Note: No longer need global cache cleanup since ABI1.0 uses isolated cache directory
echo "Using optimized cache strategy: ABI1.0 has isolated cache, ABI2.0 uses standard cache"
if [ "$target_abi" = "abi1.0" ]; then
# Setup abi1.0 toolchain and patched Go compiler similar to cgo-action implementation
echo "Setting up Loongson old-world ABI1.0 toolchain and patched Go compiler..."
# Download and setup patched Go compiler for old-world
if ! curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250821/go${oldWorldGoVersion}.linux-amd64.tar.gz" \
-o go-loong64-abi1.0.tar.gz; then
echo "Error: Failed to download patched Go compiler for old-world ABI1.0"
if [ -n "$GITHUB_TOKEN" ]; then
echo "Error output from curl:"
curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250821/go${oldWorldGoVersion}.linux-amd64.tar.gz" \
-o go-loong64-abi1.0.tar.gz || true
fi
return 1
fi
rm -rf go-loong64-abi1.0
mkdir go-loong64-abi1.0
if ! tar -xzf go-loong64-abi1.0.tar.gz -C go-loong64-abi1.0 --strip-components=1; then
echo "Error: Failed to extract patched Go compiler"
return 1
fi
rm go-loong64-abi1.0.tar.gz
# Download and setup GCC toolchain for old-world
if ! curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/loongson-gnu-toolchain-8.3.novec-x86_64-loongarch64-linux-gnu-rc1.1.tar.xz" \
-o gcc8-loong64-abi1.0.tar.xz; then
echo "Error: Failed to download GCC toolchain for old-world ABI1.0"
if [ -n "$GITHUB_TOKEN" ]; then
echo "Error output from curl:"
curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/loongson-gnu-toolchain-8.3.novec-x86_64-loongarch64-linux-gnu-rc1.1.tar.xz" \
-o gcc8-loong64-abi1.0.tar.xz || true
fi
return 1
fi
rm -rf gcc8-loong64-abi1.0
mkdir gcc8-loong64-abi1.0
if ! tar -Jxf gcc8-loong64-abi1.0.tar.xz -C gcc8-loong64-abi1.0 --strip-components=1; then
echo "Error: Failed to extract GCC toolchain"
return 1
fi
rm gcc8-loong64-abi1.0.tar.xz
# Setup separate cache directory for ABI1.0 to avoid cache pollution
abi1_cache_dir="$(pwd)/go-loong64-abi1.0-cache"
mkdir -p "$abi1_cache_dir"
echo "Using separate cache directory for ABI1.0: $abi1_cache_dir"
# Use patched Go compiler for old-world build (critical for ABI1.0 compatibility)
echo "Building with patched Go compiler for old-world ABI1.0..."
echo "Using isolated cache directory: $abi1_cache_dir"
# Use env command to set environment variables locally without affecting global environment
if ! env GOOS=linux GOARCH=loong64 \
CC="$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-gcc" \
CXX="$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-g++" \
CGO_ENABLED=1 \
GOCACHE="$abi1_cache_dir" \
$(pwd)/go-loong64-abi1.0/bin/go build -a -o "$output_file" -ldflags="$ldflags" -tags=jsoniter .; then
echo "Error: Build failed with patched Go compiler"
echo "Attempting retry with cache cleanup..."
env GOCACHE="$abi1_cache_dir" $(pwd)/go-loong64-abi1.0/bin/go clean -cache
if ! env GOOS=linux GOARCH=loong64 \
CC="$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-gcc" \
CXX="$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-g++" \
CGO_ENABLED=1 \
GOCACHE="$abi1_cache_dir" \
$(pwd)/go-loong64-abi1.0/bin/go build -a -o "$output_file" -ldflags="$ldflags" -tags=jsoniter .; then
echo "Error: Build failed again after cache cleanup"
echo "Build environment details:"
echo "GOOS=linux"
echo "GOARCH=loong64"
echo "CC=$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-gcc"
echo "CXX=$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-g++"
echo "CGO_ENABLED=1"
echo "GOCACHE=$abi1_cache_dir"
echo "Go version: $($(pwd)/go-loong64-abi1.0/bin/go version)"
echo "GCC version: $($(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-gcc --version | head -1)"
return 1
fi
fi
else
# Setup abi2.0 toolchain for new world glibc build
echo "Setting up new-world ABI2.0 toolchain..."
if ! curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/cross-tools/releases/download/20250507/x86_64-cross-tools-loongarch64-unknown-linux-gnu-legacy.tar.xz" \
-o gcc12-loong64-abi2.0.tar.xz; then
echo "Error: Failed to download GCC toolchain for new-world ABI2.0"
if [ -n "$GITHUB_TOKEN" ]; then
echo "Error output from curl:"
curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/cross-tools/releases/download/20250507/x86_64-cross-tools-loongarch64-unknown-linux-gnu-legacy.tar.xz" \
-o gcc12-loong64-abi2.0.tar.xz || true
fi
return 1
fi
rm -rf gcc12-loong64-abi2.0
mkdir gcc12-loong64-abi2.0
if ! tar -Jxf gcc12-loong64-abi2.0.tar.xz -C gcc12-loong64-abi2.0 --strip-components=1; then
echo "Error: Failed to extract GCC toolchain"
return 1
fi
rm gcc12-loong64-abi2.0.tar.xz
export GOOS=linux
export GOARCH=loong64
export CC=$(pwd)/gcc12-loong64-abi2.0/bin/loongarch64-unknown-linux-gnu-gcc
export CXX=$(pwd)/gcc12-loong64-abi2.0/bin/loongarch64-unknown-linux-gnu-g++
export CGO_ENABLED=1
# Use standard Go compiler for new-world build
echo "Building with standard Go compiler for new-world ABI2.0..."
if ! go build -a -o "$output_file" -ldflags="$ldflags" -tags=jsoniter .; then
echo "Error: Build failed with standard Go compiler"
echo "Attempting retry with cache cleanup..."
go clean -cache
if ! go build -a -o "$output_file" -ldflags="$ldflags" -tags=jsoniter .; then
echo "Error: Build failed again after cache cleanup"
echo "Build environment details:"
echo "GOOS=$GOOS"
echo "GOARCH=$GOARCH"
echo "CC=$CC"
echo "CXX=$CXX"
echo "CGO_ENABLED=$CGO_ENABLED"
echo "Go version: $(go version)"
echo "GCC version: $($CC --version | head -1)"
return 1
fi
fi
fi
}

BuildReleaseLinuxMusl() {

@ -249,6 +441,7 @@ BuildReleaseLinuxMuslArm() {
    done
}
+
BuildReleaseAndroid() {
    rm -rf .git/
    mkdir -p "build"

@ -344,7 +537,7 @@ MakeRelease() {
        tar -czvf compress/"$i$liteSuffix".tar.gz "$appName"
        rm -f "$appName"
    done
-    for i in $(find . -type f -name "$appName-windows-*"); do
+    for i in $(find . -type f \( -name "$appName-windows-*" -o -name "$appName-windows7-*" \)); do
        cp "$i" "$appName".exe
        zip compress/$(echo $i | sed 's/\.[^.]*$//')$liteSuffix.zip "$appName".exe
        rm -f "$appName".exe

@ -391,7 +584,7 @@ for arg in "$@"; do
done

if [ "$buildType" = "dev" ]; then
-    FetchWebDev
+    FetchWebRolling
    if [ "$dockerType" = "docker" ]; then
        BuildDocker
    elif [ "$dockerType" = "docker-multiplatform" ]; then

@ -403,7 +596,7 @@ if [ "$buildType" = "dev" ]; then
    fi
elif [ "$buildType" = "release" -o "$buildType" = "beta" ]; then
    if [ "$buildType" = "beta" ]; then
-        FetchWebDev
+        FetchWebRolling
    else
        FetchWebRelease
    fi

@ -484,4 +677,5 @@ else
    echo -e "  $0 release"
    echo -e "  $0 release lite"
    echo -e "  $0 release docker lite"
+    echo -e "  $0 release linux_musl"
fi
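
Taken together with the usage text above, the entry points exercised by this change are:

./build.sh dev                  # dev build, frontend from the rolling pre-release
./build.sh beta                 # beta build, also uses the rolling frontend
./build.sh release              # latest frontend release; now also builds Win7 and LoongArch binaries
./build.sh release lite
./build.sh release docker lite
./build.sh release linux_musl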

View File

@ -4,6 +4,8 @@ Copyright © 2022 NAME HERE <EMAIL ADDRESS>
package cmd

import (
+    "fmt"
+
    "github.com/OpenListTeam/OpenList/v4/internal/conf"
    "github.com/OpenListTeam/OpenList/v4/internal/op"
    "github.com/OpenListTeam/OpenList/v4/internal/setting"

@ -24,10 +26,11 @@ var AdminCmd = &cobra.Command{
        if err != nil {
            utils.Log.Errorf("failed get admin user: %+v", err)
        } else {
-            utils.Log.Infof("Admin user's username: %s", admin.Username)
-            utils.Log.Infof("The password can only be output at the first startup, and then stored as a hash value, which cannot be reversed")
-            utils.Log.Infof("You can reset the password with a random string by running [openlist admin random]")
-            utils.Log.Infof("You can also set a new password by running [openlist admin set NEW_PASSWORD]")
+            utils.Log.Infof("get admin user from CLI")
+            fmt.Println("Admin user's username:", admin.Username)
+            fmt.Println("The password can only be output at the first startup, and then stored as a hash value, which cannot be reversed")
+            fmt.Println("You can reset the password with a random string by running [openlist admin random]")
+            fmt.Println("You can also set a new password by running [openlist admin set NEW_PASSWORD]")
        }
    },
}

@ -36,6 +39,7 @@ var RandomPasswordCmd = &cobra.Command{
    Use:   "random",
    Short: "Reset admin user's password to a random string",
    Run: func(cmd *cobra.Command, args []string) {
+        utils.Log.Infof("reset admin user's password to a random string from CLI")
        newPwd := random.String(8)
        setAdminPassword(newPwd)
    },

@ -44,12 +48,12 @@ var RandomPasswordCmd = &cobra.Command{
var SetPasswordCmd = &cobra.Command{
    Use:   "set",
    Short: "Set admin user's password",
-    Run: func(cmd *cobra.Command, args []string) {
+    RunE: func(cmd *cobra.Command, args []string) error {
        if len(args) == 0 {
-            utils.Log.Errorf("Please enter the new password")
-            return
+            return fmt.Errorf("Please enter the new password")
        }
        setAdminPassword(args[0])
+        return nil
    },
}

@ -60,7 +64,8 @@ var ShowTokenCmd = &cobra.Command{
        Init()
        defer Release()
        token := setting.GetStr(conf.Token)
-        utils.Log.Infof("Admin token: %s", token)
+        utils.Log.Infof("show admin token from CLI")
+        fmt.Println("Admin token:", token)
    },
}

@ -77,9 +82,10 @@ func setAdminPassword(pwd string) {
        utils.Log.Errorf("failed update admin user: %+v", err)
        return
    }
-    utils.Log.Infof("admin user has been updated:")
-    utils.Log.Infof("username: %s", admin.Username)
-    utils.Log.Infof("password: %s", pwd)
+    utils.Log.Infof("admin user has been updated from CLI")
+    fmt.Println("admin user has been updated:")
+    fmt.Println("username:", admin.Username)
+    fmt.Println("password:", pwd)
    DelAdminCacheOnline()
}
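
The net effect for operators: results now go to stdout while the audit line goes to the logger. A typical session (binary name as in the repo):

openlist admin                    # prints the admin username
openlist admin random             # resets the password to a random 8-character string
openlist admin set NEW_PASSWORD   # fails via RunE with an error when no password is given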

View File

@ -4,6 +4,8 @@ Copyright © 2022 NAME HERE <EMAIL ADDRESS>
package cmd

import (
+    "fmt"
+
    "github.com/OpenListTeam/OpenList/v4/internal/op"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
    "github.com/spf13/cobra"

@ -24,7 +26,8 @@ var Cancel2FACmd = &cobra.Command{
        if err != nil {
            utils.Log.Errorf("failed to cancel 2FA: %+v", err)
        } else {
-            utils.Log.Info("2FA canceled")
+            utils.Log.Infof("2FA is canceled from CLI")
+            fmt.Println("2FA canceled")
            DelAdminCacheOnline()
        }
    }

View File

@ -65,6 +65,7 @@ the address is defined in config file`,
var httpSrv, httpsSrv, unixSrv *http.Server
if conf.Conf.Scheme.HttpPort != -1 {
    httpBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpPort)
+    fmt.Printf("start HTTP server @ %s\n", httpBase)
    utils.Log.Infof("start HTTP server @ %s", httpBase)
    httpSrv = &http.Server{Addr: httpBase, Handler: httpHandler}
    go func() {

@ -76,6 +77,7 @@ the address is defined in config file`,
}
if conf.Conf.Scheme.HttpsPort != -1 {
    httpsBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpsPort)
+    fmt.Printf("start HTTPS server @ %s\n", httpsBase)
    utils.Log.Infof("start HTTPS server @ %s", httpsBase)
    httpsSrv = &http.Server{Addr: httpsBase, Handler: r}
    go func() {

@ -86,6 +88,7 @@ the address is defined in config file`,
    }()
}
if conf.Conf.Scheme.UnixFile != "" {
+    fmt.Printf("start unix server @ %s\n", conf.Conf.Scheme.UnixFile)
    utils.Log.Infof("start unix server @ %s", conf.Conf.Scheme.UnixFile)
    unixSrv = &http.Server{Handler: httpHandler}
    go func() {

@ -114,6 +117,7 @@ the address is defined in config file`,
    s3r.Use(gin.LoggerWithWriter(log.StandardLogger().Out), gin.RecoveryWithWriter(log.StandardLogger().Out))
    server.InitS3(s3r)
    s3Base := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.S3.Port)
+    fmt.Printf("start S3 server @ %s\n", s3Base)
    utils.Log.Infof("start S3 server @ %s", s3Base)
    go func() {
        var err error

@ -138,6 +142,7 @@ the address is defined in config file`,
    if err != nil {
        utils.Log.Fatalf("failed to start ftp driver: %s", err.Error())
    } else {
+        fmt.Printf("start ftp server on %s\n", conf.Conf.FTP.Listen)
        utils.Log.Infof("start ftp server on %s", conf.Conf.FTP.Listen)
        go func() {
            ftpServer = ftpserver.NewFtpServer(ftpDriver)

@ -156,6 +161,7 @@ the address is defined in config file`,
    if err != nil {
        utils.Log.Fatalf("failed to start sftp driver: %s", err.Error())
    } else {
+        fmt.Printf("start sftp server on %s\n", conf.Conf.SFTP.Listen)
        utils.Log.Infof("start sftp server on %s", conf.Conf.SFTP.Listen)
        go func() {
            sftpServer = sftpd.NewSftpServer(sftpDriver)

View File

@ -4,6 +4,7 @@ Copyright © 2023 NAME HERE <EMAIL ADDRESS>
package cmd

import (
+    "fmt"
    "os"
    "strconv"

@ -22,28 +23,61 @@ var storageCmd = &cobra.Command{
}

var disableStorageCmd = &cobra.Command{
-    Use:   "disable",
-    Short: "Disable a storage",
-    Run: func(cmd *cobra.Command, args []string) {
+    Use:   "disable [mount path]",
+    Short: "Disable a storage by mount path",
+    RunE: func(cmd *cobra.Command, args []string) error {
        if len(args) < 1 {
-            utils.Log.Errorf("mount path is required")
-            return
+            return fmt.Errorf("mount path is required")
        }
        mountPath := args[0]
        Init()
        defer Release()
        storage, err := db.GetStorageByMountPath(mountPath)
        if err != nil {
-            utils.Log.Errorf("failed to query storage: %+v", err)
-        } else {
-            storage.Disabled = true
-            err = db.UpdateStorage(storage)
-            if err != nil {
-                utils.Log.Errorf("failed to update storage: %+v", err)
-            } else {
-                utils.Log.Infof("Storage with mount path [%s] have been disabled", mountPath)
-            }
-        }
+            return fmt.Errorf("failed to query storage: %+v", err)
+        }
+        storage.Disabled = true
+        err = db.UpdateStorage(storage)
+        if err != nil {
+            return fmt.Errorf("failed to update storage: %+v", err)
+        }
+        utils.Log.Infof("Storage with mount path [%s] has been disabled from CLI", mountPath)
+        fmt.Printf("Storage with mount path [%s] has been disabled\n", mountPath)
+        return nil
    },
}

+var deleteStorageCmd = &cobra.Command{
+    Use:   "delete [id]",
+    Short: "Delete a storage by id",
+    RunE: func(cmd *cobra.Command, args []string) error {
+        if len(args) < 1 {
+            return fmt.Errorf("id is required")
+        }
+        id, err := strconv.Atoi(args[0])
+        if err != nil {
+            return fmt.Errorf("id must be a number")
+        }
+        if force, _ := cmd.Flags().GetBool("force"); !force {
+            fmt.Printf("Are you sure you want to delete storage with id [%d]? [y/N]: ", id)
+            var confirm string
+            fmt.Scanln(&confirm)
+            if confirm != "y" && confirm != "Y" {
+                fmt.Println("Delete operation cancelled.")
+                return nil
+            }
+        }
+        Init()
+        defer Release()
+        err = db.DeleteStorageById(uint(id))
+        if err != nil {
+            return fmt.Errorf("failed to delete storage by id: %+v", err)
+        }
+        utils.Log.Infof("Storage with id [%d] has been deleted from CLI", id)
+        fmt.Printf("Storage with id [%d] has been deleted\n", id)
+        return nil
+    },
+}
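
A sketch of the resulting subcommand surface (mount path and id values are illustrative; the flag registration follows in init() below):

openlist storage list              # interactive table of all storages
openlist storage disable /mnt/pan  # disable by mount path
openlist storage delete 5          # asks "Are you sure ... [y/N]" first
openlist storage delete 5 -f       # -f / --force skips the confirmation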
@ -88,14 +122,14 @@ var storageTableHeight int
var listStorageCmd = &cobra.Command{
    Use:   "list",
    Short: "List all storages",
-    Run: func(cmd *cobra.Command, args []string) {
+    RunE: func(cmd *cobra.Command, args []string) error {
        Init()
        defer Release()
        storages, _, err := db.GetStorages(1, -1)
        if err != nil {
-            utils.Log.Errorf("failed to query storages: %+v", err)
+            return fmt.Errorf("failed to query storages: %+v", err)
        } else {
-            utils.Log.Infof("Found %d storages", len(storages))
+            fmt.Printf("Found %d storages\n", len(storages))
            columns := []table.Column{
                {Title: "ID", Width: 4},
                {Title: "Driver", Width: 16},

@ -138,10 +172,11 @@ var listStorageCmd = &cobra.Command{
            m := model{t}
            if _, err := tea.NewProgram(m).Run(); err != nil {
-                utils.Log.Errorf("failed to run program: %+v", err)
+                fmt.Printf("failed to run program: %+v\n", err)
                os.Exit(1)
            }
        }
+        return nil
    },
}

@ -151,6 +186,8 @@ func init() {
    storageCmd.AddCommand(disableStorageCmd)
    storageCmd.AddCommand(listStorageCmd)
    storageCmd.PersistentFlags().IntVarP(&storageTableHeight, "height", "H", 10, "Table height")
+    storageCmd.AddCommand(deleteStorageCmd)
+    deleteStorageCmd.Flags().BoolP("force", "f", false, "Force delete without confirmation")

    // Here you will define your flags and configuration settings.
    // Cobra supports Persistent Flags which will work for this command

View File

@ -6,10 +6,9 @@ services:
    ports:
      - '5244:5244'
      - '5245:5245'
+    user: '0:0'
    environment:
-      - PUID=0
-      - PGID=0
      - UMASK=022
-      - TZ=UTC
+      - TZ=Asia/Shanghai
    container_name: openlist
    image: 'openlistteam/openlist:latest'
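
The same configuration expressed as a plain docker run, since PUID/PGID are apparently no longer read and the runtime user is now set directly (host paths illustrative):

docker run -d --name openlist \
  --user 0:0 \
  -e UMASK=022 -e TZ=Asia/Shanghai \
  -p 5244:5244 -p 5245:5245 \
  -v "$PWD/data:/opt/openlist/data" \
  openlistteam/openlist:latest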

View File

@ -1,43 +1,60 @@
package _115

import (
+    "errors"
+
    "github.com/OpenListTeam/OpenList/v4/drivers/base"
+    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
    driver115 "github.com/SheltonZhu/115driver/pkg/driver"
    log "github.com/sirupsen/logrus"
)

var (
    md5Salt = "Qclm8MGWUv59TnrR0XPg"
-    appVer  = "27.0.5.7"
+    appVer  = "35.6.0.3"
)

-func (d *Pan115) getAppVersion() ([]driver115.AppVersion, error) {
-    result := driver115.VersionResp{}
-    resp, err := base.RestyClient.R().Get(driver115.ApiGetVersion)
-    err = driver115.CheckErr(err, &result, resp)
+func (d *Pan115) getAppVersion() (string, error) {
+    result := VersionResp{}
+    res, err := base.RestyClient.R().Get(driver115.ApiGetVersion)
    if err != nil {
-        return nil, err
+        return "", err
    }
-    return result.Data.GetAppVersions(), nil
+    err = utils.Json.Unmarshal(res.Body(), &result)
+    if err != nil {
+        return "", err
+    }
+    if len(result.Error) > 0 {
+        return "", errors.New(result.Error)
+    }
+    return result.Data.Win.Version, nil
}

func (d *Pan115) getAppVer() string {
-    // todo add some cache
-    vers, err := d.getAppVersion()
+    ver, err := d.getAppVersion()
    if err != nil {
        log.Warnf("[115] get app version failed: %v", err)
        return appVer
    }
-    for _, ver := range vers {
-        if ver.AppName == "win" {
-            return ver.Version
-        }
+    if len(ver) > 0 {
+        return ver
    }
    return appVer
}

func (d *Pan115) initAppVer() {
    appVer = d.getAppVer()
+    log.Debugf("use app version: %v", appVer)
+}
+
+type VersionResp struct {
+    Error string   `json:"error,omitempty"`
+    Data  Versions `json:"data"`
+}
+
+type Versions struct {
+    Win Version `json:"win"`
+}
+
+type Version struct {
+    Version string `json:"version_code"`
}
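
The struct tags above imply the version endpoint returns JSON of roughly this shape (values illustrative):

{
  "error": "",
  "data": { "win": { "version_code": "35.6.0.3" } }
}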

View File

@ -186,9 +186,7 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
    preHash = strings.ToUpper(preHash)
    fullHash := stream.GetHash().GetHash(utils.SHA1)
    if len(fullHash) != utils.SHA1.Width {
-        cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
-        up = model.UpdateProgressWithRange(up, 50, 100)
-        _, fullHash, err = streamPkg.CacheFullInTempFileAndHash(stream, cacheFileProgress, utils.SHA1)
+        _, fullHash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA1)
        if err != nil {
            return nil, err
        }

View File

@ -321,7 +321,7 @@ func (d *Pan115) UploadByMultipart(ctx context.Context, params *driver115.Upload
        err  error
    )

-    tmpF, err := s.CacheFullInTempFile()
+    tmpF, err := s.CacheFullAndWriter(&up, nil)
    if err != nil {
        return nil, err
    }

View File

@ -131,6 +131,23 @@ func (d *Open115) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
}, nil }, nil
} }
func (d *Open115) GetObjInfo(ctx context.Context, path string) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
resp, err := d.client.GetFolderInfoByPath(ctx, path)
if err != nil {
return nil, err
}
return &Obj{
Fid: resp.FileID,
Fn: resp.FileName,
Fc: resp.FileCategory,
Sha1: resp.Sha1,
Pc: resp.PickCode,
}, nil
}
func (d *Open115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
    if err := d.WaitLimit(ctx); err != nil {
        return nil, err
@ -222,9 +239,7 @@ func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
    }
    sha1 := file.GetHash().GetHash(utils.SHA1)
    if len(sha1) != utils.SHA1.Width {
-        cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
-        up = model.UpdateProgressWithRange(up, 50, 100)
-        _, sha1, err = stream.CacheFullInTempFileAndHash(file, cacheFileProgress, utils.SHA1)
+        _, sha1, err = stream.CacheFullAndHash(file, &up, utils.SHA1)
        if err != nil {
            return err
        }
@ -322,6 +337,27 @@ func (d *Open115) OfflineList(ctx context.Context) (*sdk.OfflineTaskListResp, er
    return resp, nil
}
func (d *Open115) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
userInfo, err := d.client.UserInfo(ctx)
if err != nil {
return nil, err
}
total, err := userInfo.RtSpaceInfo.AllTotal.Size.Int64()
if err != nil {
return nil, err
}
free, err := userInfo.RtSpaceInfo.AllRemain.Size.Int64()
if err != nil {
return nil, err
}
return &model.StorageDetails{
DiskUsage: model.DiskUsage{
TotalSpace: uint64(total),
FreeSpace: uint64(free),
},
}, nil
}
// func (d *Open115) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
//    // TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
//    return nil, errs.NotImplement

View File

@ -9,6 +9,7 @@ import (
sdk "github.com/OpenListTeam/115-sdk-go" sdk "github.com/OpenListTeam/115-sdk-go"
"github.com/OpenListTeam/OpenList/v4/internal/driver" "github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/internal/model"
streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/aliyun/aliyun-oss-go-sdk/oss" "github.com/aliyun/aliyun-oss-go-sdk/oss"
"github.com/avast/retry-go" "github.com/avast/retry-go"
@ -69,9 +70,6 @@ func (d *Open115) singleUpload(ctx context.Context, tempF model.File, tokenResp
// }

func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress, tokenResp *sdk.UploadGetTokenResp, initResp *sdk.UploadInitResp) error {
-    fileSize := stream.GetSize()
-    chunkSize := calPartSize(fileSize)
-
    ossClient, err := oss.New(tokenResp.Endpoint, tokenResp.AccessKeyId, tokenResp.AccessKeySecret, oss.SecurityToken(tokenResp.SecurityToken))
    if err != nil {
        return err
@ -86,6 +84,13 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
        return err
    }

+    fileSize := stream.GetSize()
+    chunkSize := calPartSize(fileSize)
+    ss, err := streamPkg.NewStreamSectionReader(stream, int(chunkSize), &up)
+    if err != nil {
+        return err
+    }
+
    partNum := (stream.GetSize() + chunkSize - 1) / chunkSize
    parts := make([]oss.UploadPart, partNum)
    offset := int64(0)

@ -98,10 +103,13 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
        if i == partNum {
            partSize = fileSize - (i-1)*chunkSize
        }
-        rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
+        rd, err := ss.GetSectionReader(offset, partSize)
+        if err != nil {
+            return err
+        }
+        rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
        err = retry.Do(func() error {
-            _ = rd.Reset()
-            rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
+            rd.Seek(0, io.SeekStart)
            part, err := bucket.UploadPart(imur, rateLimitedRd, partSize, int(i))
            if err != nil {
                return err

@ -112,6 +120,7 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
            retry.Attempts(3),
            retry.DelayType(retry.BackOffDelay),
            retry.Delay(time.Second))
+        ss.FreeSectionReader(rd)
        if err != nil {
            return err
        }

View File

@ -64,14 +64,6 @@ func (d *Pan123) List(ctx context.Context, dir model.Obj, args model.ListArgs) (
func (d *Pan123) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
    if f, ok := file.(File); ok {
-        //var resp DownResp
-        var headers map[string]string
-        if !utils.IsLocalIPAddr(args.IP) {
-            headers = map[string]string{
-                //"X-Real-IP": "1.1.1.1",
-                "X-Forwarded-For": args.IP,
-            }
-        }
        data := base.Json{
            "driveId": 0,
            "etag":    f.Etag,

@ -83,25 +75,27 @@ }
        }
        resp, err := d.Request(DownloadInfo, http.MethodPost, func(req *resty.Request) {
-            req.SetBody(data).SetHeaders(headers)
+            req.SetBody(data)
        }, nil)
        if err != nil {
            return nil, err
        }
        downloadUrl := utils.Json.Get(resp, "data", "DownloadUrl").ToString()
-        u, err := url.Parse(downloadUrl)
+        ou, err := url.Parse(downloadUrl)
        if err != nil {
            return nil, err
        }
-        nu := u.Query().Get("params")
+        u_ := ou.String()
+        nu := ou.Query().Get("params")
        if nu != "" {
            du, _ := base64.StdEncoding.DecodeString(nu)
-            u, err = url.Parse(string(du))
+            u, err := url.Parse(string(du))
            if err != nil {
                return nil, err
            }
+            u_ = u.String()
        }
-        u_ := u.String()
        log.Debug("download url: ", u_)
        res, err := base.NoRedirectClient.R().SetHeader("Referer", "https://www.123pan.com/").Get(u_)
        if err != nil {

@ -118,7 +112,7 @@ func (d *Pan123) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
            link.URL = utils.Json.Get(res.Body(), "data", "redirect_url").ToString()
        }
        link.Header = http.Header{
-            "Referer": []string{"https://www.123pan.com/"},
+            "Referer": []string{fmt.Sprintf("%s://%s/", ou.Scheme, ou.Host)},
        }
        return &link, nil
    } else {

@ -188,9 +182,7 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, file model.FileStrea
    etag := file.GetHash().GetHash(utils.MD5)
    var err error
    if len(etag) < utils.MD5.Width {
-        cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
-        up = model.UpdateProgressWithRange(up, 50, 100)
-        _, etag, err = stream.CacheFullInTempFileAndHash(file, cacheFileProgress, utils.MD5)
+        _, etag, err = stream.CacheFullAndHash(file, &up, utils.MD5)
        if err != nil {
            return err
        }

View File

@ -11,7 +11,8 @@ type Addition struct {
    driver.RootID
    //OrderBy        string `json:"order_by" type:"select" options:"file_id,file_name,size,update_at" default:"file_name"`
    //OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
-    AccessToken string
+    AccessToken  string
+    UploadThread int `json:"UploadThread" type:"number" default:"3" help:"the threads of upload"`
}

var config = driver.Config{

@ -22,6 +23,11 @@ var config = driver.Config{

func init() {
    op.RegisterDriver(func() driver.Driver {
-        return &Pan123{}
+        // New defaults have to be set when the driver is registered here,
+        // so that they also take effect for users with existing storages
+        return &Pan123{
+            Addition: Addition{
+                UploadThread: 3,
+            },
+        }
    })
}

View File

@ -28,7 +28,7 @@ func (f File) CreateTime() time.Time {
}

func (f File) GetHash() utils.HashInfo {
-    return utils.HashInfo{}
+    return utils.NewHashInfo(utils.MD5, f.Etag)
}

func (f File) GetPath() string {

View File

@ -6,11 +6,16 @@ import (
"io" "io"
"net/http" "net/http"
"strconv" "strconv"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base" "github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver" "github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
"github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2" "github.com/go-resty/resty/v2"
) )
@ -69,18 +74,21 @@ func (d *Pan123) completeS3(ctx context.Context, upReq *UploadResp, file model.F
}

func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, up driver.UpdateProgress) error {
-    tmpF, err := file.CacheFullInTempFile()
+    // fetch s3 pre signed urls
+    size := file.GetSize()
+    chunkSize := int64(16 * utils.MB)
+    chunkCount := 1
+    if size > chunkSize {
+        chunkCount = int((size + chunkSize - 1) / chunkSize)
+    }
+    ss, err := stream.NewStreamSectionReader(file, int(chunkSize), &up)
    if err != nil {
        return err
    }
-    // fetch s3 pre signed urls
-    size := file.GetSize()
-    chunkSize := min(size, 16*utils.MB)
-    chunkCount := int(size / chunkSize)
    lastChunkSize := size % chunkSize
-    if lastChunkSize > 0 {
-        chunkCount++
-    } else {
+    if lastChunkSize == 0 {
        lastChunkSize = chunkSize
    }
    // only 1 batch is allowed

@ -90,73 +98,99 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
        batchSize = 10
        getS3UploadUrl = d.getS3PreSignedUrls
    }
+    thread := min(int(chunkCount), d.UploadThread)
+    threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
+        retry.Attempts(3),
+        retry.Delay(time.Second),
+        retry.DelayType(retry.BackOffDelay))
    for i := 1; i <= chunkCount; i += batchSize {
-        if utils.IsCanceled(ctx) {
-            return ctx.Err()
+        if utils.IsCanceled(uploadCtx) {
+            break
        }
        start := i
        end := min(i+batchSize, chunkCount+1)
-        s3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, start, end)
+        s3PreSignedUrls, err := getS3UploadUrl(uploadCtx, upReq, start, end)
        if err != nil {
            return err
        }
        // upload each chunk
-        for j := start; j < end; j++ {
-            if utils.IsCanceled(ctx) {
-                return ctx.Err()
+        for cur := start; cur < end; cur++ {
+            if utils.IsCanceled(uploadCtx) {
+                break
            }
+            offset := int64(cur-1) * chunkSize
            curSize := chunkSize
-            if j == chunkCount {
+            if cur == chunkCount {
                curSize = lastChunkSize
            }
-            err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.NewSectionReader(tmpF, chunkSize*int64(j-1), curSize), curSize, false, getS3UploadUrl)
-            if err != nil {
-                return err
-            }
-            up(float64(j) * 100 / float64(chunkCount))
+            var reader *stream.SectionReader
+            var rateLimitedRd io.Reader
+            threadG.GoWithLifecycle(errgroup.Lifecycle{
+                Before: func(ctx context.Context) error {
+                    if reader == nil {
var err error
reader, err = ss.GetSectionReader(offset, curSize)
if err != nil {
return err
}
rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
}
return nil
},
Do: func(ctx context.Context) error {
reader.Seek(0, io.SeekStart)
uploadUrl := s3PreSignedUrls.Data.PreSignedUrls[strconv.Itoa(cur)]
if uploadUrl == "" {
return fmt.Errorf("upload url is empty, s3PreSignedUrls: %+v", s3PreSignedUrls)
}
reader.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, rateLimitedRd)
if err != nil {
return err
}
req.ContentLength = curSize
//req.Header.Set("Content-Length", strconv.FormatInt(curSize, 10))
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode == http.StatusForbidden {
singleflight.AnyGroup.Do(fmt.Sprintf("Pan123.newUpload_%p", threadG), func() (any, error) {
newS3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, cur, end)
if err != nil {
return nil, err
}
s3PreSignedUrls.Data.PreSignedUrls = newS3PreSignedUrls.Data.PreSignedUrls
return nil, nil
})
if err != nil {
return err
}
return fmt.Errorf("upload s3 chunk %d failed, status code: %d", cur, res.StatusCode)
}
if res.StatusCode != http.StatusOK {
body, err := io.ReadAll(res.Body)
if err != nil {
return err
}
return fmt.Errorf("upload s3 chunk %d failed, status code: %d, body: %s", cur, res.StatusCode, body)
}
progress := 10.0 + 85.0*float64(threadG.Success())/float64(chunkCount)
up(progress)
return nil
},
After: func(err error) {
ss.FreeSectionReader(reader)
},
})
        }
    }
+    if err := threadG.Wait(); err != nil {
+        return err
+    }
+    defer up(100)
    // complete s3 upload
    return d.completeS3(ctx, upReq, file, chunkCount > 1)
}
func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSignedUrls *S3PreSignedURLs, cur, end int, reader *io.SectionReader, curSize int64, retry bool, getS3UploadUrl func(ctx context.Context, upReq *UploadResp, start int, end int) (*S3PreSignedURLs, error)) error {
uploadUrl := s3PreSignedUrls.Data.PreSignedUrls[strconv.Itoa(cur)]
if uploadUrl == "" {
return fmt.Errorf("upload url is empty, s3PreSignedUrls: %+v", s3PreSignedUrls)
}
req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, reader))
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = curSize
//req.Header.Set("Content-Length", strconv.FormatInt(curSize, 10))
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode == http.StatusForbidden {
if retry {
return fmt.Errorf("upload s3 chunk %d failed, status code: %d", cur, res.StatusCode)
}
// refresh s3 pre signed urls
newS3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, cur, end)
if err != nil {
return err
}
s3PreSignedUrls.Data.PreSignedUrls = newS3PreSignedUrls.Data.PreSignedUrls
// retry
reader.Seek(0, io.SeekStart)
return d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, cur, end, reader, curSize, true, getS3UploadUrl)
}
if res.StatusCode != http.StatusOK {
body, err := io.ReadAll(res.Body)
if err != nil {
return err
}
return fmt.Errorf("upload s3 chunk %d failed, status code: %d, body: %s", cur, res.StatusCode, body)
}
return nil
}
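
A quick sanity check of the chunk accounting in the new newUpload, for an illustrative 100 MiB stream:

# chunkSize     = 16 MiB
# chunkCount    = (100 + 16 - 1) / 16 = 7
# lastChunkSize = 100 % 16 = 4 MiB    (a remainder of 0 means the last chunk is a full 16 MiB)
# thread        = min(chunkCount, UploadThread) = min(7, 3) = 3 with the new default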

View File

@ -2,7 +2,9 @@ package _123_open
import (
    "context"
+    "fmt"
    "strconv"
+    "time"

    "github.com/OpenListTeam/OpenList/v4/internal/driver"
    "github.com/OpenListTeam/OpenList/v4/internal/errs"

@ -15,6 +17,7 @@ import (

type Open123 struct {
    model.Storage
    Addition
+    UID uint64
}

func (d *Open123) Config() driver.Config {
@ -67,13 +70,45 @@ func (d *Open123) List(ctx context.Context, dir model.Obj, args model.ListArgs)
func (d *Open123) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
    fileId, _ := strconv.ParseInt(file.GetID(), 10, 64)
if d.DirectLink {
res, err := d.getDirectLink(fileId)
if err != nil {
return nil, err
}
if d.DirectLinkPrivateKey == "" {
duration := 365 * 24 * time.Hour // 缓存1年
return &model.Link{
URL: res.Data.URL,
Expiration: &duration,
}, nil
}
uid, err := d.getUID()
if err != nil {
return nil, err
}
duration := time.Duration(d.DirectLinkValidDuration) * time.Minute
newURL, err := d.SignURL(res.Data.URL, d.DirectLinkPrivateKey,
uid, duration)
if err != nil {
return nil, err
}
return &model.Link{
URL: newURL,
Expiration: &duration,
}, nil
}
res, err := d.getDownloadInfo(fileId) res, err := d.getDownloadInfo(fileId)
if err != nil { if err != nil {
return nil, err return nil, err
} }
link := model.Link{URL: res.Data.DownloadUrl} return &model.Link{URL: res.Data.DownloadUrl}, nil
return &link, nil
} }
func (d *Open123) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
@ -95,6 +130,22 @@ func (d *Open123) Rename(ctx context.Context, srcObj model.Obj, newName string)
}

func (d *Open123) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
+ // try to implement copy via upload + MD5 instant upload
+ // 1. create the file
+ // parentFileID: id of the parent directory; use 0 when uploading to the root
+ parentFileId, err := strconv.ParseInt(dstDir.GetID(), 10, 64)
+ if err != nil {
+ return fmt.Errorf("parse parentFileID error: %v", err)
+ }
+ etag := srcObj.(File).Etag
+ createResp, err := d.create(parentFileId, srcObj.GetName(), etag, srcObj.GetSize(), 2, false)
+ if err != nil {
+ return err
+ }
+ // instant upload hit?
+ if createResp.Data.Reuse {
+ return nil
+ }
return errs.NotSupport
}
@ -104,27 +155,79 @@ func (d *Open123) Remove(ctx context.Context, obj model.Obj) error {
return d.trash(fileId)
}

- func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
+ func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
+ // 1. create the file
+ // parentFileID: id of the parent directory; use 0 when uploading to the root
parentFileId, err := strconv.ParseInt(dstDir.GetID(), 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("parse parentFileID error: %v", err)
+ }
+ // etag: the file's md5
etag := file.GetHash().GetHash(utils.MD5)
if len(etag) < utils.MD5.Width {
- cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
- up = model.UpdateProgressWithRange(up, 50, 100)
- _, etag, err = stream.CacheFullInTempFileAndHash(file, cacheFileProgress, utils.MD5)
+ _, etag, err = stream.CacheFullAndHash(file, &up, utils.MD5)
if err != nil {
- return err
+ return nil, err
}
}
createResp, err := d.create(parentFileId, file.GetName(), etag, file.GetSize(), 2, false)
if err != nil {
- return err
+ return nil, err
}
- // instant upload?
if createResp.Data.Reuse {
- return nil
+ // a correct FileID is returned only when the instant upload succeeds; otherwise it is 0
+ if createResp.Data.FileID != 0 {
+ return File{
+ FileName: file.GetName(),
+ Size: file.GetSize(),
+ FileId: createResp.Data.FileID,
+ Type: 2,
+ Etag: etag,
+ }, nil
+ }
}
- return d.Upload(ctx, file, createResp, up)
+ // 2. upload the parts
+ err = d.Upload(ctx, file, createResp, up)
+ if err != nil {
+ return nil, err
+ }
+ // 3. finalize the upload
+ for range 60 {
+ uploadCompleteResp, err := d.complete(createResp.Data.PreuploadID)
+ // error code 20103 is returned for unknown reasons; the docs do not explain it
+ if err == nil && uploadCompleteResp.Data.Completed && uploadCompleteResp.Data.FileID != 0 {
+ up(100)
+ return File{
+ FileName: file.GetName(),
+ Size: file.GetSize(),
+ FileId: uploadCompleteResp.Data.FileID,
+ Type: 2,
+ Etag: etag,
+ }, nil
+ }
+ // when the API returns completed=false, poll this endpoint again after one second for the final result
+ time.Sleep(time.Second)
+ }
+ return nil, fmt.Errorf("upload complete timeout")
+ }
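The completion step above polls the upload_complete endpoint up to 60 times, sleeping one second between attempts, because the API reports completed=false until the file is finalized. The same bounded-polling shape in isolation (the check function is a stand-in for the real d.complete call):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// pollUntil calls check once per interval until it reports done,
// the attempt budget is spent, or ctx is cancelled.
func pollUntil(ctx context.Context, attempts int, interval time.Duration, check func() (bool, error)) error {
	for i := 0; i < attempts; i++ {
		done, err := check()
		if err == nil && done {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(interval):
		}
	}
	return errors.New("polling timed out")
}

func main() {
	n := 0
	err := pollUntil(context.Background(), 60, time.Second, func() (bool, error) {
		n++
		return n >= 3, nil // pretend the upload finalizes on the third poll
	})
	fmt.Println(err)
}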
func (d *Open123) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
userInfo, err := d.getUserInfo()
if err != nil {
return nil, err
}
total := userInfo.Data.SpacePermanent + userInfo.Data.SpaceTemp
free := total - userInfo.Data.SpaceUsed
return &model.StorageDetails{
DiskUsage: model.DiskUsage{
TotalSpace: total,
FreeSpace: free,
},
}, nil
}

var _ driver.Driver = (*Open123)(nil)
var _ driver.PutResult = (*Open123)(nil)

View File

@ -23,6 +23,11 @@ type Addition struct {
// number of upload threads
UploadThread int `json:"UploadThread" type:"number" default:"3" help:"the threads of upload"`
+ // use direct links
+ DirectLink bool `json:"DirectLink" type:"bool" default:"false" required:"false" help:"use direct link when download file"`
+ DirectLinkPrivateKey string `json:"DirectLinkPrivateKey" required:"false" help:"private key for direct link, if URL authentication is enabled"`
+ DirectLinkValidDuration int64 `json:"DirectLinkValidDuration" type:"number" default:"30" required:"false" help:"minutes, if URL authentication is enabled"`
driver.RootID
}

View File

@ -73,7 +73,9 @@ func (f File) GetName() string {
}

func (f File) CreateTime() time.Time {
- parsedTime, err := time.Parse("2006-01-02 15:04:05", f.CreateAt)
+ // the API returns times without zone info; assume UTC+8
+ loc := time.FixedZone("UTC+8", 8*60*60)
+ parsedTime, err := time.ParseInLocation("2006-01-02 15:04:05", f.CreateAt, loc)
if err != nil {
return time.Now()
}
@ -81,7 +83,9 @@ func (f File) CreateTime() time.Time {
}

func (f File) ModTime() time.Time {
- parsedTime, err := time.Parse("2006-01-02 15:04:05", f.UpdateAt)
+ // the API returns times without zone info; assume UTC+8
+ loc := time.FixedZone("UTC+8", 8*60*60)
+ parsedTime, err := time.ParseInLocation("2006-01-02 15:04:05", f.UpdateAt, loc)
if err != nil {
return time.Now()
}
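The switch to time.ParseInLocation matters because the API returns wall-clock strings with no zone: time.Parse would interpret them as UTC and skew every timestamp by eight hours. A small demonstration (the sample value is made up):

package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05"
	s := "2025-01-02 08:00:00"

	utc, _ := time.Parse(layout, s) // silently assumes UTC
	cst := time.FixedZone("UTC+8", 8*60*60)
	local, _ := time.ParseInLocation(layout, s, cst) // interprets the string as UTC+8

	fmt.Println(utc.Unix() - local.Unix()) // 28800: an eight-hour skew
}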
@ -123,19 +127,19 @@ type RefreshTokenResp struct {
type UserInfoResp struct {
BaseResp
Data struct {
- UID int64 `json:"uid"`
- Username string `json:"username"`
- DisplayName string `json:"displayName"`
- HeadImage string `json:"headImage"`
- Passport string `json:"passport"`
- Mail string `json:"mail"`
- SpaceUsed int64 `json:"spaceUsed"`
- SpacePermanent int64 `json:"spacePermanent"`
- SpaceTemp int64 `json:"spaceTemp"`
- SpaceTempExpr string `json:"spaceTempExpr"`
- Vip bool `json:"vip"`
- DirectTraffic int64 `json:"directTraffic"`
- IsHideUID bool `json:"isHideUID"`
+ UID uint64 `json:"uid"`
+ // Username string `json:"username"`
+ // DisplayName string `json:"displayName"`
+ // HeadImage string `json:"headImage"`
+ // Passport string `json:"passport"`
+ // Mail string `json:"mail"`
+ SpaceUsed uint64 `json:"spaceUsed"`
+ SpacePermanent uint64 `json:"spacePermanent"`
+ SpaceTemp uint64 `json:"spaceTemp"`
+ // SpaceTempExpr int64 `json:"spaceTempExpr"`
+ // Vip bool `json:"vip"`
+ // DirectTraffic int64 `json:"directTraffic"`
+ // IsHideUID bool `json:"isHideUID"`
} `json:"data"`
}
@ -154,52 +158,30 @@ type DownloadInfoResp struct {
} `json:"data"`
}

+ type DirectLinkResp struct {
+ BaseResp
+ Data struct {
+ URL string `json:"url"`
+ } `json:"data"`
+ }
+
+ // response of create-file V2
type UploadCreateResp struct {
BaseResp
Data struct {
FileID int64 `json:"fileID"`
PreuploadID string `json:"preuploadID"`
Reuse bool `json:"reuse"`
SliceSize int64 `json:"sliceSize"`
+ Servers []string `json:"servers"`
} `json:"data"`
}

- type UploadUrlResp struct {
- BaseResp
- Data struct {
- PresignedURL string `json:"presignedURL"`
- }
- }
-
+ // response of upload-complete V2
type UploadCompleteResp struct {
BaseResp
Data struct {
- Async bool `json:"async"`
Completed bool `json:"completed"`
FileID int64 `json:"fileID"`
} `json:"data"`
}
type UploadAsyncResp struct {
BaseResp
Data struct {
Completed bool `json:"completed"`
FileID int64 `json:"fileID"`
} `json:"data"`
}
type UploadResp struct {
BaseResp
Data struct {
AccessKeyId string `json:"AccessKeyId"`
Bucket string `json:"Bucket"`
Key string `json:"Key"`
SecretAccessKey string `json:"SecretAccessKey"`
SessionToken string `json:"SessionToken"`
FileId int64 `json:"FileId"`
Reuse bool `json:"Reuse"`
EndPoint string `json:"EndPoint"`
StorageNode string `json:"StorageNode"`
UploadId string `json:"UploadId"`
} `json:"data"`
}

View File

@ -1,21 +1,28 @@
package _123_open

import (
+ "bytes"
"context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "mime/multipart"
"net/http"
+ "strconv"
"strings"
"time"

"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
+ "github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
- "github.com/OpenListTeam/OpenList/v4/pkg/http_range"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
)

+ // create file V2
func (d *Open123) create(parentFileID int64, filename string, etag string, size int64, duplicate int, containDir bool) (*UploadCreateResp, error) {
var resp UploadCreateResp
_, err := d.Request(UploadCreate, http.MethodPost, func(req *resty.Request) {
@ -34,21 +41,136 @@ func (d *Open123) create(parentFileID int64, filename string, etag string, size
return &resp, nil
}
- func (d *Open123) url(preuploadID string, sliceNo int64) (string, error) {
- // get upload url
- var resp UploadUrlResp
- _, err := d.Request(UploadUrl, http.MethodPost, func(req *resty.Request) {
- req.SetBody(base.Json{
- "preuploadId": preuploadID,
- "sliceNo": sliceNo,
- })
- }, &resp)
+ // upload parts V2
+ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createResp *UploadCreateResp, up driver.UpdateProgress) error {
+ uploadDomain := createResp.Data.Servers[0]
+ size := file.GetSize()
+ chunkSize := createResp.Data.SliceSize
+
+ ss, err := stream.NewStreamSectionReader(file, int(chunkSize), &up)
if err != nil {
- return "", err
+ return err
}
- return resp.Data.PresignedURL, nil
+
+ uploadNums := (size + chunkSize - 1) / chunkSize
thread := min(int(uploadNums), d.UploadThread)
threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
for partIndex := range uploadNums {
if utils.IsCanceled(uploadCtx) {
break
}
partIndex := partIndex
partNumber := partIndex + 1 // part numbers start at 1
offset := partIndex * chunkSize
size := min(chunkSize, size-offset)
var reader *stream.SectionReader
var rateLimitedRd io.Reader
sliceMD5 := ""
// multipart form buffer
b := bytes.NewBuffer(make([]byte, 0, 2048))
threadG.GoWithLifecycle(errgroup.Lifecycle{
Before: func(ctx context.Context) error {
if reader == nil {
var err error
// one reader per part
reader, err = ss.GetSectionReader(offset, size)
if err != nil {
return err
}
// compute this part's MD5
sliceMD5, err = utils.HashReader(utils.MD5, reader)
if err != nil {
return err
}
}
return nil
},
Do: func(ctx context.Context) error {
// rewind the part reader: HashReader (or a previous failed attempt) has already read it to EOF
reader.Seek(0, io.SeekStart)
b.Reset()
w := multipart.NewWriter(b)
// add the form fields
err = w.WriteField("preuploadID", createResp.Data.PreuploadID)
if err != nil {
return err
}
err = w.WriteField("sliceNo", strconv.FormatInt(partNumber, 10))
if err != nil {
return err
}
err = w.WriteField("sliceMD5", sliceMD5)
if err != nil {
return err
}
// create the file part (its content is streamed in below)
_, err = w.CreateFormFile("slice", fmt.Sprintf("%s.part%d", file.GetName(), partNumber))
if err != nil {
return err
}
headSize := b.Len()
err = w.Close()
if err != nil {
return err
}
head := bytes.NewReader(b.Bytes()[:headSize])
tail := bytes.NewReader(b.Bytes()[headSize:])
rateLimitedRd = driver.NewLimitedUploadStream(ctx, io.MultiReader(head, reader, tail))
// create the request and set its headers
req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadDomain+"/upload/v2/file/slice", rateLimitedRd)
if err != nil {
return err
}
// set the request headers
req.Header.Add("Authorization", "Bearer "+d.AccessToken)
req.Header.Add("Content-Type", w.FormDataContentType())
req.Header.Add("Platform", "open_platform")
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != 200 {
return fmt.Errorf("slice %d upload failed, status code: %d", partNumber, res.StatusCode)
}
var resp BaseResp
respBody, err := io.ReadAll(res.Body)
if err != nil {
return err
}
err = json.Unmarshal(respBody, &resp)
if err != nil {
return err
}
if resp.Code != 0 {
return fmt.Errorf("slice %d upload failed: %s", partNumber, resp.Message)
}
progress := 10.0 + 85.0*float64(threadG.Success())/float64(uploadNums)
up(progress)
return nil
},
After: func(err error) {
ss.FreeSectionReader(reader)
},
})
}
if err := threadG.Wait(); err != nil {
return err
}
return nil
}
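The slice upload above avoids buffering whole chunks inside the multipart body: it writes the form prologue and epilogue into a small buffer, splits that buffer at headSize, and streams head + file section + tail through io.MultiReader. A reduced, standalone sketch of the same trick (field names here are illustrative):

package main

import (
	"bytes"
	"fmt"
	"io"
	"mime/multipart"
	"strings"
)

// multipartStream wraps content as a multipart body without buffering it:
// only the form prologue (head) and epilogue (tail) live in memory.
func multipartStream(content io.Reader) (io.Reader, string, error) {
	var b bytes.Buffer
	w := multipart.NewWriter(&b)
	if err := w.WriteField("sliceNo", "1"); err != nil {
		return nil, "", err
	}
	if _, err := w.CreateFormFile("slice", "chunk.part1"); err != nil {
		return nil, "", err
	}
	headSize := b.Len() // everything up to and including the file part header
	if err := w.Close(); err != nil { // writes the closing boundary after headSize
		return nil, "", err
	}
	head := bytes.NewReader(b.Bytes()[:headSize])
	tail := bytes.NewReader(b.Bytes()[headSize:])
	return io.MultiReader(head, content, tail), w.FormDataContentType(), nil
}

func main() {
	body, ctype, err := multipartStream(strings.NewReader("chunk bytes"))
	if err != nil {
		panic(err)
	}
	out, _ := io.ReadAll(body)
	fmt.Println(ctype)
	fmt.Println(len(out), "bytes of multipart body")
}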
+ // upload complete
func (d *Open123) complete(preuploadID string) (*UploadCompleteResp, error) {
var resp UploadCompleteResp
_, err := d.Request(UploadComplete, http.MethodPost, func(req *resty.Request) {
@ -61,91 +183,3 @@ func (d *Open123) complete(preuploadID string) (*UploadCompleteResp, error) {
}
return &resp, nil
}
func (d *Open123) async(preuploadID string) (*UploadAsyncResp, error) {
var resp UploadAsyncResp
_, err := d.Request(UploadAsync, http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"preuploadID": preuploadID,
})
}, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}
func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createResp *UploadCreateResp, up driver.UpdateProgress) error {
size := file.GetSize()
chunkSize := createResp.Data.SliceSize
uploadNums := (size + chunkSize - 1) / chunkSize
threadG, uploadCtx := errgroup.NewGroupWithContext(ctx, d.UploadThread,
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
for partIndex := int64(0); partIndex < uploadNums; partIndex++ {
if utils.IsCanceled(uploadCtx) {
return ctx.Err()
}
partIndex := partIndex
partNumber := partIndex + 1 // part numbers start at 1
offset := partIndex * chunkSize
size := min(chunkSize, size-offset)
limitedReader, err := file.RangeRead(http_range.Range{
Start: offset,
Length: size})
if err != nil {
return err
}
limitedReader = driver.NewLimitedUploadStream(ctx, limitedReader)
threadG.Go(func(ctx context.Context) error {
uploadPartUrl, err := d.url(createResp.Data.PreuploadID, partNumber)
if err != nil {
return err
}
req, err := http.NewRequestWithContext(ctx, "PUT", uploadPartUrl, limitedReader)
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = size
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
_ = res.Body.Close()
progress := 10.0 + 85.0*float64(threadG.Success())/float64(uploadNums)
up(progress)
return nil
})
}
if err := threadG.Wait(); err != nil {
return err
}
uploadCompleteResp, err := d.complete(createResp.Data.PreuploadID)
if err != nil {
return err
}
if uploadCompleteResp.Data.Async == false || uploadCompleteResp.Data.Completed {
return nil
}
for {
uploadAsyncResp, err := d.async(createResp.Data.PreuploadID)
if err != nil {
return err
}
if uploadAsyncResp.Data.Completed {
break
}
}
up(100)
return nil
}

View File

@ -1,15 +1,20 @@
package _123_open

import (
+ "crypto/md5"
"encoding/json"
"errors"
+ "fmt"
"net/http"
+ "net/url"
"strconv"
+ "strings"
"time"

"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/go-resty/resty/v2"
+ "github.com/google/uuid"
log "github.com/sirupsen/logrus"
)

@ -19,16 +24,15 @@ var ( // AccessToken QPS limits differ by scenario; modularized as below for easier
AccessToken = InitApiInfo(Api+"/api/v1/access_token", 1)
RefreshToken = InitApiInfo(Api+"/api/v1/oauth2/access_token", 1)
UserInfo = InitApiInfo(Api+"/api/v1/user/info", 1)
- FileList = InitApiInfo(Api+"/api/v2/file/list", 4)
- DownloadInfo = InitApiInfo(Api+"/api/v1/file/download_info", 0)
+ FileList = InitApiInfo(Api+"/api/v2/file/list", 3)
+ DownloadInfo = InitApiInfo(Api+"/api/v1/file/download_info", 5)
+ DirectLink = InitApiInfo(Api+"/api/v1/direct-link/url", 5)
Mkdir = InitApiInfo(Api+"/upload/v1/file/mkdir", 2)
Move = InitApiInfo(Api+"/api/v1/file/move", 1)
Rename = InitApiInfo(Api+"/api/v1/file/name", 1)
Trash = InitApiInfo(Api+"/api/v1/file/trash", 2)
- UploadCreate = InitApiInfo(Api+"/upload/v1/file/create", 2)
- UploadUrl = InitApiInfo(Api+"/upload/v1/file/get_upload_url", 0)
- UploadComplete = InitApiInfo(Api+"/upload/v1/file/upload_complete", 0)
- UploadAsync = InitApiInfo(Api+"/upload/v1/file/upload_async_result", 1)
+ UploadCreate = InitApiInfo(Api+"/upload/v2/file/create", 2)
+ UploadComplete = InitApiInfo(Api+"/upload/v2/file/upload_complete", 0)
)
func (d *Open123) Request(apiInfo *ApiInfo, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) { func (d *Open123) Request(apiInfo *ApiInfo, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
@ -82,8 +86,24 @@ func (d *Open123) Request(apiInfo *ApiInfo, method string, callback base.ReqCall
} }
func (d *Open123) flushAccessToken() error {
- if d.Addition.ClientID != "" {
- if d.Addition.ClientSecret != "" {
+ if d.ClientID != "" {
+ if d.RefreshToken != "" {
var resp RefreshTokenResp
_, err := d.Request(RefreshToken, http.MethodPost, func(req *resty.Request) {
req.SetQueryParam("client_id", d.ClientID)
if d.ClientSecret != "" {
req.SetQueryParam("client_secret", d.ClientSecret)
}
req.SetQueryParam("grant_type", "refresh_token")
req.SetQueryParam("refresh_token", d.RefreshToken)
}, &resp)
if err != nil {
return err
}
d.AccessToken = resp.AccessToken
d.RefreshToken = resp.RefreshToken
op.MustSaveDriverStorage(d)
} else if d.ClientSecret != "" {
var resp AccessTokenResp var resp AccessTokenResp
_, err := d.Request(AccessToken, http.MethodPost, func(req *resty.Request) { _, err := d.Request(AccessToken, http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{ req.SetBody(base.Json{
@ -96,24 +116,38 @@ func (d *Open123) flushAccessToken() error {
} }
d.AccessToken = resp.Data.AccessToken d.AccessToken = resp.Data.AccessToken
op.MustSaveDriverStorage(d) op.MustSaveDriverStorage(d)
} else if d.Addition.RefreshToken != "" {
var resp RefreshTokenResp
_, err := d.Request(RefreshToken, http.MethodPost, func(req *resty.Request) {
req.SetQueryParam("client_id", d.ClientID)
req.SetQueryParam("grant_type", "refresh_token")
req.SetQueryParam("refresh_token", d.Addition.RefreshToken)
}, &resp)
if err != nil {
return err
}
d.AccessToken = resp.AccessToken
d.RefreshToken = resp.RefreshToken
op.MustSaveDriverStorage(d)
}
}
return nil
}
func (d *Open123) SignURL(originURL, privateKey string, uid uint64, validDuration time.Duration) (newURL string, err error) {
// generate a Unix timestamp
ts := time.Now().Add(validDuration).Unix()

// generate a random string (a UUID is recommended; it must not contain hyphens)
rand := strings.ReplaceAll(uuid.New().String(), "-", "")

// parse the URL
objURL, err := url.Parse(originURL)
if err != nil {
return "", err
}

// string to sign, format: path-timestamp-rand-uid-privateKey
unsignedStr := fmt.Sprintf("%s-%d-%s-%d-%s", objURL.Path, ts, rand, uid, privateKey)
md5Hash := md5.Sum([]byte(unsignedStr))
// auth parameter, format: timestamp-rand-uid-md5hash
authKey := fmt.Sprintf("%d-%s-%d-%x", ts, rand, uid, md5Hash)

// append the auth parameter to the URL query
v := objURL.Query()
v.Add("auth_key", authKey)
objURL.RawQuery = v.Encode()

return objURL.String(), nil
}
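For reference, the scheme above MD5-hashes `path-timestamp-rand-uid-privateKey` and appends `auth_key=timestamp-rand-uid-md5hex` to the query string. A compact standalone version under the same assumptions (the test values are invented):

package main

import (
	"crypto/md5"
	"fmt"
	"net/url"
	"time"
)

// signURL mirrors the auth_key scheme: sign the path together with an
// expiry timestamp, a random nonce, the uid, and the private key.
func signURL(rawURL, privateKey string, uid uint64, valid time.Duration, nonce string) (string, error) {
	u, err := url.Parse(rawURL)
	if err != nil {
		return "", err
	}
	ts := time.Now().Add(valid).Unix()
	// string to sign: path-timestamp-rand-uid-privateKey
	sum := md5.Sum([]byte(fmt.Sprintf("%s-%d-%s-%d-%s", u.Path, ts, nonce, uid, privateKey)))
	q := u.Query()
	q.Add("auth_key", fmt.Sprintf("%d-%s-%d-%x", ts, nonce, uid, sum))
	u.RawQuery = q.Encode()
	return u.String(), nil
}

func main() {
	signed, _ := signURL("https://example.com/file/a.bin", "secret", 42, 30*time.Minute, "abcdef0123456789")
	fmt.Println(signed)
}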
func (d *Open123) getUserInfo() (*UserInfoResp, error) {
var resp UserInfoResp
@ -124,6 +158,18 @@ func (d *Open123) getUserInfo() (*UserInfoResp, error) {
return &resp, nil
}
func (d *Open123) getUID() (uint64, error) {
if d.UID != 0 {
return d.UID, nil
}
resp, err := d.getUserInfo()
if err != nil {
return 0, err
}
d.UID = resp.Data.UID
return resp.Data.UID, nil
}
func (d *Open123) getFiles(parentFileId int64, limit int, lastFileId int64) (*FileListResp, error) {
var resp FileListResp
@ -161,6 +207,21 @@ func (d *Open123) getDownloadInfo(fileId int64) (*DownloadInfoResp, error) {
return &resp, nil
}
func (d *Open123) getDirectLink(fileId int64) (*DirectLinkResp, error) {
var resp DirectLinkResp
_, err := d.Request(DirectLink, http.MethodGet, func(req *resty.Request) {
req.SetQueryParams(map[string]string{
"fileID": strconv.FormatInt(fileId, 10),
})
}, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}
func (d *Open123) mkdir(parentID int64, name string) error {
_, err := d.Request(Mkdir, http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{

View File

@ -70,14 +70,6 @@ func (d *Pan123Share) List(ctx context.Context, dir model.Obj, args model.ListAr
func (d *Pan123Share) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
// TODO return link of file, required
if f, ok := file.(File); ok {
- //var resp DownResp
- var headers map[string]string
- if !utils.IsLocalIPAddr(args.IP) {
- headers = map[string]string{
- //"X-Real-IP": "1.1.1.1",
- "X-Forwarded-For": args.IP,
- }
- }
data := base.Json{
"shareKey": d.ShareKey,
"SharePwd": d.SharePwd,
@ -87,25 +79,27 @@ func (d *Pan123Share) Link(ctx context.Context, file model.Obj, args model.LinkA
"size": f.Size,
}
resp, err := d.request(DownloadInfo, http.MethodPost, func(req *resty.Request) {
- req.SetBody(data).SetHeaders(headers)
+ req.SetBody(data)
}, nil)
if err != nil {
return nil, err
}
downloadUrl := utils.Json.Get(resp, "data", "DownloadURL").ToString()
- u, err := url.Parse(downloadUrl)
+ ou, err := url.Parse(downloadUrl)
if err != nil {
return nil, err
}
- nu := u.Query().Get("params")
+ u_ := ou.String()
+ nu := ou.Query().Get("params")
if nu != "" {
du, _ := base64.StdEncoding.DecodeString(nu)
- u, err = url.Parse(string(du))
+ u, err := url.Parse(string(du))
if err != nil {
return nil, err
}
+ u_ = u.String()
}
- u_ := u.String()
log.Debug("download url: ", u_)
res, err := base.NoRedirectClient.R().SetHeader("Referer", "https://www.123pan.com/").Get(u_)
if err != nil {
@ -122,7 +116,7 @@ func (d *Pan123Share) Link(ctx context.Context, file model.Obj, args model.LinkA
link.URL = utils.Json.Get(res.Body(), "data", "redirect_url").ToString()
}
link.Header = http.Header{
- "Referer": []string{"https://www.123pan.com/"},
+ "Referer": []string{fmt.Sprintf("%s://%s/", ou.Scheme, ou.Host)},
}
return &link, nil
}

View File

@ -24,7 +24,7 @@ type File struct {
}

func (f File) GetHash() utils.HashInfo {
- return utils.HashInfo{}
+ return utils.NewHashInfo(utils.MD5, f.Etag)
}

func (f File) GetPath() string {

View File

@ -522,32 +522,27 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
var err error
fullHash := stream.GetHash().GetHash(utils.SHA256)
if len(fullHash) != utils.SHA256.Width {
- cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
- up = model.UpdateProgressWithRange(up, 50, 100)
- _, fullHash, err = streamPkg.CacheFullInTempFileAndHash(stream, cacheFileProgress, utils.SHA256)
+ _, fullHash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA256)
if err != nil {
return err
}
}
size := stream.GetSize()
- var partSize = d.getPartSize(size)
- part := size / partSize
- if size%partSize > 0 {
- part++
- } else if part == 0 {
- part = 1
+ partSize := d.getPartSize(size)
+ part := int64(1)
+ if size > partSize {
+ part = (size + partSize - 1) / partSize
}
+ // generate all partInfos
partInfos := make([]PartInfo, 0, part)
for i := int64(0); i < part; i++ {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
start := i * partSize
- byteSize := size - start
- if byteSize > partSize {
- byteSize = partSize
- }
+ byteSize := min(size-start, partSize)
partNumber := i + 1
partInfo := PartInfo{
PartNumber: partNumber,
@ -595,17 +590,20 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
// resp.Data.RapidUpload: true means rapid upload is supported, but here we simply check whether part upload URLs were returned
// conflicts still have to be handled manually in the rapid-upload case
if resp.Data.PartInfos != nil {
- // read the upload URLs of the first 100 parts
- uploadPartInfos := resp.Data.PartInfos
+ // Progress
+ p := driver.NewProgress(size, up)
+ rateLimited := driver.NewLimitedUploadStream(ctx, stream)

- // fetch the upload URLs of the remaining parts
- for i := 101; i < len(partInfos); i += 100 {
- end := i + 100
- if end > len(partInfos) {
- end = len(partInfos)
- }
+ // upload the first 100 parts
+ err = d.uploadPersonalParts(ctx, partInfos, resp.Data.PartInfos, rateLimited, p)
+ if err != nil {
+ return err
+ }

+ // if parts remain, fetch their upload URLs in batches and upload them
+ for i := 100; i < len(partInfos); i += 100 {
+ end := min(i+100, len(partInfos))
batchPartInfos := partInfos[i:end]
moredata := base.Json{
"fileId": resp.Data.FileId,
"uploadId": resp.Data.UploadId,
@ -621,45 +619,13 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
if err != nil {
return err
}
- uploadPartInfos = append(uploadPartInfos, moreresp.Data.PartInfos...)
- }
-
- // Progress
- p := driver.NewProgress(size, up)
- rateLimited := driver.NewLimitedUploadStream(ctx, stream)
-
- // upload all parts
- for _, uploadPartInfo := range uploadPartInfos {
- index := uploadPartInfo.PartNumber - 1
- partSize := partInfos[index].PartSize
- log.Debugf("[139] uploading part %+v/%+v", index, len(uploadPartInfos))
- limitReader := io.LimitReader(rateLimited, partSize)
- // Update Progress
- r := io.TeeReader(limitReader, p)
- req, err := http.NewRequest("PUT", uploadPartInfo.UploadUrl, r)
+ err = d.uploadPersonalParts(ctx, partInfos, moreresp.Data.PartInfos, rateLimited, p)
if err != nil {
return err
}
- req = req.WithContext(ctx)
- req.Header.Set("Content-Type", "application/octet-stream")
- req.Header.Set("Content-Length", fmt.Sprint(partSize))
- req.Header.Set("Origin", "https://yun.139.com")
- req.Header.Set("Referer", "https://yun.139.com/")
- req.ContentLength = partSize
- res, err := base.HttpClient.Do(req)
- if err != nil {
- return err
- }
- _ = res.Body.Close()
- log.Debugf("[139] uploaded: %+v", res)
- if res.StatusCode != http.StatusOK {
- return fmt.Errorf("unexpected status code: %d", res.StatusCode)
- }
}
+ // after all parts are uploaded, complete
data = base.Json{
"contentHash": fullHash,
"contentHashAlgorithm": "SHA256",
@ -788,12 +754,10 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
size := stream.GetSize()
// Progress
p := driver.NewProgress(size, up)
- var partSize = d.getPartSize(size)
- part := size / partSize
- if size%partSize > 0 {
- part++
- } else if part == 0 {
- part = 1
+ partSize := d.getPartSize(size)
+ part := int64(1)
+ if size > partSize {
+ part = (size + partSize - 1) / partSize
}
rateLimited := driver.NewLimitedUploadStream(ctx, stream)
for i := int64(0); i < part; i++ {
@ -807,12 +771,10 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
limitReader := io.LimitReader(rateLimited, byteSize)
// Update Progress
r := io.TeeReader(limitReader, p)
- req, err := http.NewRequest("POST", resp.Data.UploadResult.RedirectionURL, r)
+ req, err := http.NewRequestWithContext(ctx, http.MethodPost, resp.Data.UploadResult.RedirectionURL, r)
if err != nil {
return err
}
- req = req.WithContext(ctx)
req.Header.Set("Content-Type", "text/plain;name="+unicode(stream.GetName()))
req.Header.Set("contentSize", strconv.FormatInt(size, 10))
req.Header.Set("range", fmt.Sprintf("bytes=%d-%d", start, start+byteSize-1))

View File

@ -1,9 +1,11 @@
package _139

import (
+ "context"
"encoding/base64"
"errors"
"fmt"
+ "io"
"net/http"
"net/url"
"path"
@ -13,6 +15,7 @@ import (
"time"

"github.com/OpenListTeam/OpenList/v4/drivers/base"
+ "github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
@ -623,3 +626,47 @@ func (d *Yun139) getPersonalCloudHost() string {
}
return d.PersonalCloudHost
}
func (d *Yun139) uploadPersonalParts(ctx context.Context, partInfos []PartInfo, uploadPartInfos []PersonalPartInfo, rateLimited *driver.RateLimitReader, p *driver.Progress) error {
// make sure the slice is sorted by PartNumber in ascending order
sort.Slice(uploadPartInfos, func(i, j int) bool {
return uploadPartInfos[i].PartNumber < uploadPartInfos[j].PartNumber
})
for _, uploadPartInfo := range uploadPartInfos {
index := uploadPartInfo.PartNumber - 1
if index < 0 || index >= len(partInfos) {
return fmt.Errorf("invalid PartNumber %d: index out of bounds (partInfos length: %d)", uploadPartInfo.PartNumber, len(partInfos))
}
partSize := partInfos[index].PartSize
log.Debugf("[139] uploading part %+v/%+v", index, len(partInfos))
limitReader := io.LimitReader(rateLimited, partSize)
r := io.TeeReader(limitReader, p)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadPartInfo.UploadUrl, r)
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Content-Length", fmt.Sprint(partSize))
req.Header.Set("Origin", "https://yun.139.com")
req.Header.Set("Referer", "https://yun.139.com/")
req.ContentLength = partSize
err = func() error {
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
log.Debugf("[139] uploaded: %+v", res)
if res.StatusCode != http.StatusOK {
body, _ := io.ReadAll(res.Body)
return fmt.Errorf("unexpected status code: %d, body: %s", res.StatusCode, string(body))
}
return nil
}()
if err != nil {
return err
}
}
return nil
}
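Because uploadPersonalParts consumes one shared rate-limited stream sequentially, part order matters: io.LimitReader carves the next partSize bytes off the stream and io.TeeReader mirrors them into the progress counter. A stripped-down sketch of that read pipeline, with the HTTP PUT omitted and sizes invented:

package main

import (
	"fmt"
	"io"
	"strings"
)

// countingWriter stands in for the driver's progress sink: it just tallies bytes.
type countingWriter struct{ n int64 }

func (c *countingWriter) Write(p []byte) (int, error) {
	c.n += int64(len(p))
	return len(p), nil
}

func main() {
	src := strings.NewReader("0123456789abcdefghij") // 20 bytes, pretend upload stream
	progress := &countingWriter{}
	partSizes := []int64{8, 8, 4}

	for i, size := range partSizes {
		// LimitReader carves the next part; TeeReader reports bytes as they pass
		part := io.TeeReader(io.LimitReader(src, size), progress)
		body, _ := io.ReadAll(part) // in the driver this body feeds an HTTP PUT
		fmt.Printf("part %d: %q\n", i+1, body)
	}
	fmt.Println("bytes reported to progress:", progress.n)
}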

View File

@ -365,11 +365,10 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
log.Debugf("uploadData: %+v", uploadData)
requestURL := uploadData.RequestURL
uploadHeaders := strings.Split(decodeURIComponent(uploadData.RequestHeader), "&")
- req, err := http.NewRequest(http.MethodPut, requestURL, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
+ req, err := http.NewRequestWithContext(ctx, http.MethodPut, requestURL, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
if err != nil {
return err
}
- req = req.WithContext(ctx)
for _, v := range uploadHeaders {
i := strings.Index(v, "=")
req.Header.Set(v[0:i], v[i+1:])

View File

@ -1,7 +1,6 @@
package _189_tv

import (
- "container/ring"
"context"
"net/http"
"strconv"
@ -12,18 +11,20 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
+ "github.com/OpenListTeam/OpenList/v4/pkg/cron"
"github.com/go-resty/resty/v2"
)

type Cloud189TV struct {
model.Storage
Addition
client *resty.Client
tokenInfo *AppSessionResp
uploadThread int
- familyTransferFolder *ring.Ring
- cleanFamilyTransferFile func()
storageConfig driver.Config
+ TempUuid string
+ cron *cron.Cron // newly added cron field
}

func (y *Cloud189TV) Config() driver.Config {
@ -79,10 +80,17 @@ func (y *Cloud189TV) Init(ctx context.Context) (err error) {
}
}

+ y.cron = cron.NewCron(time.Minute * 5)
+ y.cron.Do(y.keepAlive)
return
}

func (y *Cloud189TV) Drop(ctx context.Context) error {
+ if y.cron != nil {
+ y.cron.Stop()
+ y.cron = nil
+ }
return nil
}

View File

@ -8,7 +8,6 @@ import (
type Addition struct {
driver.RootID
AccessToken string `json:"access_token"`
- TempUuid string
OrderBy string `json:"order_by" type:"select" options:"filename,filesize,lastOpTime" default:"filename"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
Type string `json:"type" type:"select" options:"personal,family" default:"personal"`

View File

@ -5,17 +5,19 @@ import (
"encoding/base64"
"encoding/xml"
"fmt"
- "github.com/skip2/go-qrcode"
"io"
"net/http"
"strconv"
"strings"
"time"

+ "github.com/skip2/go-qrcode"
+
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
+ "github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"

"github.com/go-resty/resty/v2"
@ -64,6 +66,10 @@ func (y *Cloud189TV) AppKeySignatureHeader(url, method string) map[string]string
}

func (y *Cloud189TV) request(url, method string, callback base.ReqCallback, params map[string]string, resp interface{}, isFamily ...bool) ([]byte, error) {
+ return y.requestWithRetry(url, method, callback, params, resp, 0, isFamily...)
+ }
+
+ func (y *Cloud189TV) requestWithRetry(url, method string, callback base.ReqCallback, params map[string]string, resp interface{}, retryCount int, isFamily ...bool) ([]byte, error) {
req := y.client.R().SetQueryParams(clientSuffix())

if params != nil {
@ -89,7 +95,22 @@ func (y *Cloud189TV) request(url, method string, callback base.ReqCallback, para
if strings.Contains(res.String(), "userSessionBO is null") ||
strings.Contains(res.String(), "InvalidSessionKey") {
- return nil, errors.New("session expired")
+ // cap the retries to avoid unbounded recursion
+ if retryCount >= 3 {
+ y.Addition.AccessToken = ""
+ op.MustSaveDriverStorage(y)
+ return nil, errors.New("session expired after retry")
+ }
+ // try to refresh the session
+ if err := y.refreshSession(); err != nil {
+ // if the refresh fails, the AccessToken has expired too and a fresh login is required
+ y.Addition.AccessToken = ""
+ op.MustSaveDriverStorage(y)
+ return nil, errors.New("session expired")
+ }
+ // on success, replay the original request (incrementing the retry counter)
+ return y.requestWithRetry(url, method, callback, params, resp, retryCount+1, isFamily...)
}

// handle errors
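The retry path above is deliberately bounded: on a session error it refreshes once and replays the request, clearing the stored AccessToken only after three replays fail or the refresh itself errors. The same shape written iteratively (call and refresh are stand-ins for the driver's request and refreshSession):

package main

import (
	"errors"
	"fmt"
)

var errSessionExpired = errors.New("session expired")

// doWithSessionRetry replays call after each successful refresh,
// giving up once maxRetries replays have failed.
func doWithSessionRetry(maxRetries int, call, refresh func() error) error {
	for attempt := 0; ; attempt++ {
		err := call()
		if err == nil || !errors.Is(err, errSessionExpired) {
			return err
		}
		if attempt >= maxRetries {
			return errors.New("session expired after retry")
		}
		if rerr := refresh(); rerr != nil {
			return fmt.Errorf("session refresh failed: %w", rerr)
		}
	}
}

func main() {
	calls := 0
	err := doWithSessionRetry(3,
		func() error {
			calls++
			if calls < 3 {
				return errSessionExpired
			}
			return nil
		},
		func() error { return nil },
	)
	fmt.Println(calls, err) // 3 <nil>
}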
@ -129,6 +150,7 @@ func (y *Cloud189TV) put(ctx context.Context, url string, headers map[string]str
}
}

+ // http.Client closes Request.Body once the request completes
resp, err := base.HttpClient.Do(req)
if err != nil {
return nil, err
@ -208,7 +230,7 @@ func (y *Cloud189TV) login() (err error) {
var erron RespErr
var tokenInfo AppSessionResp
if y.Addition.AccessToken == "" {
- if y.Addition.TempUuid == "" {
+ if y.TempUuid == "" {
// fetch the login parameters
var uuidInfo UuidInfoResp
req.SetResult(&uuidInfo).SetError(&erron)
@ -227,7 +249,7 @@ func (y *Cloud189TV) login() (err error) {
if uuidInfo.Uuid == "" {
return errors.New("uuidInfo is empty")
}
- y.Addition.TempUuid = uuidInfo.Uuid
+ y.TempUuid = uuidInfo.Uuid
op.MustSaveDriverStorage(y)

// show the QR code
@ -255,7 +277,7 @@ func (y *Cloud189TV) login() (err error) {
// Signature
req.SetHeaders(y.AppKeySignatureHeader(ApiUrl+"/family/manage/qrcodeLoginResult.action",
http.MethodGet))
- req.SetQueryParam("uuid", y.Addition.TempUuid)
+ req.SetQueryParam("uuid", y.TempUuid)
_, err = req.Execute(http.MethodGet, ApiUrl+"/family/manage/qrcodeLoginResult.action")
if err != nil {
return
@ -267,7 +289,6 @@ func (y *Cloud189TV) login() (err error) {
return errors.New("E189AccessToken is empty")
}
y.Addition.AccessToken = accessTokenResp.E189AccessToken
- y.Addition.TempUuid = ""
}
}
// fetch the SessionKey and SessionSecret
// refreshSession tries to refresh the session using the existing AccessToken
func (y *Cloud189TV) refreshSession() (err error) {
var erron RespErr
var tokenInfo AppSessionResp
reqb := y.client.R().SetQueryParams(clientSuffix())
reqb.SetResult(&tokenInfo).SetError(&erron)
// Signature
reqb.SetHeaders(y.AppKeySignatureHeader(ApiUrl+"/family/manage/loginFamilyMerge.action",
http.MethodGet))
reqb.SetQueryParam("e189AccessToken", y.Addition.AccessToken)
_, err = reqb.Execute(http.MethodGet, ApiUrl+"/family/manage/loginFamilyMerge.action")
if err != nil {
return
}
if erron.HasError() {
return &erron
}
y.tokenInfo = &tokenInfo
return nil
}
func (y *Cloud189TV) keepAlive() {
_, err := y.get(ApiUrl+"/keepUserSession.action", func(r *resty.Request) {
r.SetQueryParams(clientSuffix())
}, nil)
if err != nil {
utils.Log.Warnf("189tv: Failed to keep user session alive: %v", err)
// if keepAlive fails, try to refresh the session
if refreshErr := y.refreshSession(); refreshErr != nil {
utils.Log.Errorf("189tv: Failed to refresh session after keepAlive error: %v", refreshErr)
}
} else {
utils.Log.Debugf("189tv: User session kept alive successfully.")
}
}
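keepAlive runs from a five-minute cron and falls back to refreshSession when the ping fails, so the next tick starts from a valid session. A generic ticker-based equivalent of that loop (using the stdlib ticker rather than the project's pkg/cron; ping and refresh are stand-ins):

package main

import (
	"fmt"
	"time"
)

// keepAlive pings on every tick and falls back to refresh on failure,
// until stop is closed.
func keepAlive(interval time.Duration, ping, refresh func() error, stop <-chan struct{}) {
	t := time.NewTicker(interval)
	defer t.Stop()
	for {
		select {
		case <-stop:
			return
		case <-t.C:
			if err := ping(); err != nil {
				fmt.Println("ping failed, refreshing session:", err)
				if rerr := refresh(); rerr != nil {
					fmt.Println("refresh failed:", rerr)
				}
			}
		}
	}
}

func main() {
	stop := make(chan struct{})
	go keepAlive(50*time.Millisecond,
		func() error { return nil },
		func() error { return nil },
		stop)
	time.Sleep(200 * time.Millisecond)
	close(stop)
}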
func (y *Cloud189TV) RapidUpload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, isFamily bool, overwrite bool) (model.Obj, error) {
fileMd5 := stream.GetHash().GetHash(utils.MD5)
if len(fileMd5) < utils.MD5.Width {
@ -311,11 +370,14 @@ func (y *Cloud189TV) RapidUpload(ctx context.Context, dstDir model.Obj, stream m
// legacy upload; the family cloud does not support overwriting
func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
- tempFile, err := file.CacheFullInTempFile()
- if err != nil {
- return nil, err
+ fileMd5 := file.GetHash().GetHash(utils.MD5)
+ var tempFile = file.GetFile()
+ var err error
+ if len(fileMd5) != utils.MD5.Width {
+ tempFile, fileMd5, err = stream.CacheFullAndHash(file, &up, utils.MD5)
+ } else if tempFile == nil {
+ tempFile, err = file.CacheFullAndWriter(&up, nil)
}
- fileMd5, err := utils.HashFile(utils.MD5, tempFile)
if err != nil {
return nil, err
}
@ -328,6 +390,10 @@ func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model
// the file does not exist in the drive yet; start uploading
status := GetUploadFileStatusResp{CreateUploadFileResp: *uploadInfo}

+ // driver.RateLimitReader tries to Close the underlying reader,
+ // but tempFile here is an *os.File that cannot be read again after Close,
+ // so wrap it in io.NopCloser
+ rateLimitedRd := driver.NewLimitedUploadStream(ctx, io.NopCloser(tempFile))
for status.GetSize() < file.GetSize() && status.FileDataExists != 1 {
if utils.IsCanceled(ctx) {
return nil, ctx.Err()
@ -345,7 +411,7 @@ func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model
header["Edrive-UploadFileId"] = fmt.Sprint(status.UploadFileId)
}

- _, err := y.put(ctx, status.FileUploadUrl, header, true, io.NopCloser(tempFile), isFamily)
+ _, err := y.put(ctx, status.FileUploadUrl, header, true, rateLimitedRd, isFamily)
if err, ok := err.(*RespErr); ok && err.Code != "InputStreamReadError" {
return nil, err
}

View File

@ -12,6 +12,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
+ "github.com/OpenListTeam/OpenList/v4/pkg/cron"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/go-resty/resty/v2"
"github.com/google/uuid"
@ -21,12 +22,12 @@ type Cloud189PC struct {
model.Storage
Addition

- identity string

client *resty.Client
loginParam *LoginParam
+ qrcodeParam *QRLoginParam
tokenInfo *AppSessionResp

uploadThread int
@ -35,6 +36,7 @@ type Cloud189PC struct {
storageConfig driver.Config
ref *Cloud189PC
+ cron *cron.Cron
}

func (y *Cloud189PC) Config() driver.Config {
@ -84,14 +86,22 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) {
})
}

- // avoid logging in repeatedly
- identity := utils.GetMD5EncodeStr(y.Username + y.Password)
- if !y.isLogin() || y.identity != identity {
- y.identity = identity
+ // try refreshing with the token first, then fall back to a fresh login
+ if y.Addition.RefreshToken != "" {
+ y.tokenInfo = &AppSessionResp{RefreshToken: y.Addition.RefreshToken}
+ if err = y.refreshToken(); err != nil {
+ return
+ }
+ } else {
if err = y.login(); err != nil {
return
}
}

+ // initialize and start the cron job
+ y.cron = cron.NewCron(time.Duration(time.Minute * 5))
+ // run keepAlive every 5 minutes
+ y.cron.Do(y.keepAlive)
}

// handle the family cloud ID
@ -128,6 +138,10 @@ func (d *Cloud189PC) InitReference(storage driver.Driver) error {
func (y *Cloud189PC) Drop(ctx context.Context) error {
y.ref = nil
+ if y.cron != nil {
+ y.cron.Stop()
+ y.cron = nil
+ }
return nil
}

View File

@ -80,6 +80,20 @@ func timestamp() int64 {
return time.Now().UTC().UnixNano() / 1e6
}
// formatDate formats a time.Time object into the "YYYY-MM-DDHH:mm:ssSSS" format.
func formatDate(t time.Time) string {
// The layout string "2006-01-0215:04:05.000" corresponds to:
// 2006 -> Year (YYYY)
// 01 -> Month (MM)
// 02 -> Day (DD)
// 15 -> Hour (HH)
// 04 -> Minute (mm)
// 05 -> Second (ss)
// 000 -> Millisecond (SSS) with leading zeros
// Note the lack of a separator between the date and hour, matching the desired output.
return t.Format("2006-01-0215:04:05.000")
}
}

func MustParseTime(str string) *time.Time {
lastOpTime, _ := time.ParseInLocation("2006-01-02 15:04:05 -07", str+" +08", time.Local)
return &lastOpTime
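Since Go layouts are by-example, the reference time with no separator between day and hour is what reproduces the API's unusual YYYY-MM-DDHH:mm:ssSSS shape. A quick illustration with a hypothetical timestamp:

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2025, 9, 19, 19, 27, 35, 123000000, time.UTC)
	// no separator between the date and the hour, matching the API's format
	fmt.Println(t.Format("2006-01-0215:04:05.000")) // 2025-09-1919:27:35.123
}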

View File

@ -6,9 +6,11 @@ import (
)

type Addition struct {
+ LoginType string `json:"login_type" type:"select" options:"password,qrcode" default:"password" required:"true"`
Username string `json:"username" required:"true"`
Password string `json:"password" required:"true"`
VCode string `json:"validate_code"`
+ RefreshToken string `json:"refresh_token" help:"To switch accounts, please clear this field"`
driver.RootID
OrderBy string `json:"order_by" type:"select" options:"filename,filesize,lastOpTime" default:"filename"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`

View File

@ -68,15 +68,7 @@ func (e *RespErr) Error() string {
return ""
}

- // parameters needed for login
- type LoginParam struct {
- // rsa-encrypted username and password
- RsaUsername string
- RsaPassword string
- // rsa key
- jRsaKey string
-
+ type BaseLoginParam struct {
// request header parameters
Lt string
ReqId string
@ -88,6 +80,27 @@ type LoginParam struct {
CaptchaToken string
}

+ // QRLoginParam stashes the parameters used during QR-code login
+ type QRLoginParam struct {
+ BaseLoginParam
+
+ UUID string `json:"uuid"`
+ EncodeUUID string `json:"encodeuuid"`
+ EncryUUID string `json:"encryuuid"`
+ }
+
+ // parameters needed for login
+ type LoginParam struct {
+ // rsa-encrypted username and password
+ RsaUsername string
+ RsaPassword string
+ // rsa key
+ jRsaKey string
+
+ BaseLoginParam
+ }
+
// login encryption helpers
type EncryptConfResp struct {
Result int `json:"result"`

View File

@ -7,6 +7,7 @@ import (
"encoding/hex"
"encoding/xml"
"fmt"
+ "hash"
"io"
"net/http"
"net/http/cookiejar"
@ -28,6 +29,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
+ "github.com/skip2/go-qrcode"

"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
@ -53,6 +55,9 @@ const (
MAC = "TELEMAC"
CHANNEL_ID = "web_cloud.189.cn"

+ // Error codes
+ UserInvalidOpenTokenError = "UserInvalidOpenToken"
)
func (y *Cloud189PC) SignatureHeader(url, method, params string, isFamily bool) map[string]string {
@ -263,7 +268,14 @@ func (y *Cloud189PC) findFileByName(ctx context.Context, searchName string, fold
}
}

- func (y *Cloud189PC) login() (err error) {
+ func (y *Cloud189PC) login() error {
+ if y.LoginType == "qrcode" {
+ return y.loginByQRCode()
+ }
+ return y.loginByPassword()
+ }
+
+ func (y *Cloud189PC) loginByPassword() (err error) {
// initialize the parameters needed for login
if y.loginParam == nil {
if err = y.initLoginParam(); err != nil {
@ -277,10 +289,15 @@ func (y *Cloud189PC) login() (err error) {
// destroy the login parameters
y.loginParam = nil
// on error, reload the login parameters (refresh the captcha)
- if err != nil && y.NoUseOcr {
- if err1 := y.initLoginParam(); err1 != nil {
- err = fmt.Errorf("err1: %s \nerr2: %s", err, err1)
+ if err != nil {
+ if y.NoUseOcr {
+ if err1 := y.initLoginParam(); err1 != nil {
+ err = fmt.Errorf("err1: %s \nerr2: %s", err, err1)
+ }
}
+ y.Status = err.Error()
+ op.MustSaveDriverStorage(y)
}
}()

@ -335,14 +352,105 @@ func (y *Cloud189PC) login() (err error) {
err = fmt.Errorf(tokenInfo.ResMessage)
return
}
+ y.Addition.RefreshToken = tokenInfo.RefreshToken
y.tokenInfo = &tokenInfo
+ op.MustSaveDriverStorage(y)
return
}
- /* initialize the parameters needed for login
- * returns an error if a captcha is encountered
- */
- func (y *Cloud189PC) initLoginParam() error {
+ func (y *Cloud189PC) loginByQRCode() error {
+ if y.qrcodeParam == nil {
+ if err := y.initQRCodeParam(); err != nil {
+ // the QR code itself is also delivered via the error return
return err
}
}
var state struct {
Status int `json:"status"`
RedirectUrl string `json:"redirectUrl"`
Msg string `json:"msg"`
}
now := time.Now()
_, err := y.client.R().
SetHeaders(map[string]string{
"Referer": AUTH_URL,
"Reqid": y.qrcodeParam.ReqId,
"lt": y.qrcodeParam.Lt,
}).
SetFormData(map[string]string{
"appId": APP_ID,
"clientType": CLIENT_TYPE,
"returnUrl": RETURN_URL,
"paramId": y.qrcodeParam.ParamId,
"uuid": y.qrcodeParam.UUID,
"encryuuid": y.qrcodeParam.EncryUUID,
"date": formatDate(now),
"timeStamp": fmt.Sprint(now.UTC().UnixNano() / 1e6),
}).
ForceContentType("application/json;charset=UTF-8").
SetResult(&state).
Post(AUTH_URL + "/api/logbox/oauth2/qrcodeLoginState.do")
if err != nil {
return fmt.Errorf("failed to check QR code state: %w", err)
}
switch state.Status {
case 0: // login succeeded
var tokenInfo AppSessionResp
_, err = y.client.R().
SetResult(&tokenInfo).
SetQueryParams(clientSuffix()).
SetQueryParam("redirectURL", state.RedirectUrl).
Post(API_URL + "/getSessionForPC.action")
if err != nil {
return err
}
if tokenInfo.ResCode != 0 {
return fmt.Errorf(tokenInfo.ResMessage)
}
y.Addition.RefreshToken = tokenInfo.RefreshToken
y.tokenInfo = &tokenInfo
op.MustSaveDriverStorage(y)
return nil
case -11001: // QR code expired
y.qrcodeParam = nil
return errors.New("QR code expired, please try again")
case -106: // waiting to be scanned
return y.genQRCode("QR code has not been scanned yet, please scan and save again")
case -11002: // waiting for confirmation
return y.genQRCode("QR code has been scanned, please confirm the login on your phone and save again")
default: // other errors
y.qrcodeParam = nil
return fmt.Errorf("QR code login failed with status %d: %s", state.Status, state.Msg)
}
}
func (y *Cloud189PC) genQRCode(text string) error {
// render the QR code
qrTemplate := `<body>
state: %s
<br><img src="data:image/jpeg;base64,%s"/>
<br>Or Click here: <a href="%s">Login</a>
</body>`
// Generate QR code
qrCode, err := qrcode.Encode(y.qrcodeParam.UUID, qrcode.Medium, 256)
if err != nil {
return fmt.Errorf("failed to generate QR code: %v", err)
}
// Encode QR code to base64
qrCodeBase64 := base64.StdEncoding.EncodeToString(qrCode)
// Create the HTML page
qrPage := fmt.Sprintf(qrTemplate, text, qrCodeBase64, y.qrcodeParam.UUID)
return fmt.Errorf("need verify: \n%s", qrPage)
}
+ func (y *Cloud189PC) initBaseParams() (*BaseLoginParam, error) {
// clear cookies
jar, _ := cookiejar.New(nil)
y.client.SetCookieJar(jar)
@ -356,17 +464,30 @@ func (y *Cloud189PC) initLoginParam() error {
}).
Get(WEB_URL + "/api/portal/unifyLoginForPC.action")
if err != nil {
- return err
+ return nil, err
}

- param := LoginParam{
+ return &BaseLoginParam{
CaptchaToken: regexp.MustCompile(`'captchaToken' value='(.+?)'`).FindStringSubmatch(res.String())[1],
Lt: regexp.MustCompile(`lt = "(.+?)"`).FindStringSubmatch(res.String())[1],
ParamId: regexp.MustCompile(`paramId = "(.+?)"`).FindStringSubmatch(res.String())[1],
ReqId: regexp.MustCompile(`reqId = "(.+?)"`).FindStringSubmatch(res.String())[1],
- // jRsaKey: regexp.MustCompile(`"j_rsaKey" value="(.+?)"`).FindStringSubmatch(res.String())[1],
- }
+ }, nil
+ }

+ /* initialize the parameters needed for login
+ * returns an error if a captcha is encountered
+ */
+ func (y *Cloud189PC) initLoginParam() error {
+ y.loginParam = nil
+ baseParam, err := y.initBaseParams()
+ if err != nil {
+ return err
+ }

+ y.loginParam = &LoginParam{BaseLoginParam: *baseParam}
// fetch the rsa public key
var encryptConf EncryptConfResp
_, err = y.client.R().
@ -377,18 +498,17 @@ func (y *Cloud189PC) initLoginParam() error {
return err
}

- param.jRsaKey = fmt.Sprintf("-----BEGIN PUBLIC KEY-----\n%s\n-----END PUBLIC KEY-----", encryptConf.Data.PubKey)
- param.RsaUsername = encryptConf.Data.Pre + RsaEncrypt(param.jRsaKey, y.Username)
- param.RsaPassword = encryptConf.Data.Pre + RsaEncrypt(param.jRsaKey, y.Password)
- y.loginParam = &param
+ y.loginParam.jRsaKey = fmt.Sprintf("-----BEGIN PUBLIC KEY-----\n%s\n-----END PUBLIC KEY-----", encryptConf.Data.PubKey)
+ y.loginParam.RsaUsername = encryptConf.Data.Pre + RsaEncrypt(y.loginParam.jRsaKey, y.Username)
+ y.loginParam.RsaPassword = encryptConf.Data.Pre + RsaEncrypt(y.loginParam.jRsaKey, y.Password)

// check whether a captcha is required
resp, err := y.client.R().
- SetHeader("REQID", param.ReqId).
+ SetHeader("REQID", y.loginParam.ReqId).
SetFormData(map[string]string{
"appKey": APP_ID,
"accountType": ACCOUNT_TYPE,
- "userName": param.RsaUsername,
+ "userName": y.loginParam.RsaUsername,
}).Post(AUTH_URL + "/api/logbox/oauth2/needcaptcha.do")
if err != nil {
return err
@ -400,8 +520,8 @@ func (y *Cloud189PC) initLoginParam() error {
// fetch the captcha image
imgRes, err := y.client.R().
SetQueryParams(map[string]string{
- "token": param.CaptchaToken,
- "REQID": param.ReqId,
+ "token": y.loginParam.CaptchaToken,
+ "REQID": y.loginParam.ReqId,
"rnd": fmt.Sprint(timestamp()),
}).
Get(AUTH_URL + "/api/logbox/oauth2/picCaptcha.do")
@ -428,10 +548,38 @@ func (y *Cloud189PC) initLoginParam() error {
return nil
}
// initQRCodeParam fetches the QR login parameters and renders the QR code
func (y *Cloud189PC) initQRCodeParam() (err error) {
y.qrcodeParam = nil
baseParam, err := y.initBaseParams()
if err != nil {
return err
}
var qrcodeParam QRLoginParam
_, err = y.client.R().
SetFormData(map[string]string{"appId": APP_ID}).
ForceContentType("application/json;charset=UTF-8").
SetResult(&qrcodeParam).
Post(AUTH_URL + "/api/logbox/oauth2/getUUID.do")
if err != nil {
return err
}
qrcodeParam.BaseLoginParam = *baseParam
y.qrcodeParam = &qrcodeParam
return y.genQRCode("please scan the QR code with the 189 Cloud app, then save the settings again.")
}
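The status codes handled by the switch earlier (-11001 expired, -106 not yet scanned, -11002 scanned but unconfirmed) form a small state machine that advances each time the user saves the settings. A sketch of how those states map to user-facing actions (constants copied from the switch above; the helper name is illustrative):

package main

import "fmt"

const (
	statusExpired     = -11001 // QR code expired
	statusWaitScan    = -106   // waiting for the user to scan
	statusWaitConfirm = -11002 // scanned, waiting for confirmation on the phone
)

func describeQRStatus(status int) string {
	switch status {
	case statusExpired:
		return "QR code expired, restart the login flow"
	case statusWaitScan:
		return "scan the QR code and save again"
	case statusWaitConfirm:
		return "confirm the login on your phone and save again"
	default:
		return "logged in"
	}
}

func main() {
	fmt.Println(describeQRStatus(statusWaitScan))
}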
// Refresh the session
func (y *Cloud189PC) refreshSession() (err error) {
	return y.refreshSessionWithRetry(0)
}

func (y *Cloud189PC) refreshSessionWithRetry(retryCount int) (err error) {
	if y.ref != nil {
		return y.ref.refreshSessionWithRetry(retryCount)
	}
	var erron RespErr
	var userSessionResp UserSessionResp
@ -448,37 +596,102 @@ func (y *Cloud189PC) refreshSession() (err error) {
		return err
	}
	// Refresh the token when the session token is no longer valid
	if erron.HasError() {
		if erron.ResCode == UserInvalidOpenTokenError {
			return y.refreshTokenWithRetry(retryCount)
		}
		return &erron
	}
	y.tokenInfo.UserSessionResp = userSessionResp
	return nil
}
// refreshToken returns an error when refreshing fails instead of calling login directly
func (y *Cloud189PC) refreshToken() (err error) {
return y.refreshTokenWithRetry(0)
}
func (y *Cloud189PC) refreshTokenWithRetry(retryCount int) (err error) {
if y.ref != nil {
return y.ref.refreshTokenWithRetry(retryCount)
}
// Cap the retry count to avoid infinite recursion
if retryCount >= 3 {
if y.Addition.RefreshToken != "" {
y.Addition.RefreshToken = ""
op.MustSaveDriverStorage(y)
}
return errors.New("refresh token failed after maximum retries")
}
var erron RespErr
var tokenInfo AppSessionResp
_, err = y.client.R().
SetResult(&tokenInfo).
ForceContentType("application/json;charset=UTF-8").
SetError(&erron).
SetFormData(map[string]string{
"clientId": APP_ID,
"refreshToken": y.tokenInfo.RefreshToken,
"grantType": "refresh_token",
"format": "json",
}).
Post(AUTH_URL + "/api/oauth2/refreshToken.do")
if err != nil {
return err
}
// If the refresh failed, return the error to the caller
if erron.HasError() {
if y.Addition.RefreshToken != "" {
y.Addition.RefreshToken = ""
op.MustSaveDriverStorage(y)
}
// Decide the next step based on the login type
if y.LoginType == "qrcode" {
return errors.New("QR code session has expired, please re-scan the code to log in")
}
// In password-login mode, fall back to a full login
return y.login()
}
y.Addition.RefreshToken = tokenInfo.RefreshToken
y.tokenInfo = &tokenInfo
op.MustSaveDriverStorage(y)
return y.refreshSessionWithRetry(retryCount + 1)
}
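refreshSessionWithRetry and refreshTokenWithRetry call each other, so the retryCount threaded through both is what bounds the mutual recursion at three round trips. A standalone sketch of the pattern (function names and bodies are illustrative, not the driver's API):

package main

import (
	"errors"
	"fmt"
)

const maxRetries = 3

func refreshSession(retryCount int) error {
	// Pretend the session is always rejected so the retry path is exercised.
	return refreshToken(retryCount)
}

func refreshToken(retryCount int) error {
	if retryCount >= maxRetries {
		return errors.New("refresh token failed after maximum retries")
	}
	// ...exchange the refresh token here, then re-validate the session...
	return refreshSession(retryCount + 1)
}

func main() {
	fmt.Println(refreshSession(0)) // terminates after maxRetries round trips
}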
func (y *Cloud189PC) keepAlive() {
_, err := y.get(API_URL+"/keepUserSession.action", func(r *resty.Request) {
r.SetQueryParams(clientSuffix())
}, nil)
if err != nil {
utils.Log.Warnf("189pc: Failed to keep user session alive: %v", err)
// If keepAlive fails, try to refresh the session
if refreshErr := y.refreshSession(); refreshErr != nil {
utils.Log.Errorf("189pc: Failed to refresh session after keepAlive error: %v", refreshErr)
}
} else {
utils.Log.Debugf("189pc: User session kept alive successfully.")
}
}
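The commit description mentions a 5-minute keep-alive cron for both 189PC and 189TV; the driver schedules keepAlive with its own cron package. A minimal sketch of an equivalent loop with time.Ticker, for illustration only:

package main

import (
	"context"
	"fmt"
	"time"
)

func startKeepAlive(ctx context.Context, interval time.Duration, keepAlive func()) {
	ticker := time.NewTicker(interval)
	go func() {
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return // storage dropped; stop pinging
			case <-ticker.C:
				keepAlive()
			}
		}
	}()
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	startKeepAlive(ctx, 300*time.Millisecond, func() { fmt.Println("ping") })
	<-ctx.Done()
}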
// Normal upload
// Zero-byte files cannot be uploaded
func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
	// File size
	fileSize := file.GetSize()
	// Slice size; must not be set to the file size
	sliceSize := partSize(fileSize)

	params := Params{
		"parentFolderId": dstDir.GetID(),
		"fileName":       url.QueryEscape(file.GetName()),
		"fileSize":       fmt.Sprint(fileSize),
		"sliceSize":      fmt.Sprint(sliceSize), // must be the specific slice size
		"lazyCheck":      "1",
	}
@ -500,66 +713,100 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
		return nil, err
	}

	ss, err := stream.NewStreamSectionReader(file, int(sliceSize), &up)
	if err != nil {
		return nil, err
	}
	threadG, upCtx := errgroup.NewOrderedGroupWithContext(ctx, y.uploadThread,
		retry.Attempts(3),
		retry.Delay(time.Second),
		retry.DelayType(retry.BackOffDelay))

	count := 1
	if fileSize > sliceSize {
		count = int((fileSize + sliceSize - 1) / sliceSize)
	}
	lastPartSize := fileSize % sliceSize
	if lastPartSize == 0 {
		lastPartSize = sliceSize
	}

	silceMd5Hexs := make([]string, 0, count)
	silceMd5 := utils.MD5.NewFunc()
	var writers io.Writer = silceMd5
	fileMd5Hex := file.GetHash().GetHash(utils.MD5)
	var fileMd5 hash.Hash
	if len(fileMd5Hex) != utils.MD5.Width {
		fileMd5 = utils.MD5.NewFunc()
		writers = io.MultiWriter(silceMd5, fileMd5)
	}
	for i := 1; i <= count; i++ {
		if utils.IsCanceled(upCtx) {
			break
		}
		offset := int64((i)-1) * sliceSize
		partSize := sliceSize
		if i == count {
			partSize = lastPartSize
		}
		partInfo := ""
		var reader *stream.SectionReader
		var rateLimitedRd io.Reader
		threadG.GoWithLifecycle(errgroup.Lifecycle{
			Before: func(ctx context.Context) error {
				if reader == nil {
					var err error
					reader, err = ss.GetSectionReader(offset, partSize)
					if err != nil {
						return err
					}
					silceMd5.Reset()
					w, err := utils.CopyWithBuffer(writers, reader)
					if w != partSize {
						return fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", partSize, w, err)
					}
					// Compute the chunk MD5 and encode it as hex and base64
					md5Bytes := silceMd5.Sum(nil)
					silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Bytes)))
					partInfo = fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes))
					rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
				}
				return nil
			},
			Do: func(ctx context.Context) error {
				reader.Seek(0, io.SeekStart)
				uploadUrls, err := y.GetMultiUploadUrls(ctx, isFamily, initMultiUpload.Data.UploadFileID, partInfo)
				if err != nil {
					return err
				}

				// step.4 upload the slice
				uploadUrl := uploadUrls[0]
				_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, rateLimitedRd, isFamily)
				if err != nil {
					return err
				}
				up(float64(threadG.Success()) * 100 / float64(count))
				return nil
			},
			After: func(err error) {
				ss.FreeSectionReader(reader)
			},
		})
	}
	if err = threadG.Wait(); err != nil {
		return nil, err
	}

	if fileMd5 != nil {
		fileMd5Hex = strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
	}
	sliceMd5Hex := fileMd5Hex
	if fileSize > sliceSize {
		sliceMd5Hex = strings.ToUpper(utils.GetMD5EncodeStr(strings.Join(silceMd5Hexs, "\n")))
	}
@ -620,11 +867,12 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
		cache = tmpF
	}
	sliceSize := partSize(size)
	count := 1
	if size > sliceSize {
		count = int((size + sliceSize - 1) / sliceSize)
	}
	lastSliceSize := size % sliceSize
	if lastSliceSize == 0 {
		lastSliceSize = sliceSize
	}
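Both upload paths now derive the chunk count with a ceiling division and pin the final chunk to the remainder, or to a full slice when the size divides evenly. A worked example: a 25 MiB file with 10 MiB slices gives count = 3 and a 5 MiB last part. The helper below mirrors the arithmetic above and is purely illustrative:

package main

import "fmt"

func chunkLayout(fileSize, sliceSize int64) (count, lastPart int64) {
	count = 1
	if fileSize > sliceSize {
		count = (fileSize + sliceSize - 1) / sliceSize // ceiling division
	}
	lastPart = fileSize % sliceSize
	if lastPart == 0 {
		lastPart = sliceSize // an exact multiple ends with a full slice
	}
	return count, lastPart
}

func main() {
	const MiB = int64(1) << 20
	count, last := chunkLayout(25*MiB, 10*MiB)
	fmt.Println(count, last/MiB) // 3 5
}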
@ -738,7 +986,8 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
	}
	// step.4 upload the slice
	rateLimitedRd := driver.NewLimitedUploadStream(ctx, io.NewSectionReader(cache, offset, byteSize))
	_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, rateLimitedRd, isFamily)
	if err != nil {
		return err
	}
@ -820,9 +1069,7 @@ func (y *Cloud189PC) GetMultiUploadUrls(ctx context.Context, isFamily bool, uplo
// Legacy upload; the family cloud does not support overwriting
func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
	tempFile, fileMd5, err := stream.CacheFullAndHash(file, &up, utils.MD5)
	if err != nil {
		return nil, err
	}


@ -5,6 +5,7 @@ import (
	"errors"
	"fmt"
	"io"
	"net/url"
	stdpath "path"
	"strings"
@ -12,6 +13,7 @@ import (
	"github.com/OpenListTeam/OpenList/v4/internal/errs"
	"github.com/OpenListTeam/OpenList/v4/internal/fs"
	"github.com/OpenListTeam/OpenList/v4/internal/model"
	"github.com/OpenListTeam/OpenList/v4/internal/op"
	"github.com/OpenListTeam/OpenList/v4/internal/sign"
	"github.com/OpenListTeam/OpenList/v4/internal/stream"
	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
@ -77,13 +79,45 @@ func (d *Alias) Get(ctx context.Context, path string) (model.Obj, error) {
	if !ok {
		return nil, errs.ObjectNotFound
	}
	var ret *model.Object
	provider := ""
	for _, dst := range dsts {
		rawPath := stdpath.Join(dst, sub)
		obj, err := fs.Get(ctx, rawPath, &fs.GetArgs{NoLog: true})
		if err != nil {
			continue
		}
		storage, err := fs.GetStorage(rawPath, &fs.GetStoragesArgs{})
		if ret == nil {
			ret = &model.Object{
				Path:     path,
				Name:     obj.GetName(),
				Size:     obj.GetSize(),
				Modified: obj.ModTime(),
				IsFolder: obj.IsDir(),
				HashInfo: obj.GetHash(),
			}
			if !d.ProviderPassThrough || err != nil {
				break
			}
			provider = storage.Config().Name
		} else if err != nil || provider != storage.GetStorage().Driver {
			provider = ""
			break
		}
	}
	if ret == nil {
		return nil, errs.ObjectNotFound
	}
	if provider != "" {
		return &model.ObjectProvider{
			Object: *ret,
			Provider: model.Provider{
				Provider: provider,
			},
		}, nil
	}
	return ret, nil
}
func (d *Alias) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
@ -99,7 +133,27 @@ func (d *Alias) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
	var objs []model.Obj
	fsArgs := &fs.ListArgs{NoLog: true, Refresh: args.Refresh}
	for _, dst := range dsts {
		tmp, err := fs.List(ctx, stdpath.Join(dst, sub), fsArgs)
		if err == nil {
			tmp, err = utils.SliceConvert(tmp, func(obj model.Obj) (model.Obj, error) {
				thumb, ok := model.GetThumb(obj)
				objRes := model.Object{
					Name:     obj.GetName(),
					Size:     obj.GetSize(),
					Modified: obj.ModTime(),
					IsFolder: obj.IsDir(),
				}
				if !ok {
					return &objRes, nil
				}
				return &model.ObjThumb{
					Object: objRes,
					Thumbnail: model.Thumbnail{
						Thumbnail: thumb,
					},
				}, nil
			})
		}
		if err == nil {
			objs = append(objs, tmp...)
		}
@ -113,49 +167,78 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
	if !ok {
		return nil, errs.ObjectNotFound
	}
	// proxy || ftp,s3
	if common.GetApiUrl(ctx) == "" {
		args.Redirect = false
	}
	for _, dst := range dsts {
		reqPath := stdpath.Join(dst, sub)
		link, fi, err := d.link(ctx, reqPath, args)
		if err != nil {
			continue
		}
		if link == nil {
			// Redirecting, but the request must go through the proxy
			return &model.Link{
				URL: fmt.Sprintf("%s/p%s?sign=%s",
					common.GetApiUrl(ctx),
					utils.EncodePath(reqPath, true),
					sign.Sign(reqPath)),
			}, nil
		}
		resultLink := *link
		resultLink.SyncClosers = utils.NewSyncClosers(link)
		if args.Redirect {
			return &resultLink, nil
		}

		if resultLink.ContentLength == 0 {
			resultLink.ContentLength = fi.GetSize()
		}
		if resultLink.MFile != nil {
			return &resultLink, nil
		}
		if d.DownloadConcurrency > 0 {
			resultLink.Concurrency = d.DownloadConcurrency
		}
		if d.DownloadPartSize > 0 {
			resultLink.PartSize = d.DownloadPartSize * utils.KB
		}
		return &resultLink, nil
	}
	return nil, errs.ObjectNotFound
}
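When d.link returns a nil link, the object must be served through OpenList itself, so the branch above builds a signed /p URL instead of handing out a direct one. A standalone sketch of that URL shape, with an HMAC signer standing in for the project's sign.Sign (the real code also escapes the path with utils.EncodePath):

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

func signPath(secret, reqPath string) string {
	mac := hmac.New(sha256.New, []byte(secret))
	mac.Write([]byte(reqPath))
	return base64.URLEncoding.EncodeToString(mac.Sum(nil))
}

func proxyURL(apiURL, reqPath, secret string) string {
	// Mirrors fmt.Sprintf("%s/p%s?sign=%s", ...) in the branch above.
	return fmt.Sprintf("%s/p%s?sign=%s", apiURL, reqPath, signPath(secret, reqPath))
}

func main() {
	fmt.Println(proxyURL("https://example.com", "/alias/movie.mkv", "secret"))
}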
func (d *Alias) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
root, sub := d.getRootAndPath(args.Obj.GetPath())
dsts, ok := d.pathMap[root]
if !ok {
return nil, errs.ObjectNotFound
}
for _, dst := range dsts {
rawPath := stdpath.Join(dst, sub)
storage, actualPath, err := op.GetStorageAndActualPath(rawPath)
if err != nil {
continue
}
other, ok := storage.(driver.Other)
if !ok {
continue
}
obj, err := op.GetUnwrap(ctx, storage, actualPath)
if err != nil {
continue
}
return other.Other(ctx, model.OtherArgs{
Obj: obj,
Method: args.Method,
Data: args.Data,
})
}
return nil, errs.NotImplement
}
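Other resolves the first destination whose storage implements the optional driver.Other interface; the dispatch relies on a plain Go type assertion. A standalone sketch of that capability-probing pattern (the interfaces here are illustrative stand-ins, not OpenList's types):

package main

import "fmt"

type Storage interface{ Name() string }

// Other is an optional capability, mirroring the storage.(driver.Other) probe above.
type Other interface {
	Other(method string) (any, error)
}

type basic struct{}

func (basic) Name() string { return "basic" }

type extended struct{ basic }

func (extended) Other(method string) (any, error) { return "handled " + method, nil }

func dispatch(storages []Storage, method string) (any, error) {
	for _, s := range storages {
		if o, ok := s.(Other); ok { // skip storages without the capability
			return o.Other(method)
		}
	}
	return nil, fmt.Errorf("not implemented")
}

func main() {
	res, _ := dispatch([]Storage{basic{}, extended{}}, "video_preview")
	fmt.Println(res) // handled video_preview
}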
func (d *Alias) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
	if !d.Writable {
		return errs.PermissionDenied
@ -278,24 +361,29 @@ func (d *Alias) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer,
	reqPath, err := d.getReqPath(ctx, dstDir, true)
	if err == nil {
		if len(reqPath) == 1 {
			storage, reqActualPath, err := op.GetStorageAndActualPath(*reqPath[0])
			if err != nil {
				return err
			}
			return op.Put(ctx, storage, reqActualPath, &stream.FileStream{
				Obj:      s,
				Mimetype: s.GetMimetype(),
				Reader:   s,
			}, up)
		} else {
			file, err := s.CacheFullAndWriter(nil, nil)
			if err != nil {
				return err
			}
			count := float64(len(reqPath) + 1)
			up(100 / count)
			for i, path := range reqPath {
				err = errors.Join(err, fs.PutDirectly(ctx, *path, &stream.FileStream{
					Obj:      s,
					Mimetype: s.GetMimetype(),
					Reader:   file,
				}))
				up(float64(i+2) / float64(count) * 100)
				_, e := file.Seek(0, io.SeekStart)
				if e != nil {
					return errors.Join(err, e)
@ -367,10 +455,24 @@ func (d *Alias) Extract(ctx context.Context, obj model.Obj, args model.ArchiveIn
		return nil, errs.ObjectNotFound
	}
	for _, dst := range dsts {
		reqPath := stdpath.Join(dst, sub)
		link, err := d.extract(ctx, reqPath, args)
		if err != nil {
			continue
		}
		if link == nil {
			return &model.Link{
				URL: fmt.Sprintf("%s/ap%s?inner=%s&pass=%s&sign=%s",
					common.GetApiUrl(ctx),
					utils.EncodePath(reqPath, true),
					utils.EncodePath(args.InnerPath, true),
					url.QueryEscape(args.Password),
					sign.SignArchive(reqPath)),
			}, nil
		}
		resultLink := *link
		resultLink.SyncClosers = utils.NewSyncClosers(link)
		return &resultLink, nil
	}
	return nil, errs.NotImplement
}


@ -15,6 +15,7 @@ type Addition struct {
	DownloadConcurrency int  `json:"download_concurrency" default:"0" required:"false" type:"number" help:"Need to enable proxy"`
	DownloadPartSize    int  `json:"download_part_size" default:"0" type:"number" required:"false" help:"Need to enable proxy. Unit: KB"`
	Writable            bool `json:"writable" type:"bool" default:"false"`
	ProviderPassThrough bool `json:"provider_pass_through" type:"bool" default:"false"`
}

var config = driver.Config{


@ -2,8 +2,6 @@ package alias

import (
	"context"
	stdpath "path"
	"strings"
@ -12,8 +10,6 @@ import (
	"github.com/OpenListTeam/OpenList/v4/internal/fs"
	"github.com/OpenListTeam/OpenList/v4/internal/model"
	"github.com/OpenListTeam/OpenList/v4/internal/op"
	"github.com/OpenListTeam/OpenList/v4/server/common"
)
@ -54,55 +50,12 @@ func (d *Alias) getRootAndPath(path string) (string, string) {
	return parts[0], parts[1]
}
func (d *Alias) get(ctx context.Context, path string, dst, sub string) (model.Obj, error) {
obj, err := fs.Get(ctx, stdpath.Join(dst, sub), &fs.GetArgs{NoLog: true})
if err != nil {
return nil, err
}
return &model.Object{
Path: path,
Name: obj.GetName(),
Size: obj.GetSize(),
Modified: obj.ModTime(),
IsFolder: obj.IsDir(),
HashInfo: obj.GetHash(),
}, nil
}
func (d *Alias) list(ctx context.Context, dst, sub string, args *fs.ListArgs) ([]model.Obj, error) {
objs, err := fs.List(ctx, stdpath.Join(dst, sub), args)
// the obj must implement the model.SetPath interface
// return objs, err
if err != nil {
return nil, err
}
return utils.SliceConvert(objs, func(obj model.Obj) (model.Obj, error) {
thumb, ok := model.GetThumb(obj)
objRes := model.Object{
Name: obj.GetName(),
Size: obj.GetSize(),
Modified: obj.ModTime(),
IsFolder: obj.IsDir(),
}
if !ok {
return &objRes, nil
}
return &model.ObjThumb{
Object: objRes,
Thumbnail: model.Thumbnail{
Thumbnail: thumb,
},
}, nil
})
}
func (d *Alias) link(ctx context.Context, reqPath string, args model.LinkArgs) (*model.Link, model.Obj, error) {
	storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
	if err != nil {
		return nil, nil, err
	}
	if !args.Redirect {
		return op.Link(ctx, storage, reqActualPath, args)
	}
	obj, err := fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
@ -183,8 +136,7 @@ func (d *Alias) listArchive(ctx context.Context, dst, sub string, args model.Arc
	return nil, errs.NotImplement
}

func (d *Alias) extract(ctx context.Context, reqPath string, args model.ArchiveInnerArgs) (*model.Link, error) {
	storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
	if err != nil {
		return nil, err
@ -192,20 +144,12 @@ func (d *Alias) extract(ctx context.Context, dst, sub string, args model.Archive
	if _, ok := storage.(driver.ArchiveReader); !ok {
		return nil, errs.NotImplement
	}
	if args.Redirect && common.ShouldProxy(storage, stdpath.Base(reqPath)) {
		_, err := fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
		if err == nil {
			return nil, err
		}
		return nil, nil
	}
	link, _, err := op.DriverExtract(ctx, storage, reqActualPath, args)
	return link, err


@ -297,11 +297,10 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
	if d.InternalUpload {
		url = partInfo.InternalUploadUrl
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodPut, url, io.LimitReader(rateLimited, DEFAULT))
	if err != nil {
		return err
	}
	res, err := base.HttpClient.Do(req)
	if err != nil {
		return err


@ -3,7 +3,6 @@ package aliyundrive_open

import (
	"context"
	"errors"
	"net/http"
	"path/filepath"
	"time"
@ -13,7 +12,6 @@ import (
	"github.com/OpenListTeam/OpenList/v4/internal/errs"
	"github.com/OpenListTeam/OpenList/v4/internal/model"
	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
	"github.com/go-resty/resty/v2"
	log "github.com/sirupsen/logrus"
)
@ -24,9 +22,8 @@ type AliyundriveOpen struct {
	DriveId string

	limiter *limiter
	ref     *AliyundriveOpen
}

func (d *AliyundriveOpen) Config() driver.Config {
@ -38,25 +35,23 @@ func (d *AliyundriveOpen) GetAddition() driver.Additional {
}

func (d *AliyundriveOpen) Init(ctx context.Context) error {
	d.limiter = getLimiterForUser(globalLimiterUserID) // First create a globally shared limiter to limit the initial requests.
	if d.LIVPDownloadFormat == "" {
		d.LIVPDownloadFormat = "jpeg"
	}
	if d.DriveType == "" {
		d.DriveType = "default"
	}
	res, err := d.request(ctx, limiterOther, "/adrive/v1.0/user/getDriveInfo", http.MethodPost, nil)
	if err != nil {
		d.limiter.free()
		d.limiter = nil
		return err
	}
	d.DriveId = utils.Json.Get(res, d.DriveType+"_drive_id").ToString()
	userid := utils.Json.Get(res, "user_id").ToString()
	d.limiter.free()
	d.limiter = getLimiterForUser(userid) // Allocate a corresponding limiter for each user.
	return nil
}
@ -70,6 +65,8 @@ func (d *AliyundriveOpen) InitReference(storage driver.Driver) error {
}

func (d *AliyundriveOpen) Drop(ctx context.Context) error {
	d.limiter.free()
	d.limiter = nil
	d.ref = nil
	return nil
}
@ -87,9 +84,6 @@ func (d *AliyundriveOpen) GetRoot(ctx context.Context) (model.Obj, error) {
}

func (d *AliyundriveOpen) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	files, err := d.getFiles(ctx, dir.GetID())
	if err != nil {
		return nil, err
@ -107,8 +101,8 @@ func (d *AliyundriveOpen) List(ctx context.Context, dir model.Obj, args model.Li
	return objs, err
}

func (d *AliyundriveOpen) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	res, err := d.request(ctx, limiterLink, "/adrive/v1.0/openFile/getDownloadUrl", http.MethodPost, func(req *resty.Request) {
		req.SetBody(base.Json{
			"drive_id": d.DriveId,
			"file_id":  file.GetID(),
@ -132,17 +126,10 @@ func (d *AliyundriveOpen) link(ctx context.Context, file model.Obj) (*model.Link
	}, nil
}
func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
	nowTime, _ := getNowTime()
	newDir := File{CreatedAt: nowTime, UpdatedAt: nowTime}
	_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
		req.SetBody(base.Json{
			"drive_id":       d.DriveId,
			"parent_file_id": parentDir.GetID(),
@ -168,7 +155,7 @@ func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirN
func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	var resp MoveOrCopyResp
	_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/move", http.MethodPost, func(req *resty.Request) {
		req.SetBody(base.Json{
			"drive_id": d.DriveId,
			"file_id":  srcObj.GetID(),
@ -198,7 +185,7 @@ func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (m
func (d *AliyundriveOpen) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
	var newFile File
	_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/update", http.MethodPost, func(req *resty.Request) {
		req.SetBody(base.Json{
			"drive_id": d.DriveId,
			"file_id":  srcObj.GetID(),
@ -230,7 +217,7 @@ func (d *AliyundriveOpen) Rename(ctx context.Context, srcObj model.Obj, newName
func (d *AliyundriveOpen) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
	var resp MoveOrCopyResp
	_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/copy", http.MethodPost, func(req *resty.Request) {
		req.SetBody(base.Json{
			"drive_id": d.DriveId,
			"file_id":  srcObj.GetID(),
@ -256,7 +243,7 @@ func (d *AliyundriveOpen) Remove(ctx context.Context, obj model.Obj) error {
	if d.RemoveWay == "delete" {
		uri = "/adrive/v1.0/openFile/delete"
	}
	_, err := d.request(ctx, limiterOther, uri, http.MethodPost, func(req *resty.Request) {
		req.SetBody(base.Json{
			"drive_id": d.DriveId,
			"file_id":  obj.GetID(),
@ -295,7 +282,7 @@ func (d *AliyundriveOpen) Other(ctx context.Context, args model.OtherArgs) (inte
	default:
		return nil, errs.NotSupport
	}
	_, err := d.request(ctx, limiterOther, uri, http.MethodPost, func(req *resty.Request) {
		req.SetBody(data).SetResult(&resp)
	})
	if err != nil {
@ -304,6 +291,21 @@ func (d *AliyundriveOpen) Other(ctx context.Context, args model.OtherArgs) (inte
	return resp, nil
}
func (d *AliyundriveOpen) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
res, err := d.request(ctx, limiterOther, "/adrive/v1.0/user/getSpaceInfo", http.MethodPost, nil)
if err != nil {
return nil, err
}
total := utils.Json.Get(res, "personal_space_info", "total_size").ToUint64()
used := utils.Json.Get(res, "personal_space_info", "used_size").ToUint64()
return &model.StorageDetails{
DiskUsage: model.DiskUsage{
TotalSpace: total,
FreeSpace: total - used,
},
}, nil
}
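GetDetails reports free space as total minus used, both parsed as unsigned 64-bit byte counts from personal_space_info. A tiny illustration of the mapping (the values are made up):

package main

import "fmt"

func main() {
	const GiB = uint64(1) << 30
	total, used := 100*GiB, 38*GiB
	free := total - used // what GetDetails stores in DiskUsage.FreeSpace
	fmt.Printf("total=%d GiB free=%d GiB\n", total/GiB, free/GiB)
}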
var _ driver.Driver = (*AliyundriveOpen)(nil)
var _ driver.MkdirResult = (*AliyundriveOpen)(nil)
var _ driver.MoveResult = (*AliyundriveOpen)(nil)


@ -0,0 +1,96 @@
package aliyundrive_open
import (
"context"
"fmt"
"sync"
"golang.org/x/time/rate"
)
// See document https://www.yuque.com/aliyundrive/zpfszx/mqocg38hlxzc5vcd
// See issue https://github.com/OpenListTeam/OpenList/issues/724
// We got limit per user per app, so the limiter should be global.
type limiterType int
const (
limiterList limiterType = iota
limiterLink
limiterOther
)
const (
listRateLimit = 3.9 // 4 per second in document, but we use 3.9 per second to be safe
linkRateLimit = 0.9 // 1 per second in document, but we use 0.9 per second to be safe
otherRateLimit = 14.9 // 15 per second in document, but we use 14.9 per second to be safe
globalLimiterUserID = "" // Global limiter user ID, used to limit the initial requests.
)
type limiter struct {
usedBy int
list *rate.Limiter
link *rate.Limiter
other *rate.Limiter
}
var limiters = make(map[string]*limiter)
var limitersLock = &sync.Mutex{}
func getLimiterForUser(userid string) *limiter {
limitersLock.Lock()
defer limitersLock.Unlock()
defer func() {
// Clean up limiters that are no longer used.
for id, lim := range limiters {
if lim.usedBy <= 0 && id != globalLimiterUserID { // Do not delete the global limiter.
delete(limiters, id)
}
}
}()
if lim, ok := limiters[userid]; ok {
lim.usedBy++
return lim
}
lim := &limiter{
usedBy: 1,
list: rate.NewLimiter(rate.Limit(listRateLimit), 1),
link: rate.NewLimiter(rate.Limit(linkRateLimit), 1),
other: rate.NewLimiter(rate.Limit(otherRateLimit), 1),
}
limiters[userid] = lim
return lim
}
func (l *limiter) wait(ctx context.Context, typ limiterType) error {
if l == nil {
return fmt.Errorf("driver not init")
}
switch typ {
case limiterList:
return l.list.Wait(ctx)
case limiterLink:
return l.link.Wait(ctx)
case limiterOther:
return l.other.Wait(ctx)
default:
return fmt.Errorf("unknown limiter type")
}
}
func (l *limiter) free() {
if l == nil {
return
}
limitersLock.Lock()
defer limitersLock.Unlock()
l.usedBy--
}
func (d *AliyundriveOpen) wait(ctx context.Context, typ limiterType) error {
if d == nil {
return fmt.Errorf("driver not init")
}
if d.ref != nil {
return d.ref.wait(ctx, typ) // If this is a reference driver, wait on the reference driver.
}
return d.limiter.wait(ctx, typ)
}
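The limiter file above keeps one token-bucket trio per user id, reference-counted so that storages sharing an account also share quota, with rates set just under the documented limits. A minimal standalone demonstration of the underlying golang.org/x/time/rate behavior:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// ~1 request per second with burst 1, like the link limiter above.
	lim := rate.NewLimiter(rate.Limit(0.9), 1)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	for i := 0; i < 3; i++ {
		if err := lim.Wait(ctx); err != nil { // blocks until a token is available
			fmt.Println("wait:", err)
			return
		}
		fmt.Println("request", i, "at", time.Now().Format("15:04:05.000"))
	}
}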


@ -50,10 +50,10 @@ func calPartSize(fileSize int64) int64 {
	return partSize
}

func (d *AliyundriveOpen) getUploadUrl(ctx context.Context, count int, fileId, uploadId string) ([]PartInfo, error) {
	partInfoList := makePartInfos(count)
	var resp CreateResp
	_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/getUploadUrl", http.MethodPost, func(req *resty.Request) {
		req.SetBody(base.Json{
			"drive_id": d.DriveId,
			"file_id":  fileId,
@ -69,7 +69,7 @@ func (d *AliyundriveOpen) uploadPart(ctx context.Context, r io.Reader, partInfo
	if d.InternalUpload {
		uploadUrl = strings.ReplaceAll(uploadUrl, "https://cn-beijing-data.aliyundrive.net/", "http://ccp-bj29-bj-1592982087.oss-cn-beijing-internal.aliyuncs.com/")
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, r)
	if err != nil {
		return err
	}
@ -84,10 +84,10 @@ func (d *AliyundriveOpen) uploadPart(ctx context.Context, r io.Reader, partInfo
	return nil
}

func (d *AliyundriveOpen) completeUpload(ctx context.Context, fileId, uploadId string) (model.Obj, error) {
	// 3. complete
	var newFile File
	_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/complete", http.MethodPost, func(req *resty.Request) {
		req.SetBody(base.Json{
			"drive_id": d.DriveId,
			"file_id":  fileId,
@ -137,11 +137,8 @@ func (d *AliyundriveOpen) calProofCode(stream model.FileStreamer) (string, error
	}
	buf := make([]byte, length)
	n, err := io.ReadFull(reader, buf)
	if n != int(length) {
		return "", fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", length, n, err)
	}
	return base64.StdEncoding.EncodeToString(buf), nil
}
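The rewritten check treats any short read as fatal and wraps the underlying error with %w instead of special-casing io.ErrUnexpectedEOF. A minimal sketch of the same io.ReadFull pattern:

package main

import (
	"bytes"
	"fmt"
	"io"
)

func readExactly(r io.Reader, length int) ([]byte, error) {
	buf := make([]byte, length)
	n, err := io.ReadFull(r, buf)
	if n != length {
		// err is io.ErrUnexpectedEOF (or another read error) on a short read
		return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", length, n, err)
	}
	return buf, nil
}

func main() {
	_, err := readExactly(bytes.NewReader([]byte("abc")), 8)
	fmt.Println(err) // failed to read all data: (expect =8, actual =3) unexpected EOF
}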
@ -183,7 +180,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
		createData["pre_hash"] = hash
	}
	var createResp CreateResp
	_, err, e := d.requestReturnErrResp(ctx, limiterOther, "/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
		req.SetBody(createData).SetResult(&createResp)
	})
	if err != nil {
@ -194,9 +191,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
		hash := stream.GetHash().GetHash(utils.SHA1)
		if len(hash) != utils.SHA1.Width {
			_, hash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA1)
			if err != nil {
				return nil, err
			}
@ -210,7 +205,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
		if err != nil {
			return nil, fmt.Errorf("cal proof code error: %s", err.Error())
		}
		_, err = d.request(ctx, limiterOther, "/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
			req.SetBody(createData).SetResult(&createResp)
		})
		if err != nil {
@ -221,17 +216,20 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
	if !createResp.RapidUpload {
		// 2. normal upload
		log.Debugf("[aliyundive_open] normal upload")

		ss, err := streamPkg.NewStreamSectionReader(stream, int(partSize), &up)
		if err != nil {
			return nil, err
		}
		preTime := time.Now()
		var offset, length int64 = 0, partSize
		for i := 0; i < len(createResp.PartInfoList); i++ {
			if utils.IsCanceled(ctx) {
				return nil, ctx.Err()
			}
			// refresh upload url if 50 minutes passed
			if time.Since(preTime) > 50*time.Minute {
				createResp.PartInfoList, err = d.getUploadUrl(ctx, count, createResp.FileId, createResp.UploadId)
				if err != nil {
					return nil, err
				}
@ -240,22 +238,19 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
			if remain := stream.GetSize() - offset; length > remain {
				length = remain
			}
			rd, err := ss.GetSectionReader(offset, length)
			if err != nil {
				return nil, err
			}
			err = retry.Do(func() error {
				rd.Seek(0, io.SeekStart)
				rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
				return d.uploadPart(ctx, rateLimitedRd, createResp.PartInfoList[i])
			},
				retry.Attempts(3),
				retry.DelayType(retry.BackOffDelay),
				retry.Delay(time.Second))
			ss.FreeSectionReader(rd)
			if err != nil {
				return nil, err
			}
@ -268,5 +263,5 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
	log.Debugf("[aliyundrive_open] create file success, resp: %+v", createResp)
	// 3. complete
	return d.completeUpload(ctx, createResp.FileId, createResp.UploadId)
}


@ -19,7 +19,7 @@ import (
// do others not defined in the Driver interface

func (d *AliyundriveOpen) _refreshToken(ctx context.Context) (string, string, error) {
	if d.UseOnlineAPI && d.APIAddress != "" {
		u := d.APIAddress
		var resp struct {
@ -33,8 +33,11 @@ func (d *AliyundriveOpen) _refreshToken() (string, string, error) {
		if d.AlipanType == "alipanTV" {
			driverTxt = "alicloud_tv"
		}
		err := d.wait(ctx, limiterOther)
		if err != nil {
			return "", "", err
		}
		_, err = base.RestyClient.R().
			SetHeader("User-Agent", "Mozilla/5.0 (Macintosh; Apple macOS 15_5) AppleWebKit/537.36 (KHTML, like Gecko) Safari/537.36 Chrome/138.0.0.0 Openlist/425.6.30").
			SetResult(&resp).
			SetQueryParams(map[string]string{
@ -54,11 +57,14 @@ func (d *AliyundriveOpen) _refreshToken() (string, string, error) {
		}
		return resp.RefreshToken, resp.AccessToken, nil
	}

	// Local refresh logic; requires client_id and client_secret
	if d.ClientID == "" || d.ClientSecret == "" {
		return "", "", fmt.Errorf("empty ClientID or ClientSecret")
	}
	err := d.wait(ctx, limiterOther)
	if err != nil {
		return "", "", err
	}
	url := API_URL + "/oauth/access_token"
	//var resp base.TokenResp
	var e ErrResp
@ -110,18 +116,18 @@ func getSub(token string) (string, error) {
	return utils.Json.Get(bs, "sub").ToString(), nil
}

func (d *AliyundriveOpen) refreshToken(ctx context.Context) error {
	if d.ref != nil {
		return d.ref.refreshToken(ctx)
	}
	refresh, access, err := d._refreshToken(ctx)
	for i := 0; i < 3; i++ {
		if err == nil {
			break
		} else {
			log.Errorf("[ali_open] failed to refresh token: %s", err)
		}
		refresh, access, err = d._refreshToken(ctx)
	}
	if err != nil {
		return err
@ -132,12 +138,12 @@ func (d *AliyundriveOpen) refreshToken(ctx context.Context) error {
	return nil
}

func (d *AliyundriveOpen) request(ctx context.Context, limitTy limiterType, uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error) {
	b, err, _ := d.requestReturnErrResp(ctx, limitTy, uri, method, callback, retry...)
	return b, err
}

func (d *AliyundriveOpen) requestReturnErrResp(ctx context.Context, limitTy limiterType, uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error, *ErrResp) {
	req := base.RestyClient.R()
	// TODO check whether access_token is expired
	req.SetHeader("Authorization", "Bearer "+d.getAccessToken())
@ -149,6 +155,10 @@ func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base
	}
	var e ErrResp
	req.SetError(&e)
	err := d.wait(ctx, limitTy)
	if err != nil {
		return nil, err, nil
	}
	res, err := req.Execute(method, API_URL+uri)
	if err != nil {
		if res != nil {
@ -159,11 +169,11 @@ func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base
	isRetry := len(retry) > 0 && retry[0]
	if e.Code != "" {
		if !isRetry && (utils.SliceContains([]string{"AccessTokenInvalid", "AccessTokenExpired", "I400JD"}, e.Code) || d.getAccessToken() == "") {
			err = d.refreshToken(ctx)
			if err != nil {
				return nil, err, nil
			}
			return d.requestReturnErrResp(ctx, limitTy, uri, method, callback, true)
		}
		return nil, fmt.Errorf("%s:%s", e.Code, e.Message), &e
	}
@ -172,7 +182,7 @@ func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base

func (d *AliyundriveOpen) list(ctx context.Context, data base.Json) (*Files, error) {
	var resp Files
	_, err := d.request(ctx, limiterList, "/adrive/v1.0/openFile/list", http.MethodPost, func(req *resty.Request) {
		req.SetBody(data).SetResult(&resp)
	})
	if err != nil {
@ -201,7 +211,7 @@ func (d *AliyundriveOpen) getFiles(ctx context.Context, fileId string) ([]File,
		//"video_thumbnail_width": 480,
		//"image_thumbnail_width": 480,
	}
	resp, err := d.list(ctx, data)
	if err != nil {
		return nil, err
	}


@ -2,7 +2,6 @@ package aliyundrive_share

import (
	"context"
	"net/http"
	"time"
@ -12,7 +11,6 @@ import (
	"github.com/OpenListTeam/OpenList/v4/internal/model"
	"github.com/OpenListTeam/OpenList/v4/pkg/cron"
	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
	"github.com/go-resty/resty/v2"
	log "github.com/sirupsen/logrus"
)
@ -25,8 +23,7 @@ type AliyundriveShare struct {
	DriveId string

	cron    *cron.Cron
	limiter *limiter
}
func (d *AliyundriveShare) Config() driver.Config {
@ -38,29 +35,26 @@ func (d *AliyundriveShare) GetAddition() driver.Additional {
}

func (d *AliyundriveShare) Init(ctx context.Context) error {
	d.limiter = getLimiter()
	err := d.refreshToken(ctx)
	if err != nil {
		d.limiter.free()
		d.limiter = nil
		return err
	}
	err = d.getShareToken(ctx)
	if err != nil {
		d.limiter.free()
		d.limiter = nil
		return err
	}
	d.cron = cron.NewCron(time.Hour * 2)
	d.cron.Do(func() {
		err := d.refreshToken(ctx)
		if err != nil {
			log.Errorf("%+v", err)
		}
	})
	return nil
}
@ -68,19 +62,14 @@ func (d *AliyundriveShare) Drop(ctx context.Context) error {
	if d.cron != nil {
		d.cron.Stop()
	}
	d.limiter.free()
	d.limiter = nil
	d.DriveId = ""
	return nil
}

func (d *AliyundriveShare) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	files, err := d.getFiles(ctx, dir.GetID())
	if err != nil {
		return nil, err
	}
@ -90,13 +79,6 @@ func (d *AliyundriveShare) list(ctx context.Context, dir model.Obj) ([]model.Obj
}

func (d *AliyundriveShare) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	data := base.Json{
		"drive_id": d.DriveId,
		"file_id":  file.GetID(),
@ -105,7 +87,7 @@ func (d *AliyundriveShare) link(ctx context.Context, file model.Obj) (*model.Lin
		"share_id": d.ShareId,
	}
	var resp ShareLinkResp
	_, err := d.request(ctx, limiterLink, "https://api.alipan.com/v2/file/get_share_link_download_url", http.MethodPost, func(req *resty.Request) {
		req.SetHeader(CanaryHeaderKey, CanaryHeaderValue).SetBody(data).SetResult(&resp)
	})
	if err != nil {
@ -135,7 +117,7 @@ func (d *AliyundriveShare) Other(ctx context.Context, args model.OtherArgs) (int
	default:
		return nil, errs.NotSupport
	}
	_, err := d.request(ctx, limiterOther, url, http.MethodPost, func(req *resty.Request) {
		req.SetBody(data).SetResult(&resp)
	})
	if err != nil {


@ -0,0 +1,67 @@
package aliyundrive_share
import (
"context"
"fmt"
"golang.org/x/time/rate"
)
// See issue https://github.com/OpenListTeam/OpenList/issues/724
// Seems there is no limit per user.
type limiterType int
const (
limiterList limiterType = iota
limiterLink
limiterOther
)
const (
listRateLimit = 3.9 // 4 per second in document, but we use 3.9 per second to be safe
linkRateLimit = 0.9 // 1 per second in document, but we use 0.9 per second to be safe
otherRateLimit = 14.9 // 15 per second in document, but we use 14.9 per second to be safe
)
type limiter struct {
list *rate.Limiter
link *rate.Limiter
other *rate.Limiter
}
func getLimiter() *limiter {
return &limiter{
list: rate.NewLimiter(rate.Limit(listRateLimit), 1),
link: rate.NewLimiter(rate.Limit(linkRateLimit), 1),
other: rate.NewLimiter(rate.Limit(otherRateLimit), 1),
}
}
func (l *limiter) wait(ctx context.Context, typ limiterType) error {
if l == nil {
return fmt.Errorf("driver not init")
}
switch typ {
case limiterList:
return l.list.Wait(ctx)
case limiterLink:
return l.link.Wait(ctx)
case limiterOther:
return l.other.Wait(ctx)
default:
return fmt.Errorf("unknown limiter type")
}
}
func (l *limiter) free() {
}
func (d *AliyundriveShare) wait(ctx context.Context, typ limiterType) error {
if d == nil {
return fmt.Errorf("driver not init")
}
//if d.ref != nil {
// return d.ref.wait(ctx, typ) // If this is a reference driver, wait on the reference driver.
//}
return d.limiter.wait(ctx, typ)
}


@@ -1,6 +1,7 @@
package aliyundrive_share
import (
+"context"
"errors"
"fmt"
@@ -15,11 +16,15 @@ const (
CanaryHeaderValue = "client=web,app=share,version=v2.3.1"
)
-func (d *AliyundriveShare) refreshToken() error {
+func (d *AliyundriveShare) refreshToken(ctx context.Context) error {
+err := d.wait(ctx, limiterOther)
+if err != nil {
+return err
+}
url := "https://auth.alipan.com/v2/account/token"
var resp base.TokenResp
var e ErrorResp
-_, err := base.RestyClient.R().
+_, err = base.RestyClient.R().
SetBody(base.Json{"refresh_token": d.RefreshToken, "grant_type": "refresh_token"}).
SetResult(&resp).
SetError(&e).
@@ -36,7 +41,11 @@ func (d *AliyundriveShare) refreshToken() error {
}
// do others that not defined in Driver interface
-func (d *AliyundriveShare) getShareToken() error {
+func (d *AliyundriveShare) getShareToken(ctx context.Context) error {
+err := d.wait(ctx, limiterOther)
+if err != nil {
+return err
+}
data := base.Json{
"share_id": d.ShareId,
}
@@ -45,7 +54,7 @@ func (d *AliyundriveShare) getShareToken() error {
}
var e ErrorResp
var resp ShareTokenResp
-_, err := base.RestyClient.R().
+_, err = base.RestyClient.R().
SetResult(&resp).SetError(&e).SetBody(data).
Post("https://api.alipan.com/v2/share_link/get_share_token")
if err != nil {
@@ -58,7 +67,7 @@ func (d *AliyundriveShare) getShareToken() error {
return nil
}
-func (d *AliyundriveShare) request(url, method string, callback base.ReqCallback) ([]byte, error) {
+func (d *AliyundriveShare) request(ctx context.Context, limitTy limiterType, url, method string, callback base.ReqCallback) ([]byte, error) {
var e ErrorResp
req := base.RestyClient.R().
SetError(&e).
@@ -71,6 +80,10 @@ func (d *AliyundriveShare) request(url, method string, callback base.ReqCallback) ([]byte, error) {
} else {
req.SetBody("{}")
}
+err := d.wait(ctx, limitTy)
+if err != nil {
+return nil, err
+}
resp, err := req.Execute(method, url)
if err != nil {
return nil, err
@@ -78,14 +91,14 @@ func (d *AliyundriveShare) request(url, method string, callback base.ReqCallback) ([]byte, error) {
if e.Code != "" {
if e.Code == "AccessTokenInvalid" || e.Code == "ShareLinkTokenInvalid" {
if e.Code == "AccessTokenInvalid" {
-err = d.refreshToken()
+err = d.refreshToken(ctx)
} else {
-err = d.getShareToken()
+err = d.getShareToken(ctx)
}
if err != nil {
return nil, err
}
-return d.request(url, method, callback)
+return d.request(ctx, limitTy, url, method, callback)
} else {
return nil, errors.New(e.Code + ": " + e.Message)
}
@@ -93,7 +106,7 @@ func (d *AliyundriveShare) request(url, method string, callback base.ReqCallback) ([]byte, error) {
return resp.Body(), nil
}
-func (d *AliyundriveShare) getFiles(fileId string) ([]File, error) {
+func (d *AliyundriveShare) getFiles(ctx context.Context, fileId string) ([]File, error) {
files := make([]File, 0)
data := base.Json{
"image_thumbnail_process": "image/resize,w_160/format,jpeg",
@@ -110,6 +123,10 @@ func (d *AliyundriveShare) getFiles(fileId string) ([]File, error) {
if data["marker"] == "first" {
data["marker"] = ""
}
+err := d.wait(ctx, limiterList)
+if err != nil {
+return nil, err
+}
var e ErrorResp
var resp ListResp
res, err := base.RestyClient.R().
@@ -123,11 +140,11 @@ func (d *AliyundriveShare) getFiles(fileId string) ([]File, error) {
log.Debugf("aliyundrive share get files: %s", res.String())
if e.Code != "" {
if e.Code == "AccessTokenInvalid" || e.Code == "ShareLinkTokenInvalid" {
-err = d.getShareToken()
+err = d.getShareToken(ctx)
if err != nil {
return nil, err
}
-return d.getFiles(ctx, fileId)
}
return nil, errors.New(e.Message)
}
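The pattern added throughout this file: every outbound call first waits on the matching limiter, and a call that fails with AccessTokenInvalid or ShareLinkTokenInvalid re-authenticates and retries. A minimal sketch of that refresh-and-retry shape, with hypothetical doCall/refresh helpers and a hypothetical errTokenInvalid sentinel (the driver retries via recursion; this sketch retries once):

package main

import (
	"context"
	"errors"
)

var errTokenInvalid = errors.New("AccessTokenInvalid") // hypothetical sentinel for the API error code

// callWithRefresh retries a call once after refreshing credentials.
func callWithRefresh(ctx context.Context, doCall func() error, refresh func(context.Context) error) error {
	err := doCall()
	if errors.Is(err, errTokenInvalid) {
		if rerr := refresh(ctx); rerr != nil {
			return rerr
		}
		return doCall() // one retry with the fresh token
	}
	return err
}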

View File

@@ -20,9 +20,12 @@ import (
_ "github.com/OpenListTeam/OpenList/v4/drivers/baidu_netdisk"
_ "github.com/OpenListTeam/OpenList/v4/drivers/baidu_photo"
_ "github.com/OpenListTeam/OpenList/v4/drivers/chaoxing"
+_ "github.com/OpenListTeam/OpenList/v4/drivers/chunk"
_ "github.com/OpenListTeam/OpenList/v4/drivers/cloudreve"
_ "github.com/OpenListTeam/OpenList/v4/drivers/cloudreve_v4"
+_ "github.com/OpenListTeam/OpenList/v4/drivers/cnb_releases"
_ "github.com/OpenListTeam/OpenList/v4/drivers/crypt"
+_ "github.com/OpenListTeam/OpenList/v4/drivers/degoo"
_ "github.com/OpenListTeam/OpenList/v4/drivers/doubao"
_ "github.com/OpenListTeam/OpenList/v4/drivers/doubao_share"
_ "github.com/OpenListTeam/OpenList/v4/drivers/dropbox"
@@ -48,6 +51,7 @@ import (
_ "github.com/OpenListTeam/OpenList/v4/drivers/onedrive_app"
_ "github.com/OpenListTeam/OpenList/v4/drivers/onedrive_sharelink"
_ "github.com/OpenListTeam/OpenList/v4/drivers/openlist"
+_ "github.com/OpenListTeam/OpenList/v4/drivers/openlist_share"
_ "github.com/OpenListTeam/OpenList/v4/drivers/pikpak"
_ "github.com/OpenListTeam/OpenList/v4/drivers/pikpak_share"
_ "github.com/OpenListTeam/OpenList/v4/drivers/quark_open"
@@ -59,6 +63,7 @@ import (
_ "github.com/OpenListTeam/OpenList/v4/drivers/smb"
_ "github.com/OpenListTeam/OpenList/v4/drivers/strm"
_ "github.com/OpenListTeam/OpenList/v4/drivers/teambition"
+_ "github.com/OpenListTeam/OpenList/v4/drivers/teldrive"
_ "github.com/OpenListTeam/OpenList/v4/drivers/terabox"
_ "github.com/OpenListTeam/OpenList/v4/drivers/thunder"
_ "github.com/OpenListTeam/OpenList/v4/drivers/thunder_browser"

View File

@@ -203,11 +203,12 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
streamSize := stream.GetSize()
sliceSize := d.getSliceSize(streamSize)
-count := int(streamSize / sliceSize)
+count := 1
+if streamSize > sliceSize {
+count = int((streamSize + sliceSize - 1) / sliceSize)
+}
lastBlockSize := streamSize % sliceSize
-if lastBlockSize > 0 {
-count++
-} else {
+if lastBlockSize == 0 {
lastBlockSize = sliceSize
}
@@ -363,4 +364,12 @@ func (d *BaiduNetdisk) uploadSlice(ctx context.Context, params map[string]string
return nil
}
+func (d *BaiduNetdisk) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
+du, err := d.quota()
+if err != nil {
+return nil, err
+}
+return &model.StorageDetails{DiskUsage: *du}, nil
+}
var _ driver.Driver = (*BaiduNetdisk)(nil)
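The rewritten slice count is a guarded ceiling division, ceil(streamSize/sliceSize) with a floor of one slice; this fixes the zero-byte case, where the old truncating division plus the lastBlockSize branch produced a count of 0 and no block was uploaded at all. Checking the arithmetic on illustrative sizes:

package main

import "fmt"

func sliceCount(streamSize, sliceSize int64) int64 {
	count := int64(1)
	if streamSize > sliceSize {
		count = (streamSize + sliceSize - 1) / sliceSize // ceiling division
	}
	return count
}

func main() {
	fmt.Println(sliceCount(0, 4<<20))       // 1: an empty file still uploads one block
	fmt.Println(sliceCount(4<<20, 4<<20))   // 1: exact multiple, no extra slice
	fmt.Println(sliceCount(4<<20+1, 4<<20)) // 2: one full slice plus the tail
}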

View File

@@ -189,3 +189,12 @@ type PrecreateResp struct {
// return_type=2
File File `json:"info"`
}
+type QuotaResp struct {
+Errno int `json:"errno"`
+RequestId int64 `json:"request_id"`
+Total uint64 `json:"total"`
+Used uint64 `json:"used"`
+//Free uint64 `json:"free"`
+//Expire bool `json:"expire"`
+}

View File

@@ -381,6 +381,18 @@ func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 {
return maxSliceSize
}
+func (d *BaiduNetdisk) quota() (*model.DiskUsage, error) {
+var resp QuotaResp
+_, err := d.request("https://pan.baidu.com/api/quota", http.MethodGet, nil, &resp)
+if err != nil {
+return nil, err
+}
+return &model.DiskUsage{
+TotalSpace: resp.Total,
+FreeSpace: resp.Total - resp.Used,
+}, nil
+}
// func encodeURIComponent(str string) string {
// r := url.QueryEscape(str)
// r = strings.ReplaceAll(r, "+", "%20")

View File

@@ -262,11 +262,12 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
// calculate the required values
streamSize := stream.GetSize()
-count := int(streamSize / DEFAULT)
+count := 1
+if streamSize > DEFAULT {
+count = int((streamSize + DEFAULT - 1) / DEFAULT)
+}
lastBlockSize := streamSize % DEFAULT
-if lastBlockSize > 0 {
-count++
-} else {
+if lastBlockSize == 0 {
lastBlockSize = DEFAULT
}

View File

@@ -255,7 +255,7 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
},
UpdateProgress: up,
})
-req, err := http.NewRequestWithContext(ctx, "POST", "https://pan-yz.chaoxing.com/upload", r)
+req, err := http.NewRequestWithContext(ctx, http.MethodPost, "https://pan-yz.chaoxing.com/upload", r)
if err != nil {
return err
}

View File

@@ -167,7 +167,7 @@ func (d *ChaoXing) Login() (string, error) {
return "", err
}
// Create the request
-req, err := http.NewRequest("POST", "https://passport2.chaoxing.com/fanyalogin", body)
+req, err := http.NewRequest(http.MethodPost, "https://passport2.chaoxing.com/fanyalogin", body)
if err != nil {
return "", err
}

drivers/chunk/driver.go (new file, 488 lines)

@@ -0,0 +1,488 @@
package chunk
import (
"bytes"
"context"
"errors"
"fmt"
"io"
stdpath "path"
"strconv"
"strings"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/fs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/sign"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/OpenList/v4/server/common"
)
type Chunk struct {
model.Storage
Addition
}
func (d *Chunk) Config() driver.Config {
return config
}
func (d *Chunk) GetAddition() driver.Additional {
return &d.Addition
}
func (d *Chunk) Init(ctx context.Context) error {
if d.PartSize <= 0 {
return errors.New("part size must be positive")
}
d.RemotePath = utils.FixAndCleanPath(d.RemotePath)
return nil
}
func (d *Chunk) Drop(ctx context.Context) error {
return nil
}
func (d *Chunk) Get(ctx context.Context, path string) (model.Obj, error) {
if utils.PathEqual(path, "/") {
return &model.Object{
Name: "Root",
IsFolder: true,
Path: "/",
}, nil
}
remoteStorage, remoteActualPath, err := op.GetStorageAndActualPath(d.RemotePath)
if err != nil {
return nil, err
}
remoteActualPath = stdpath.Join(remoteActualPath, path)
if remoteObj, err := op.Get(ctx, remoteStorage, remoteActualPath); err == nil {
return &model.Object{
Path: path,
Name: remoteObj.GetName(),
Size: remoteObj.GetSize(),
Modified: remoteObj.ModTime(),
IsFolder: remoteObj.IsDir(),
HashInfo: remoteObj.GetHash(),
}, nil
}
remoteActualDir, name := stdpath.Split(remoteActualPath)
chunkName := "[openlist_chunk]" + name
chunkObjs, err := op.List(ctx, remoteStorage, stdpath.Join(remoteActualDir, chunkName), model.ListArgs{})
if err != nil {
return nil, err
}
var totalSize int64 = 0
// part 0 must exist
chunkSizes := []int64{-1}
h := make(map[*utils.HashType]string)
var first model.Obj
for _, o := range chunkObjs {
if o.IsDir() {
continue
}
if after, ok := strings.CutPrefix(o.GetName(), "hash_"); ok {
hn, value, ok := strings.Cut(strings.TrimSuffix(after, d.CustomExt), "_")
if ok {
ht, ok := utils.GetHashByName(hn)
if ok {
h[ht] = value
}
}
continue
}
idx, err := strconv.Atoi(strings.TrimSuffix(o.GetName(), d.CustomExt))
if err != nil {
continue
}
totalSize += o.GetSize()
if len(chunkSizes) > idx {
if idx == 0 {
first = o
}
chunkSizes[idx] = o.GetSize()
} else if len(chunkSizes) == idx {
chunkSizes = append(chunkSizes, o.GetSize())
} else {
newChunkSizes := make([]int64, idx+1)
copy(newChunkSizes, chunkSizes)
chunkSizes = newChunkSizes
chunkSizes[idx] = o.GetSize()
}
}
// check that part 0 is not -1, so empty files are supported
// if there is more than one part, the last part can never be 0
// so only the middle parts are checked for a size of 0
for i, l := 0, len(chunkSizes)-2; ; i++ {
if i == 0 {
if chunkSizes[i] == -1 {
return nil, fmt.Errorf("chunk part[%d] are missing", i)
}
} else if chunkSizes[i] == 0 {
return nil, fmt.Errorf("chunk part[%d] are missing", i)
}
if i >= l {
break
}
}
reqDir, _ := stdpath.Split(path)
objRes := chunkObject{
Object: model.Object{
Path: stdpath.Join(reqDir, chunkName),
Name: name,
Size: totalSize,
Modified: first.ModTime(),
Ctime: first.CreateTime(),
},
chunkSizes: chunkSizes,
}
if len(h) > 0 {
objRes.HashInfo = utils.NewHashInfoByMap(h)
}
return &objRes, nil
}
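On the remote storage, a chunked file is a directory whose name carries the [openlist_chunk] prefix, holding zero-based numbered part files plus optional hash_<name>_<value> marker files written when store_hash is enabled (each marker holds one placeholder byte; the hash lives in the name). An illustrative layout for a file video.mp4 split into three parts, with no custom_ext configured and an example md5:

    [openlist_chunk]video.mp4/
        0                                      first PartSize bytes
        1                                      next PartSize bytes
        2                                      tail, may be smaller than PartSize
        hash_md5_d41d8cd98f00b204e9800998ecf8427e   optional hash marker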
func (d *Chunk) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
remoteStorage, remoteActualPath, err := op.GetStorageAndActualPath(d.RemotePath)
if err != nil {
return nil, err
}
remoteActualDir := stdpath.Join(remoteActualPath, dir.GetPath())
remoteObjs, err := op.List(ctx, remoteStorage, remoteActualDir, model.ListArgs{
ReqPath: args.ReqPath,
Refresh: args.Refresh,
})
if err != nil {
return nil, err
}
result := make([]model.Obj, 0, len(remoteObjs))
for _, obj := range remoteObjs {
rawName := obj.GetName()
if obj.IsDir() {
if name, ok := strings.CutPrefix(rawName, "[openlist_chunk]"); ok {
chunkObjs, err := op.List(ctx, remoteStorage, stdpath.Join(remoteActualDir, rawName), model.ListArgs{
ReqPath: stdpath.Join(args.ReqPath, rawName),
Refresh: args.Refresh,
})
if err != nil {
return nil, err
}
totalSize := int64(0)
h := make(map[*utils.HashType]string)
first := obj
for _, o := range chunkObjs {
if o.IsDir() {
continue
}
if after, ok := strings.CutPrefix(strings.TrimSuffix(o.GetName(), d.CustomExt), "hash_"); ok {
hn, value, ok := strings.Cut(after, "_")
if ok {
ht, ok := utils.GetHashByName(hn)
if ok {
h[ht] = value
}
continue
}
}
idx, err := strconv.Atoi(strings.TrimSuffix(o.GetName(), d.CustomExt))
if err != nil {
continue
}
if idx == 0 {
first = o
}
totalSize += o.GetSize()
}
objRes := model.Object{
Name: name,
Size: totalSize,
Modified: first.ModTime(),
Ctime: first.CreateTime(),
}
if len(h) > 0 {
objRes.HashInfo = utils.NewHashInfoByMap(h)
}
if !d.Thumbnail {
result = append(result, &objRes)
} else {
thumbPath := stdpath.Join(args.ReqPath, ".thumbnails", name+".webp")
thumb := fmt.Sprintf("%s/d%s?sign=%s",
common.GetApiUrl(ctx),
utils.EncodePath(thumbPath, true),
sign.Sign(thumbPath))
result = append(result, &model.ObjThumb{
Object: objRes,
Thumbnail: model.Thumbnail{
Thumbnail: thumb,
},
})
}
continue
}
}
if !d.ShowHidden && strings.HasPrefix(rawName, ".") {
continue
}
thumb, ok := model.GetThumb(obj)
objRes := model.Object{
Name: rawName,
Size: obj.GetSize(),
Modified: obj.ModTime(),
IsFolder: obj.IsDir(),
HashInfo: obj.GetHash(),
}
if !ok {
result = append(result, &objRes)
} else {
result = append(result, &model.ObjThumb{
Object: objRes,
Thumbnail: model.Thumbnail{
Thumbnail: thumb,
},
})
}
}
return result, nil
}
func (d *Chunk) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
remoteStorage, remoteActualPath, err := op.GetStorageAndActualPath(d.RemotePath)
if err != nil {
return nil, err
}
chunkFile, ok := file.(*chunkObject)
remoteActualPath = stdpath.Join(remoteActualPath, file.GetPath())
if !ok {
l, _, err := op.Link(ctx, remoteStorage, remoteActualPath, args)
if err != nil {
return nil, err
}
resultLink := *l
resultLink.SyncClosers = utils.NewSyncClosers(l)
return &resultLink, nil
}
fileSize := chunkFile.GetSize()
mergedRrf := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
start := httpRange.Start
length := httpRange.Length
if length < 0 || start+length > fileSize {
length = fileSize - start
}
if length == 0 {
return io.NopCloser(strings.NewReader("")), nil
}
rs := make([]io.Reader, 0)
cs := make(utils.Closers, 0)
var (
rc io.ReadCloser
readFrom bool
)
for idx, chunkSize := range chunkFile.chunkSizes {
if readFrom {
l, o, err := op.Link(ctx, remoteStorage, stdpath.Join(remoteActualPath, d.getPartName(idx)), args)
if err != nil {
_ = cs.Close()
return nil, err
}
cs = append(cs, l)
chunkSize2 := l.ContentLength
if chunkSize2 <= 0 {
chunkSize2 = o.GetSize()
}
if chunkSize2 != chunkSize {
_ = cs.Close()
return nil, fmt.Errorf("chunk part[%d] size not match", idx)
}
rrf, err := stream.GetRangeReaderFromLink(chunkSize2, l)
if err != nil {
_ = cs.Close()
return nil, err
}
newLength := length - chunkSize2
if newLength >= 0 {
length = newLength
rc, err = rrf.RangeRead(ctx, http_range.Range{Length: -1})
} else {
rc, err = rrf.RangeRead(ctx, http_range.Range{Length: length})
}
if err != nil {
_ = cs.Close()
return nil, err
}
rs = append(rs, rc)
cs = append(cs, rc)
if newLength <= 0 {
return utils.ReadCloser{
Reader: io.MultiReader(rs...),
Closer: &cs,
}, nil
}
} else if newStart := start - chunkSize; newStart >= 0 {
start = newStart
} else {
l, o, err := op.Link(ctx, remoteStorage, stdpath.Join(remoteActualPath, d.getPartName(idx)), args)
if err != nil {
_ = cs.Close()
return nil, err
}
cs = append(cs, l)
chunkSize2 := l.ContentLength
if chunkSize2 <= 0 {
chunkSize2 = o.GetSize()
}
if chunkSize2 != chunkSize {
_ = cs.Close()
return nil, fmt.Errorf("chunk part[%d] size not match", idx)
}
rrf, err := stream.GetRangeReaderFromLink(chunkSize2, l)
if err != nil {
_ = cs.Close()
return nil, err
}
rc, err = rrf.RangeRead(ctx, http_range.Range{Start: start, Length: -1})
if err != nil {
_ = cs.Close()
return nil, err
}
length -= chunkSize2 - start
cs = append(cs, rc)
if length <= 0 {
return utils.ReadCloser{
Reader: rc,
Closer: &cs,
}, nil
}
rs = append(rs, rc)
readFrom = true
}
}
return nil, fmt.Errorf("invalid range: start=%d,length=%d,fileSize=%d", httpRange.Start, httpRange.Length, fileSize)
}
return &model.Link{
RangeReader: stream.RangeReaderFunc(mergedRrf),
}, nil
}
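The merged range reader walks chunkSizes, discounting whole parts that lie before httpRange.Start, then chains per-part range reads until the requested length is covered. The index arithmetic in isolation (sizes and the range are illustrative):

package main

import "fmt"

// planRange shows which chunk parts a byte range touches, mirroring the
// skip-then-chain walk in Link above.
func planRange(sizes []int64, start, length int64) {
	for idx, size := range sizes {
		if start >= size { // the whole part lies before the range
			start -= size
			continue
		}
		n := min(size-start, length) // bytes served from this part
		fmt.Printf("part %d: offset %d, read %d bytes\n", idx, start, n)
		length -= n
		start = 0
		if length <= 0 {
			return
		}
	}
}

func main() {
	planRange([]int64{100, 100, 50}, 150, 80)
	// part 1: offset 50, read 50 bytes
	// part 2: offset 0, read 30 bytes
}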
func (d *Chunk) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
path := stdpath.Join(d.RemotePath, parentDir.GetPath(), dirName)
return fs.MakeDir(ctx, path)
}
func (d *Chunk) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
src := stdpath.Join(d.RemotePath, srcObj.GetPath())
dst := stdpath.Join(d.RemotePath, dstDir.GetPath())
_, err := fs.Move(ctx, src, dst)
return err
}
func (d *Chunk) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
if _, ok := srcObj.(*chunkObject); ok {
newName = "[openlist_chunk]" + newName
}
return fs.Rename(ctx, stdpath.Join(d.RemotePath, srcObj.GetPath()), newName)
}
func (d *Chunk) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
dst := stdpath.Join(d.RemotePath, dstDir.GetPath())
src := stdpath.Join(d.RemotePath, srcObj.GetPath())
_, err := fs.Copy(ctx, src, dst)
return err
}
func (d *Chunk) Remove(ctx context.Context, obj model.Obj) error {
return fs.Remove(ctx, stdpath.Join(d.RemotePath, obj.GetPath()))
}
func (d *Chunk) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
remoteStorage, remoteActualPath, err := op.GetStorageAndActualPath(d.RemotePath)
if err != nil {
return err
}
if d.Thumbnail && dstDir.GetName() == ".thumbnails" {
return op.Put(ctx, remoteStorage, stdpath.Join(remoteActualPath, dstDir.GetPath()), file, up)
}
upReader := &driver.ReaderUpdatingProgress{
Reader: file,
UpdateProgress: up,
}
dst := stdpath.Join(remoteActualPath, dstDir.GetPath(), "[openlist_chunk]"+file.GetName())
if d.StoreHash {
for ht, value := range file.GetHash().All() {
_ = op.Put(ctx, remoteStorage, dst, &stream.FileStream{
Obj: &model.Object{
Name: fmt.Sprintf("hash_%s_%s%s", ht.Name, value, d.CustomExt),
Size: 1,
Modified: file.ModTime(),
},
Mimetype: "application/octet-stream",
Reader: bytes.NewReader([]byte{0}), // for compatibility with drivers that do not support empty files
}, nil, true)
}
}
fullPartCount := int(file.GetSize() / d.PartSize)
tailSize := file.GetSize() % d.PartSize
if tailSize == 0 && fullPartCount > 0 {
fullPartCount--
tailSize = d.PartSize
}
partIndex := 0
for partIndex < fullPartCount {
err = op.Put(ctx, remoteStorage, dst, &stream.FileStream{
Obj: &model.Object{
Name: d.getPartName(partIndex),
Size: d.PartSize,
Modified: file.ModTime(),
},
Mimetype: file.GetMimetype(),
Reader: io.LimitReader(upReader, d.PartSize),
}, nil, true)
if err != nil {
_ = op.Remove(ctx, remoteStorage, dst)
return err
}
partIndex++
}
err = op.Put(ctx, remoteStorage, dst, &stream.FileStream{
Obj: &model.Object{
Name: d.getPartName(fullPartCount),
Size: tailSize,
Modified: file.ModTime(),
},
Mimetype: file.GetMimetype(),
Reader: upReader,
}, nil)
if err != nil {
_ = op.Remove(ctx, remoteStorage, dst)
}
return err
}
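Put emits fullPartCount parts of exactly PartSize followed by one tail part; when the size is an exact multiple, the last full part is reclassified as the tail, so the final write is never a phantom zero-byte part and an empty file still produces part 0. The same arithmetic checked on illustrative sizes:

package main

import "fmt"

func splitParts(size, partSize int64) (fullParts int, tail int64) {
	fullParts = int(size / partSize)
	tail = size % partSize
	if tail == 0 && fullParts > 0 {
		fullParts-- // exact multiple: the last part becomes the tail
		tail = partSize
	}
	return
}

func main() {
	fmt.Println(splitParts(250, 100)) // 2 50: parts 0,1 full, part 2 holds the tail
	fmt.Println(splitParts(200, 100)) // 1 100: part 1 is the tail
	fmt.Println(splitParts(0, 100))   // 0 0: empty file, part 0 written with zero bytes
}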
func (d *Chunk) getPartName(part int) string {
return fmt.Sprintf("%d%s", part, d.CustomExt)
}
func (d *Chunk) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
remoteStorage, err := fs.GetStorage(d.RemotePath, &fs.GetStoragesArgs{})
if err != nil {
return nil, errs.NotImplement
}
wd, ok := remoteStorage.(driver.WithDetails)
if !ok {
return nil, errs.NotImplement
}
remoteDetails, err := wd.GetDetails(ctx)
if err != nil {
return nil, err
}
return &model.StorageDetails{
DiskUsage: remoteDetails.DiskUsage,
}, nil
}
var _ driver.Driver = (*Chunk)(nil)

drivers/chunk/meta.go (new file, 31 lines)

@@ -0,0 +1,31 @@
package chunk
import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/op"
)
type Addition struct {
RemotePath string `json:"remote_path" required:"true"`
PartSize int64 `json:"part_size" required:"true" type:"number" help:"bytes"`
CustomExt string `json:"custom_ext" type:"string"`
StoreHash bool `json:"store_hash" type:"bool" default:"true"`
Thumbnail bool `json:"thumbnail" required:"true" default:"false" help:"enable thumbnail which pre-generated under .thumbnails folder"`
ShowHidden bool `json:"show_hidden" default:"true" required:"false" help:"show hidden directories and files"`
}
var config = driver.Config{
Name: "Chunk",
LocalSort: true,
OnlyProxy: true,
NoCache: true,
DefaultRoot: "/",
NoLinkURL: true,
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &Chunk{}
})
}

drivers/chunk/obj.go (new file, 8 lines)

@@ -0,0 +1,8 @@
package chunk
import "github.com/OpenListTeam/OpenList/v4/internal/model"
type chunkObject struct {
model.Object
chunkSizes []int64
}

View File

@@ -18,6 +18,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/setting"
+streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/cookie"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
@@ -236,28 +237,32 @@ func (d *Cloudreve) upLocal(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
}
func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
+DEFAULT := int64(u.ChunkSize)
+ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
+if err != nil {
+return err
+}
uploadUrl := u.UploadURLs[0]
credential := u.Credential
var finish int64 = 0
var chunk int = 0
-DEFAULT := int64(u.ChunkSize)
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := stream.GetSize() - finish
byteSize := min(left, DEFAULT)
-err := retry.Do(
+utils.Log.Debugf("[Cloudreve-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
+rd, err := ss.GetSectionReader(finish, byteSize)
+if err != nil {
+return err
+}
+err = retry.Do(
func() error {
-utils.Log.Debugf("[Cloudreve-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
-byteData := make([]byte, byteSize)
-n, err := io.ReadFull(stream, byteData)
-utils.Log.Debug(err, n)
-if err != nil {
-return err
-}
+rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadUrl+"?chunk="+strconv.Itoa(chunk),
-driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
+driver.NewLimitedUploadStream(ctx, rd))
if err != nil {
return err
}
@@ -290,6 +295,7 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
+ss.FreeSectionReader(rd)
if err != nil {
return err
}
@@ -301,26 +307,29 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
}
func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
+DEFAULT := int64(u.ChunkSize)
+ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
+if err != nil {
+return err
+}
uploadUrl := u.UploadURLs[0]
var finish int64 = 0
-DEFAULT := int64(u.ChunkSize)
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := stream.GetSize() - finish
byteSize := min(left, DEFAULT)
-err := retry.Do(
+utils.Log.Debugf("[Cloudreve-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
+rd, err := ss.GetSectionReader(finish, byteSize)
+if err != nil {
+return err
+}
+err = retry.Do(
func() error {
-utils.Log.Debugf("[Cloudreve-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
-byteData := make([]byte, byteSize)
-n, err := io.ReadFull(stream, byteData)
-utils.Log.Debug(err, n)
-if err != nil {
-return err
-}
-req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl,
-driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
+rd.Seek(0, io.SeekStart)
+req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, rd))
if err != nil {
return err
}
@@ -346,6 +355,7 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
+ss.FreeSectionReader(rd)
if err != nil {
return err
}
@@ -359,27 +369,31 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
}
func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
+DEFAULT := int64(u.ChunkSize)
+ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
+if err != nil {
+return err
+}
var finish int64 = 0
var chunk int = 0
var etags []string
-DEFAULT := int64(u.ChunkSize)
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := stream.GetSize() - finish
byteSize := min(left, DEFAULT)
-err := retry.Do(
+utils.Log.Debugf("[Cloudreve-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
+rd, err := ss.GetSectionReader(finish, byteSize)
+if err != nil {
+return err
+}
+err = retry.Do(
func() error {
-utils.Log.Debugf("[Cloudreve-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
-byteData := make([]byte, byteSize)
-n, err := io.ReadFull(stream, byteData)
-utils.Log.Debug(err, n)
-if err != nil {
-return err
-}
+rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, u.UploadURLs[chunk],
-driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
+driver.NewLimitedUploadStream(ctx, rd))
if err != nil {
return err
}
@@ -404,6 +418,7 @@ func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
+ss.FreeSectionReader(rd)
if err != nil {
return err
}
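The shape shared by all three rewrites: the old code allocated a byteSize buffer and drained the stream inside the retry closure, so a failed attempt could not safely re-read its chunk; the new code obtains a reusable section reader outside retry.Do and each attempt merely seeks it back to zero. A generic sketch of retrying from a seekable section (uploadOnce stands in for the HTTP request):

package main

import (
	"fmt"
	"io"
	"strings"
)

// uploadOnce stands in for one HTTP attempt; it may fail and be retried.
func uploadOnce(rd io.ReadSeeker) error {
	if _, err := rd.Seek(0, io.SeekStart); err != nil { // rewind before every attempt
		return err
	}
	data, err := io.ReadAll(rd)
	if err != nil {
		return err
	}
	fmt.Printf("uploaded %d bytes\n", len(data))
	return nil
}

func main() {
	section := strings.NewReader("one chunk of the stream")
	for attempt := 0; attempt < 3; attempt++ {
		if err := uploadOnce(section); err == nil {
			break
		}
	}
}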

View File

@@ -20,7 +20,9 @@ import (
type CloudreveV4 struct {
model.Storage
Addition
ref *CloudreveV4
+AccessExpires string
+RefreshExpires string
}
func (d *CloudreveV4) Config() driver.Config {
@@ -44,13 +46,17 @@ func (d *CloudreveV4) Init(ctx context.Context) error {
if d.ref != nil {
return nil
}
-if d.AccessToken == "" && d.RefreshToken != "" {
-return d.refreshToken()
-}
-if d.Username != "" {
+if d.canLogin() {
return d.login()
}
-return nil
+if d.RefreshToken != "" {
+return d.refreshToken()
+}
+if d.AccessToken == "" {
+return errors.New("no way to authenticate. At least AccessToken is required")
+}
+// ensure AccessToken is valid
+return d.parseJWT(d.AccessToken, &AccessJWT{})
}
func (d *CloudreveV4) InitReference(storage driver.Driver) error {
@@ -333,6 +339,21 @@ func (d *CloudreveV4) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
return nil, errs.NotImplement
}
+func (d *CloudreveV4) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
+var r CapacityResp
+err := d.request(http.MethodGet, "/user/capacity", nil, &r)
+if err != nil {
+return nil, err
+}
+return &model.StorageDetails{
+DiskUsage: model.DiskUsage{
+TotalSpace: r.Total,
+FreeSpace: r.Total - r.Used,
+},
+}, nil
+}
//func (d *CloudreveV4) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
// return nil, errs.NotSupport
//}

View File

@@ -66,11 +66,27 @@ type CaptchaResp struct {
Ticket string `json:"ticket"`
}
+type AccessJWT struct {
+TokenType string `json:"token_type"`
+Sub string `json:"sub"`
+Exp int64 `json:"exp"`
+Nbf int64 `json:"nbf"`
+}
+type RefreshJWT struct {
+TokenType string `json:"token_type"`
+Sub string `json:"sub"`
+Exp int `json:"exp"`
+Nbf int `json:"nbf"`
+StateHash string `json:"state_hash"`
+RootTokenID string `json:"root_token_id"`
+}
type Token struct {
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
-AccessExpires time.Time `json:"access_expires"`
-RefreshExpires time.Time `json:"refresh_expires"`
+AccessExpires string `json:"access_expires"`
+RefreshExpires string `json:"refresh_expires"`
}
type TokenResponse struct {
@@ -188,3 +204,9 @@ type FolderSummaryResp struct {
CalculatedAt time.Time `json:"calculated_at"`
} `json:"folder_summary"`
}
+type CapacityResp struct {
+Total uint64 `json:"total"`
+Used uint64 `json:"used"`
+// StoragePackTotal uint64 `json:"storage_pack_total"`
+}

View File

@@ -19,6 +19,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/setting"
+"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
@@ -27,6 +28,15 @@ import (
// do others that not defined in Driver interface
+const (
+CodeLoginRequired = http.StatusUnauthorized
+CodeCredentialInvalid = 40020 // Failed to issue token
+)
+var (
+ErrorIssueToken = errors.New("failed to issue token")
+)
func (d *CloudreveV4) getUA() string {
if d.CustomUA != "" {
return d.CustomUA
@@ -38,6 +48,23 @@ func (d *CloudreveV4) request(method string, path string, callback base.ReqCallback, out any) error {
if d.ref != nil {
return d.ref.request(method, path, callback, out)
}
+// ensure token
+if d.isTokenExpired() {
+err := d.refreshToken()
+if err != nil {
+return err
+}
+}
+return d._request(method, path, callback, out)
+}
+func (d *CloudreveV4) _request(method string, path string, callback base.ReqCallback, out any) error {
+if d.ref != nil {
+return d.ref._request(method, path, callback, out)
+}
u := d.Address + "/api/v4" + path
req := base.RestyClient.R()
req.SetHeaders(map[string]string{
@@ -64,15 +91,17 @@ func (d *CloudreveV4) request(method string, path string, callback base.ReqCallback, out any) error {
}
if r.Code != 0 {
-if r.Code == 401 && d.RefreshToken != "" && path != "/session/token/refresh" {
-// try to refresh token
-err = d.refreshToken()
+if r.Code == CodeLoginRequired && d.canLogin() && path != "/session/token/refresh" {
+err = d.login()
if err != nil {
return err
}
return d.request(method, path, callback, out)
}
-return errors.New(r.Msg)
+if r.Code == CodeCredentialInvalid {
+return ErrorIssueToken
+}
+return fmt.Errorf("%d: %s", r.Code, r.Msg)
}
if out != nil && r.Data != nil {
@@ -90,14 +119,18 @@ func (d *CloudreveV4) request(method string, path string, callback base.ReqCallback, out any) error {
return nil
}
+func (d *CloudreveV4) canLogin() bool {
+return d.Username != "" && d.Password != ""
+}
func (d *CloudreveV4) login() error {
var siteConfig SiteLoginConfigResp
-err := d.request(http.MethodGet, "/site/config/login", nil, &siteConfig)
+err := d._request(http.MethodGet, "/site/config/login", nil, &siteConfig)
if err != nil {
return err
}
var prepareLogin PrepareLoginResp
-err = d.request(http.MethodGet, "/session/prepare?email="+d.Addition.Username, nil, &prepareLogin)
+err = d._request(http.MethodGet, "/session/prepare?email="+d.Addition.Username, nil, &prepareLogin)
if err != nil {
return err
}
@@ -127,7 +160,7 @@ func (d *CloudreveV4) doLogin(needCaptcha bool) error {
}
if needCaptcha {
var config BasicConfigResp
-err = d.request(http.MethodGet, "/site/config/basic", nil, &config)
+err = d._request(http.MethodGet, "/site/config/basic", nil, &config)
if err != nil {
return err
}
@@ -135,7 +168,7 @@ func (d *CloudreveV4) doLogin(needCaptcha bool) error {
return fmt.Errorf("captcha type %s not support", config.CaptchaType)
}
var captcha CaptchaResp
-err = d.request(http.MethodGet, "/site/captcha", nil, &captcha)
+err = d._request(http.MethodGet, "/site/captcha", nil, &captcha)
if err != nil {
return err
}
@@ -161,20 +194,22 @@ func (d *CloudreveV4) doLogin(needCaptcha bool) error {
loginBody["captcha"] = captchaCode
}
var token TokenResponse
-err = d.request(http.MethodPost, "/session/token", func(req *resty.Request) {
+err = d._request(http.MethodPost, "/session/token", func(req *resty.Request) {
req.SetBody(loginBody)
}, &token)
if err != nil {
return err
}
d.AccessToken, d.RefreshToken = token.Token.AccessToken, token.Token.RefreshToken
+d.AccessExpires, d.RefreshExpires = token.Token.AccessExpires, token.Token.RefreshExpires
op.MustSaveDriverStorage(d)
return nil
}
func (d *CloudreveV4) refreshToken() error {
+// if no refresh token, try to login if possible
if d.RefreshToken == "" {
-if d.Username != "" {
+if d.canLogin() {
err := d.login()
if err != nil {
return fmt.Errorf("cannot login to get refresh token, error: %s", err)
@@ -182,20 +217,127 @@ func (d *CloudreveV4) refreshToken() error {
}
return nil
}
+// parse jwt to check if refresh token is valid
+var jwt RefreshJWT
+err := d.parseJWT(d.RefreshToken, &jwt)
+if err != nil {
+// if refresh token is invalid, try to login if possible
+if d.canLogin() {
+return d.login()
+}
+d.GetStorage().SetStatus(fmt.Sprintf("Invalid RefreshToken: %s", err.Error()))
+op.MustSaveDriverStorage(d)
+return fmt.Errorf("invalid refresh token: %w", err)
+}
+// do refresh token
var token Token
-err := d.request(http.MethodPost, "/session/token/refresh", func(req *resty.Request) {
+err = d._request(http.MethodPost, "/session/token/refresh", func(req *resty.Request) {
req.SetBody(base.Json{
"refresh_token": d.RefreshToken,
})
}, &token)
if err != nil {
+if errors.Is(err, ErrorIssueToken) {
+if d.canLogin() {
+// try to login again
+return d.login()
+}
+d.GetStorage().SetStatus("This session is no longer valid")
+op.MustSaveDriverStorage(d)
+return ErrorIssueToken
+}
return err
}
d.AccessToken, d.RefreshToken = token.AccessToken, token.RefreshToken
+d.AccessExpires, d.RefreshExpires = token.AccessExpires, token.RefreshExpires
op.MustSaveDriverStorage(d)
return nil
}
+func (d *CloudreveV4) parseJWT(token string, jwt any) error {
+split := strings.Split(token, ".")
+if len(split) != 3 {
+return fmt.Errorf("invalid token length: %d, ensure the token is a valid JWT", len(split))
+}
+data, err := base64.RawURLEncoding.DecodeString(split[1])
+if err != nil {
+return fmt.Errorf("invalid token encoding: %w, ensure the token is a valid JWT", err)
+}
+err = json.Unmarshal(data, &jwt)
+if err != nil {
+return fmt.Errorf("invalid token content: %w, ensure the token is a valid JWT", err)
+}
+return nil
+}
+// check if token is expired
+// https://github.com/cloudreve/frontend/blob/ddfacc1c31c49be03beb71de4cc114c8811038d6/src/session/index.ts#L177-L200
+func (d *CloudreveV4) isTokenExpired() bool {
+if d.RefreshToken == "" {
+// login again if username and password is set
+if d.canLogin() {
+return true
+}
+// no refresh token, cannot refresh
+return false
+}
+if d.AccessToken == "" {
+return true
+}
+var (
+err error
+expires time.Time
+)
+// check if token is expired
+if d.AccessExpires != "" {
+// use expires field if possible to prevent timezone issue
+// only available after login or refresh token
+// 2025-08-28T02:43:07.645109985+08:00
+expires, err = time.Parse(time.RFC3339Nano, d.AccessExpires)
+if err != nil {
+return false
+}
+} else {
+// fallback to parse jwt
+// if failed, disable the storage
+var jwt AccessJWT
+err = d.parseJWT(d.AccessToken, &jwt)
+if err != nil {
+d.GetStorage().SetStatus(fmt.Sprintf("Invalid AccessToken: %s", err.Error()))
+op.MustSaveDriverStorage(d)
+return false
+}
+// may be have timezone issue
+expires = time.Unix(jwt.Exp, 0)
+}
+// add a 10 minutes safe margin
+ddl := time.Now().Add(10 * time.Minute)
+if expires.Before(ddl) {
+// current access token expired, check if refresh token is expired
+// warning: cannot parse refresh token from jwt, because the exp field is not standard
+if d.RefreshExpires != "" {
+refreshExpires, err := time.Parse(time.RFC3339Nano, d.RefreshExpires)
+if err != nil {
+return false
+}
+if refreshExpires.Before(time.Now()) {
+// This session is no longer valid
+if d.canLogin() {
+// try to login again
+return true
+}
+d.GetStorage().SetStatus("This session is no longer valid")
+op.MustSaveDriverStorage(d)
+return false
+}
+}
+return true
+}
+return false
+}
func (d *CloudreveV4) upLocal(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
var finish int64 = 0
var chunk int = 0
@@ -251,28 +393,32 @@ func (d *CloudreveV4) upLocal(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
}
func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
+DEFAULT := int64(u.ChunkSize)
+ss, err := stream.NewStreamSectionReader(file, int(DEFAULT), &up)
+if err != nil {
+return err
+}
uploadUrl := u.UploadUrls[0]
credential := u.Credential
var finish int64 = 0
var chunk int = 0
-DEFAULT := int64(u.ChunkSize)
for finish < file.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := file.GetSize() - finish
byteSize := min(left, DEFAULT)
-err := retry.Do(
+utils.Log.Debugf("[CloudreveV4-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
+rd, err := ss.GetSectionReader(finish, byteSize)
+if err != nil {
+return err
+}
+err = retry.Do(
func() error {
-utils.Log.Debugf("[CloudreveV4-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
-byteData := make([]byte, byteSize)
-n, err := io.ReadFull(file, byteData)
-utils.Log.Debug(err, n)
-if err != nil {
-return err
-}
+rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadUrl+"?chunk="+strconv.Itoa(chunk),
-driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
+driver.NewLimitedUploadStream(ctx, rd))
if err != nil {
return err
}
@@ -305,6 +451,7 @@ func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
+ss.FreeSectionReader(rd)
if err != nil {
return err
}
@@ -316,26 +463,29 @@ func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
}
func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
+DEFAULT := int64(u.ChunkSize)
+ss, err := stream.NewStreamSectionReader(file, int(DEFAULT), &up)
+if err != nil {
+return err
+}
uploadUrl := u.UploadUrls[0]
var finish int64 = 0
-DEFAULT := int64(u.ChunkSize)
for finish < file.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := file.GetSize() - finish
byteSize := min(left, DEFAULT)
-err := retry.Do(
+utils.Log.Debugf("[CloudreveV4-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
+rd, err := ss.GetSectionReader(finish, byteSize)
+if err != nil {
+return err
+}
+err = retry.Do(
func() error {
-utils.Log.Debugf("[CloudreveV4-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
-byteData := make([]byte, byteSize)
-n, err := io.ReadFull(file, byteData)
-utils.Log.Debug(err, n)
-if err != nil {
-return err
-}
-req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl,
-driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
+rd.Seek(0, io.SeekStart)
+req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, rd))
if err != nil {
return err
}
@@ -362,6 +512,7 @@ func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
+ss.FreeSectionReader(rd)
if err != nil {
return err
}
@@ -375,27 +526,31 @@ func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
}
func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
+DEFAULT := int64(u.ChunkSize)
+ss, err := stream.NewStreamSectionReader(file, int(DEFAULT), &up)
+if err != nil {
+return err
+}
var finish int64 = 0
var chunk int = 0
var etags []string
-DEFAULT := int64(u.ChunkSize)
for finish < file.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := file.GetSize() - finish
byteSize := min(left, DEFAULT)
-err := retry.Do(
+utils.Log.Debugf("[CloudreveV4-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
+rd, err := ss.GetSectionReader(finish, byteSize)
+if err != nil {
+return err
+}
+err = retry.Do(
func() error {
-utils.Log.Debugf("[CloudreveV4-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
-byteData := make([]byte, byteSize)
-n, err := io.ReadFull(file, byteData)
-utils.Log.Debug(err, n)
-if err != nil {
-return err
-}
+rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, u.UploadUrls[chunk],
-driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
+driver.NewLimitedUploadStream(ctx, rd))
if err != nil {
return err
}
@@ -421,6 +576,7 @@ func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
+ss.FreeSectionReader(rd)
if err != nil {
return err
}
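isTokenExpired prefers the RFC 3339 expires strings returned at login and falls back to the JWT exp claim, refreshing ten minutes ahead of the deadline. A standalone sketch of decoding an unverified JWT payload and applying the same margin (the token is fabricated inside the example, not a real credential):

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
	"time"
)

type claims struct {
	Exp int64 `json:"exp"`
}

func main() {
	// Build a fake three-segment token whose payload expires in 5 minutes.
	payload, _ := json.Marshal(claims{Exp: time.Now().Add(5 * time.Minute).Unix()})
	token := "x." + base64.RawURLEncoding.EncodeToString(payload) + ".y"

	// Decode the middle segment without verifying the signature.
	var c claims
	seg := strings.Split(token, ".")[1]
	data, _ := base64.RawURLEncoding.DecodeString(seg)
	_ = json.Unmarshal(data, &c)

	// Refresh when less than 10 minutes of validity remain.
	needsRefresh := time.Unix(c.Exp, 0).Before(time.Now().Add(10 * time.Minute))
	fmt.Println("needs refresh:", needsRefresh) // true: only 5 minutes left
}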

View File

@@ -0,0 +1,230 @@
package cnb_releases
import (
"bytes"
"context"
"fmt"
"io"
"mime/multipart"
"net/http"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/go-resty/resty/v2"
)
type CnbReleases struct {
model.Storage
Addition
ref *CnbReleases
}
func (d *CnbReleases) Config() driver.Config {
return config
}
func (d *CnbReleases) GetAddition() driver.Additional {
return &d.Addition
}
func (d *CnbReleases) Init(ctx context.Context) error {
return nil
}
func (d *CnbReleases) InitReference(storage driver.Driver) error {
refStorage, ok := storage.(*CnbReleases)
if ok {
d.ref = refStorage
return nil
}
return fmt.Errorf("ref: storage is not CnbReleases")
}
func (d *CnbReleases) Drop(ctx context.Context) error {
d.ref = nil
return nil
}
func (d *CnbReleases) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
if dir.GetPath() == "/" {
// get all releases for root dir
var resp ReleaseList
err := d.Request(http.MethodGet, "/{repo}/-/releases", func(req *resty.Request) {
req.SetPathParam("repo", d.Repo)
}, &resp)
if err != nil {
return nil, err
}
return utils.SliceConvert(resp, func(src Release) (model.Obj, error) {
name := src.Name
if d.UseTagName {
name = src.TagName
}
return &model.Object{
ID: src.ID,
Name: name,
Size: d.sumAssetsSize(src.Assets),
Ctime: src.CreatedAt,
Modified: src.UpdatedAt,
IsFolder: true,
}, nil
})
} else {
// get release info by release id
releaseID := dir.GetID()
if releaseID == "" {
return nil, errs.ObjectNotFound
}
var resp Release
err := d.Request(http.MethodGet, "/{repo}/-/releases/{release_id}", func(req *resty.Request) {
req.SetPathParam("repo", d.Repo)
req.SetPathParam("release_id", releaseID)
}, &resp)
if err != nil {
return nil, err
}
return utils.SliceConvert(resp.Assets, func(src ReleaseAsset) (model.Obj, error) {
return &Object{
Object: model.Object{
ID: src.ID,
Path: src.Path,
Name: src.Name,
Size: src.Size,
Ctime: src.CreatedAt,
Modified: src.UpdatedAt,
IsFolder: false,
},
ParentID: dir.GetID(),
}, nil
})
}
}
func (d *CnbReleases) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
return &model.Link{
URL: "https://cnb.cool" + file.GetPath(),
}, nil
}
func (d *CnbReleases) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
if parentDir.GetPath() == "/" {
// create a new release
branch := d.DefaultBranch
if branch == "" {
branch = "main" // fallback to "main" if not set
}
return d.Request(http.MethodPost, "/{repo}/-/releases", func(req *resty.Request) {
req.SetPathParam("repo", d.Repo)
req.SetBody(base.Json{
"name": dirName,
"tag_name": dirName,
"target_commitish": branch,
})
}, nil)
}
return errs.NotImplement
}
func (d *CnbReleases) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
return nil, errs.NotImplement
}
func (d *CnbReleases) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
if srcObj.IsDir() && !d.UseTagName {
return d.Request(http.MethodPatch, "/{repo}/-/releases/{release_id}", func(req *resty.Request) {
req.SetPathParam("repo", d.Repo)
req.SetPathParam("release_id", srcObj.GetID())
req.SetFormData(map[string]string{
"name": newName,
})
}, nil)
}
return errs.NotImplement
}
func (d *CnbReleases) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
return nil, errs.NotImplement
}
func (d *CnbReleases) Remove(ctx context.Context, obj model.Obj) error {
if obj.IsDir() {
return d.Request(http.MethodDelete, "/{repo}/-/releases/{release_id}", func(req *resty.Request) {
req.SetPathParam("repo", d.Repo)
req.SetPathParam("release_id", obj.GetID())
}, nil)
}
if o, ok := obj.(*Object); ok {
return d.Request(http.MethodDelete, "/{repo}/-/releases/{release_id}/assets/{asset_id}", func(req *resty.Request) {
req.SetPathParam("repo", d.Repo)
req.SetPathParam("release_id", o.ParentID)
req.SetPathParam("asset_id", obj.GetID())
}, nil)
} else {
return fmt.Errorf("unable to get release ID")
}
}
func (d *CnbReleases) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
// 1. get upload info
var resp ReleaseAssetUploadURL
err := d.Request(http.MethodPost, "/{repo}/-/releases/{release_id}/asset-upload-url", func(req *resty.Request) {
req.SetPathParam("repo", d.Repo)
req.SetPathParam("release_id", dstDir.GetID())
req.SetBody(base.Json{
"asset_name": file.GetName(),
"overwrite": true,
"size": file.GetSize(),
})
}, &resp)
if err != nil {
return err
}
// 2. upload file
// use multipart to create form file
var b bytes.Buffer
w := multipart.NewWriter(&b)
_, err = w.CreateFormFile("file", file.GetName())
if err != nil {
return err
}
headSize := b.Len()
err = w.Close()
if err != nil {
return err
}
head := bytes.NewReader(b.Bytes()[:headSize])
tail := bytes.NewReader(b.Bytes()[headSize:])
rateLimitedRd := driver.NewLimitedUploadStream(ctx, io.MultiReader(head, file, tail))
// use net/http to upload file
ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Duration(resp.ExpiresInSec+1)*time.Second)
defer cancel()
req, err := http.NewRequestWithContext(ctxWithTimeout, http.MethodPost, resp.UploadURL, rateLimitedRd)
if err != nil {
return err
}
req.Header.Set("Content-Type", w.FormDataContentType())
req.Header.Set("User-Agent", base.UserAgent)
httpResp, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer httpResp.Body.Close()
if httpResp.StatusCode != http.StatusNoContent {
return fmt.Errorf("upload file failed: %s", httpResp.Status)
}
// 3. verify upload
return d.Request(http.MethodPost, resp.VerifyURL, nil, nil)
}
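Put streams the upload instead of buffering it: the multipart prologue and epilogue are rendered once into a small buffer, split at headSize, and the file reader is spliced between the two halves with io.MultiReader, so the request body is produced end to end without holding the file in memory. The trick in isolation, as a minimal runnable sketch:

package main

import (
	"bytes"
	"fmt"
	"io"
	"mime/multipart"
	"strings"
)

func main() {
	var b bytes.Buffer
	w := multipart.NewWriter(&b)
	_, _ = w.CreateFormFile("file", "example.txt") // writes the part header into b
	headSize := b.Len()
	_ = w.Close() // appends the closing boundary after the header

	head := bytes.NewReader(b.Bytes()[:headSize])
	tail := bytes.NewReader(b.Bytes()[headSize:])
	file := strings.NewReader("file contents stream in here")

	body := io.MultiReader(head, file, tail) // header + streamed file + trailer
	out, _ := io.ReadAll(body)
	fmt.Printf("content type: %s\nbody:\n%s", w.FormDataContentType(), out)
}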
var _ driver.Driver = (*CnbReleases)(nil)

View File

@@ -0,0 +1,26 @@
package cnb_releases
import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/op"
)
type Addition struct {
driver.RootPath
Repo string `json:"repo" type:"string" required:"true"`
Token string `json:"token" type:"string" required:"true"`
UseTagName bool `json:"use_tag_name" type:"bool" default:"false" help:"Use tag name instead of release name"`
DefaultBranch string `json:"default_branch" type:"string" default:"main" help:"Default branch for new releases"`
}
var config = driver.Config{
Name: "CNB Releases",
LocalSort: true,
DefaultRoot: "/",
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &CnbReleases{}
})
}

View File

@@ -0,0 +1,100 @@
package cnb_releases
import (
"time"
"github.com/OpenListTeam/OpenList/v4/internal/model"
)
type Object struct {
model.Object
ParentID string
}
type TagList []Tag
type Tag struct {
Commit struct {
Author UserInfo `json:"author"`
Commit CommitObject `json:"commit"`
Committer UserInfo `json:"committer"`
Parents []CommitParent `json:"parents"`
Sha string `json:"sha"`
} `json:"commit"`
Name string `json:"name"`
Target string `json:"target"`
TargetType string `json:"target_type"`
Verification TagObjectVerification `json:"verification"`
}
type UserInfo struct {
Freeze bool `json:"freeze"`
Nickname string `json:"nickname"`
Username string `json:"username"`
}
type CommitObject struct {
Author Signature `json:"author"`
CommentCount int `json:"comment_count"`
Committer Signature `json:"committer"`
Message string `json:"message"`
Tree CommitObjectTree `json:"tree"`
Verification CommitObjectVerification `json:"verification"`
}
type Signature struct {
Date time.Time `json:"date"`
Email string `json:"email"`
Name string `json:"name"`
}
type CommitObjectTree struct {
Sha string `json:"sha"`
}
type CommitObjectVerification struct {
Payload string `json:"payload"`
Reason string `json:"reason"`
Signature string `json:"signature"`
Verified bool `json:"verified"`
VerifiedAt string `json:"verified_at"`
}
type CommitParent = CommitObjectTree
type TagObjectVerification = CommitObjectVerification
type ReleaseList []Release
type Release struct {
Assets []ReleaseAsset `json:"assets"`
Author UserInfo `json:"author"`
Body string `json:"body"`
CreatedAt time.Time `json:"created_at"`
Draft bool `json:"draft"`
ID string `json:"id"`
IsLatest bool `json:"is_latest"`
Name string `json:"name"`
Prerelease bool `json:"prerelease"`
PublishedAt time.Time `json:"published_at"`
TagCommitish string `json:"tag_commitish"`
TagName string `json:"tag_name"`
UpdatedAt time.Time `json:"updated_at"`
}
type ReleaseAsset struct {
ContentType string `json:"content_type"`
CreatedAt time.Time `json:"created_at"`
ID string `json:"id"`
Name string `json:"name"`
Path string `json:"path"`
Size int64 `json:"size"`
UpdatedAt time.Time `json:"updated_at"`
Uploader UserInfo `json:"uploader"`
}
type ReleaseAssetUploadURL struct {
UploadURL string `json:"upload_url"`
ExpiresInSec int `json:"expires_in_sec"`
VerifyURL string `json:"verify_url"`
}

View File

@ -0,0 +1,58 @@
package cnb_releases
import (
"encoding/json"
"fmt"
"net/http"
"strings"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
log "github.com/sirupsen/logrus"
)
// do others that are not defined in the Driver interface
func (d *CnbReleases) Request(method string, path string, callback base.ReqCallback, resp any) error {
if d.ref != nil {
return d.ref.Request(method, path, callback, resp)
}
var url string
if strings.HasPrefix(path, "http") {
url = path
} else {
url = "https://api.cnb.cool" + path
}
req := base.RestyClient.R()
req.SetHeader("Accept", "application/json")
req.SetAuthScheme("Bearer")
req.SetAuthToken(d.Token)
if callback != nil {
callback(req)
}
res, err := req.Execute(method, url)
log.Debugln(res.String())
if err != nil {
return err
}
if res.StatusCode() != http.StatusOK && res.StatusCode() != http.StatusCreated && res.StatusCode() != http.StatusNoContent {
return fmt.Errorf("failed to request %s, status code: %d, message: %s", url, res.StatusCode(), res.String())
}
if resp != nil {
err = json.Unmarshal(res.Body(), resp)
if err != nil {
return err
}
}
return nil
}
func (d *CnbReleases) sumAssetsSize(assets []ReleaseAsset) int64 {
var size int64
for _, asset := range assets {
size += asset.Size
}
return size
}

View File

@ -292,10 +292,10 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
if offset == 0 && limit > 0 {
fileHeader = make([]byte, fileHeaderSize)
-n, _ := io.ReadFull(remoteReader, fileHeader)
+n, err := io.ReadFull(remoteReader, fileHeader)
if n != fileHeaderSize {
fileHeader = nil
-return nil, fmt.Errorf("can't read data, expected=%d, got=%d", fileHeaderSize, n)
+return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", fileHeaderSize, n, err)
}
if limit <= fileHeaderSize {
remoteReader.Close()
@ -317,8 +317,7 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
}
return readSeeker, nil
}),
SyncClosers: utils.NewSyncClosers(remoteLink),
-ContentLength: remoteSize,
}, nil
}
@ -402,7 +401,6 @@ func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, streamer model.FileSt
},
Reader: wrappedIn,
Mimetype: "application/octet-stream",
-WebPutAsTask: streamer.NeedStore(),
ForceStreamUpload: true,
Exist: streamer.GetExist(),
}
@ -413,6 +411,20 @@ func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, streamer model.FileSt
return nil
}
+func (d *Crypt) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
+wd, ok := d.remoteStorage.(driver.WithDetails)
+if !ok {
+return nil, errs.NotImplement
+}
+remoteDetails, err := wd.GetDetails(ctx)
+if err != nil {
+return nil, err
+}
+return &model.StorageDetails{
+DiskUsage: remoteDetails.DiskUsage,
+}, nil
+}
//func (d *Safe) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
// return nil, errs.NotSupport
//}
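The GetDetails method added above is Go's optional-interface probe: the crypt layer forwards disk usage only when the wrapped remote storage implements the capability, and reports not-implemented otherwise. A generic, runnable sketch of that pattern (all types below are invented for illustration):
package main

import (
	"errors"
	"fmt"
)

type WithDetails interface{ Details() (string, error) }

type wrapper struct{ inner any }

// Details forwards the call only if the wrapped value has the capability.
func (w wrapper) Details() (string, error) {
	wd, ok := w.inner.(WithDetails) // probe the optional interface
	if !ok {
		return "", errors.New("not implemented")
	}
	return wd.Details()
}

type disk struct{}

func (disk) Details() (string, error) { return "42 GiB free", nil }

func main() {
	fmt.Println(wrapper{inner: disk{}}.Details()) // 42 GiB free <nil>
	fmt.Println(wrapper{inner: 123}.Details())    // "" not implemented
}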

drivers/degoo/driver.go Normal file (203 lines)
View File

@ -0,0 +1,203 @@
package degoo
import (
"context"
"fmt"
"net/http"
"strconv"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
)
type Degoo struct {
model.Storage
Addition
client *http.Client
}
func (d *Degoo) Config() driver.Config {
return config
}
func (d *Degoo) GetAddition() driver.Additional {
return &d.Addition
}
func (d *Degoo) Init(ctx context.Context) error {
d.client = base.HttpClient
// Ensure we have a valid token (will login if needed or refresh if expired)
if err := d.ensureValidToken(ctx); err != nil {
return fmt.Errorf("failed to initialize token: %w", err)
}
return d.getDevices(ctx)
}
func (d *Degoo) Drop(ctx context.Context) error {
return nil
}
func (d *Degoo) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
items, err := d.getAllFileChildren5(ctx, dir.GetID())
if err != nil {
return nil, err
}
return utils.MustSliceConvert(items, func(s DegooFileItem) model.Obj {
isFolder := s.Category == 2 || s.Category == 1 || s.Category == 10
createTime, modTime, _ := humanReadableTimes(s.CreationTime, s.LastModificationTime, s.LastUploadTime)
size, err := strconv.ParseInt(s.Size, 10, 64)
if err != nil {
size = 0 // Default to 0 if size parsing fails
}
return &model.Object{
ID: s.ID,
Path: s.FilePath,
Name: s.Name,
Size: size,
Modified: modTime,
Ctime: createTime,
IsFolder: isFolder,
}
}), nil
}
func (d *Degoo) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
item, err := d.getOverlay4(ctx, file.GetID())
if err != nil {
return nil, err
}
return &model.Link{URL: item.URL}, nil
}
func (d *Degoo) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
// MakeDir creates a folder by calling the setUploadFile3 API with a special checksum and size.
const query = `mutation SetUploadFile3($Token: String!, $FileInfos: [FileInfoUpload3]!) { setUploadFile3(Token: $Token, FileInfos: $FileInfos) }`
variables := map[string]interface{}{
"Token": d.AccessToken,
"FileInfos": []map[string]interface{}{
{
"Checksum": folderChecksum,
"Name": dirName,
"CreationTime": time.Now().UnixMilli(),
"ParentID": parentDir.GetID(),
"Size": 0,
},
},
}
_, err := d.apiCall(ctx, "SetUploadFile3", query, variables)
if err != nil {
return err
}
return nil
}
func (d *Degoo) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
const query = `mutation SetMoveFile($Token: String!, $Copy: Boolean, $NewParentID: String!, $FileIDs: [String]!) { setMoveFile(Token: $Token, Copy: $Copy, NewParentID: $NewParentID, FileIDs: $FileIDs) }`
variables := map[string]interface{}{
"Token": d.AccessToken,
"Copy": false,
"NewParentID": dstDir.GetID(),
"FileIDs": []string{srcObj.GetID()},
}
_, err := d.apiCall(ctx, "SetMoveFile", query, variables)
if err != nil {
return nil, err
}
return srcObj, nil
}
func (d *Degoo) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
const query = `mutation SetRenameFile($Token: String!, $FileRenames: [FileRenameInfo]!) { setRenameFile(Token: $Token, FileRenames: $FileRenames) }`
variables := map[string]interface{}{
"Token": d.AccessToken,
"FileRenames": []DegooFileRenameInfo{
{
ID: srcObj.GetID(),
NewName: newName,
},
},
}
_, err := d.apiCall(ctx, "SetRenameFile", query, variables)
if err != nil {
return err
}
return nil
}
func (d *Degoo) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
// Copy is not implemented; the Degoo API does not support direct copy.
return nil, errs.NotImplement
}
func (d *Degoo) Remove(ctx context.Context, obj model.Obj) error {
// Remove deletes a file or folder (moves to trash).
const query = `mutation SetDeleteFile5($Token: String!, $IsInRecycleBin: Boolean!, $IDs: [IDType]!) { setDeleteFile5(Token: $Token, IsInRecycleBin: $IsInRecycleBin, IDs: $IDs) }`
variables := map[string]interface{}{
"Token": d.AccessToken,
"IsInRecycleBin": false,
"IDs": []map[string]string{{"FileID": obj.GetID()}},
}
_, err := d.apiCall(ctx, "SetDeleteFile5", query, variables)
return err
}
func (d *Degoo) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
tmpF, err := file.CacheFullAndWriter(&up, nil)
if err != nil {
return err
}
parentID := dstDir.GetID()
// Calculate the checksum for the file.
checksum, err := d.checkSum(tmpF)
if err != nil {
return err
}
// 1. Get upload authorization via getBucketWriteAuth4.
auths, err := d.getBucketWriteAuth4(ctx, file, parentID, checksum)
if err != nil {
return err
}
// 2. Upload file.
// Rapid upload: skip the S3 transfer when the server already has this checksum.
if auths.GetBucketWriteAuth4[0].Error != "Already exist!" {
err = d.uploadS3(ctx, auths, tmpF, file, checksum)
if err != nil {
return err
}
}
// 3. Register metadata with setUploadFile3.
data, err := d.SetUploadFile3(ctx, file, parentID, checksum)
if err != nil {
return err
}
if !data.SetUploadFile3 {
return fmt.Errorf("setUploadFile3 failed: %v", data)
}
return nil
}

drivers/degoo/meta.go Normal file (27 lines)
View File

@ -0,0 +1,27 @@
package degoo
import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/op"
)
type Addition struct {
driver.RootID
Username string `json:"username" help:"Your Degoo account email"`
Password string `json:"password" help:"Your Degoo account password"`
RefreshToken string `json:"refresh_token" help:"Refresh token for automatic token renewal, obtained automatically"`
AccessToken string `json:"access_token" help:"Access token for Degoo API, obtained automatically"`
}
var config = driver.Config{
Name: "Degoo",
LocalSort: true,
DefaultRoot: "0",
NoOverwriteUpload: true,
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &Degoo{}
})
}

drivers/degoo/types.go Normal file (110 lines)
View File

@ -0,0 +1,110 @@
package degoo
import (
"encoding/json"
)
// DegooLoginRequest represents the login request body.
type DegooLoginRequest struct {
GenerateToken bool `json:"GenerateToken"`
Username string `json:"Username"`
Password string `json:"Password"`
}
// DegooLoginResponse represents a successful login response.
type DegooLoginResponse struct {
Token string `json:"Token"`
RefreshToken string `json:"RefreshToken"`
}
// DegooAccessTokenRequest represents the token refresh request body.
type DegooAccessTokenRequest struct {
RefreshToken string `json:"RefreshToken"`
}
// DegooAccessTokenResponse represents the token refresh response.
type DegooAccessTokenResponse struct {
AccessToken string `json:"AccessToken"`
}
// DegooFileItem represents a Degoo file or folder.
type DegooFileItem struct {
ID string `json:"ID"`
ParentID string `json:"ParentID"`
Name string `json:"Name"`
Category int `json:"Category"`
Size string `json:"Size"`
URL string `json:"URL"`
CreationTime string `json:"CreationTime"`
LastModificationTime string `json:"LastModificationTime"`
LastUploadTime string `json:"LastUploadTime"`
MetadataID string `json:"MetadataID"`
DeviceID int64 `json:"DeviceID"`
FilePath string `json:"FilePath"`
IsInRecycleBin bool `json:"IsInRecycleBin"`
}
type DegooErrors struct {
Path []string `json:"path"`
Data interface{} `json:"data"`
ErrorType string `json:"errorType"`
ErrorInfo interface{} `json:"errorInfo"`
Message string `json:"message"`
}
// DegooGraphqlResponse is the common structure for GraphQL API responses.
type DegooGraphqlResponse struct {
Data json.RawMessage `json:"data"`
Errors []DegooErrors `json:"errors,omitempty"`
}
// DegooGetChildren5Data is the data field for getFileChildren5.
type DegooGetChildren5Data struct {
GetFileChildren5 struct {
Items []DegooFileItem `json:"Items"`
NextToken string `json:"NextToken"`
} `json:"getFileChildren5"`
}
// DegooGetOverlay4Data is the data field for getOverlay4.
type DegooGetOverlay4Data struct {
GetOverlay4 DegooFileItem `json:"getOverlay4"`
}
// DegooFileRenameInfo represents a file rename operation.
type DegooFileRenameInfo struct {
ID string `json:"ID"`
NewName string `json:"NewName"`
}
// DegooFileIDs represents a list of file IDs for move operations.
type DegooFileIDs struct {
FileIDs []string `json:"FileIDs"`
}
// DegooGetBucketWriteAuth4Data is the data field for GetBucketWriteAuth4.
type DegooGetBucketWriteAuth4Data struct {
GetBucketWriteAuth4 []struct {
AuthData struct {
PolicyBase64 string `json:"PolicyBase64"`
Signature string `json:"Signature"`
BaseURL string `json:"BaseURL"`
KeyPrefix string `json:"KeyPrefix"`
AccessKey struct {
Key string `json:"Key"`
Value string `json:"Value"`
} `json:"AccessKey"`
ACL string `json:"ACL"`
AdditionalBody []struct {
Key string `json:"Key"`
Value string `json:"Value"`
} `json:"AdditionalBody"`
} `json:"AuthData"`
Error interface{} `json:"Error"`
} `json:"getBucketWriteAuth4"`
}
// DegooSetUploadFile3Data is the data field for SetUploadFile3.
type DegooSetUploadFile3Data struct {
SetUploadFile3 bool `json:"setUploadFile3"`
}

drivers/degoo/upload.go Normal file (198 lines)
View File

@ -0,0 +1,198 @@
package degoo
import (
"bytes"
"context"
"crypto/sha1"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"mime/multipart"
"net/http"
"strconv"
"strings"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
)
func (d *Degoo) getBucketWriteAuth4(ctx context.Context, file model.FileStreamer, parentID string, checksum string) (*DegooGetBucketWriteAuth4Data, error) {
const query = `query GetBucketWriteAuth4(
$Token: String!
$ParentID: String!
$StorageUploadInfos: [StorageUploadInfo2]
) {
getBucketWriteAuth4(
Token: $Token
ParentID: $ParentID
StorageUploadInfos: $StorageUploadInfos
) {
AuthData {
PolicyBase64
Signature
BaseURL
KeyPrefix
AccessKey {
Key
Value
}
ACL
AdditionalBody {
Key
Value
}
}
Error
}
}`
variables := map[string]interface{}{
"Token": d.AccessToken,
"ParentID": parentID,
"StorageUploadInfos": []map[string]string{{
"FileName": file.GetName(),
"Checksum": checksum,
"Size": strconv.FormatInt(file.GetSize(), 10),
}}}
data, err := d.apiCall(ctx, "GetBucketWriteAuth4", query, variables)
if err != nil {
return nil, err
}
var resp DegooGetBucketWriteAuth4Data
err = json.Unmarshal(data, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}
// checkSum calculates the SHA1-based checksum for the Degoo upload API.
func (d *Degoo) checkSum(file io.Reader) (string, error) {
seed := []byte{13, 7, 2, 2, 15, 40, 75, 117, 13, 10, 19, 16, 29, 23, 3, 36}
hasher := sha1.New()
hasher.Write(seed)
if _, err := utils.CopyWithBuffer(hasher, file); err != nil {
return "", err
}
cs := hasher.Sum(nil)
csBytes := []byte{10, byte(len(cs))}
csBytes = append(csBytes, cs...)
csBytes = append(csBytes, 16, 0)
return strings.ReplaceAll(base64.StdEncoding.EncodeToString(csBytes), "/", "_"), nil
}
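The byte layout checkSum assembles by hand mirrors protobuf wire framing: byte 10 (field 1, length-delimited), the digest length, the seeded SHA-1 digest, then bytes {16, N} (field 2 as a varint). The folderChecksum constant in util.go appears to be the same layout with a zero-length digest and N = 2, which this standalone, illustration-only snippet confirms:
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// "CgAQAg" is folderChecksum; add padding for the standard decoder.
	raw, err := base64.StdEncoding.DecodeString("CgAQAg==")
	if err != nil {
		panic(err)
	}
	// Prints [10 0 16 2]: tag 10, zero-length digest, tag 16, value 2,
	// the same {10, len, digest..., 16, N} layout checkSum emits for files.
	fmt.Println(raw)
}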
func (d *Degoo) uploadS3(ctx context.Context, auths *DegooGetBucketWriteAuth4Data, tmpF model.File, file model.FileStreamer, checksum string) error {
a := auths.GetBucketWriteAuth4[0].AuthData
_, err := tmpF.Seek(0, io.SeekStart)
if err != nil {
return err
}
ext := utils.Ext(file.GetName())
key := fmt.Sprintf("%s%s/%s.%s", a.KeyPrefix, ext, checksum, ext)
var b bytes.Buffer
w := multipart.NewWriter(&b)
err = w.WriteField("key", key)
if err != nil {
return err
}
err = w.WriteField("acl", a.ACL)
if err != nil {
return err
}
err = w.WriteField("policy", a.PolicyBase64)
if err != nil {
return err
}
err = w.WriteField("signature", a.Signature)
if err != nil {
return err
}
err = w.WriteField(a.AccessKey.Key, a.AccessKey.Value)
if err != nil {
return err
}
for _, additional := range a.AdditionalBody {
err = w.WriteField(additional.Key, additional.Value)
if err != nil {
return err
}
}
err = w.WriteField("Content-Type", "")
if err != nil {
return err
}
_, err = w.CreateFormFile("file", key)
if err != nil {
return err
}
headSize := b.Len()
err = w.Close()
if err != nil {
return err
}
head := bytes.NewReader(b.Bytes()[:headSize])
tail := bytes.NewReader(b.Bytes()[headSize:])
rateLimitedRd := driver.NewLimitedUploadStream(ctx, io.MultiReader(head, tmpF, tail))
req, err := http.NewRequestWithContext(ctx, http.MethodPost, a.BaseURL, rateLimitedRd)
if err != nil {
return err
}
req.Header.Add("ngsw-bypass", "1")
req.Header.Add("Content-Type", w.FormDataContentType())
res, err := d.client.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != http.StatusNoContent {
return fmt.Errorf("upload failed with status code %d", res.StatusCode)
}
return nil
}
var _ driver.Driver = (*Degoo)(nil)
func (d *Degoo) SetUploadFile3(ctx context.Context, file model.FileStreamer, parentID string, checksum string) (*DegooSetUploadFile3Data, error) {
const query = `mutation SetUploadFile3($Token: String!, $FileInfos: [FileInfoUpload3]!) {
setUploadFile3(Token: $Token, FileInfos: $FileInfos)
}`
variables := map[string]interface{}{
"Token": d.AccessToken,
"FileInfos": []map[string]string{{
"Checksum": checksum,
"CreationTime": strconv.FormatInt(file.CreateTime().UnixMilli(), 10),
"Name": file.GetName(),
"ParentID": parentID,
"Size": strconv.FormatInt(file.GetSize(), 10),
}}}
data, err := d.apiCall(ctx, "SetUploadFile3", query, variables)
if err != nil {
return nil, err
}
var resp DegooSetUploadFile3Data
err = json.Unmarshal(data, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}

drivers/degoo/util.go Normal file (462 lines)
View File

@ -0,0 +1,462 @@
package degoo
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"fmt"
"net/http"
"strconv"
"strings"
"sync"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/op"
)
// Thanks to https://github.com/bernd-wechner/Degoo for API research.
const (
// API endpoints
loginURL = "https://rest-api.degoo.com/login"
accessTokenURL = "https://rest-api.degoo.com/access-token/v2"
apiURL = "https://production-appsync.degoo.com/graphql"
// API configuration
apiKey = "da2-vs6twz5vnjdavpqndtbzg3prra"
folderChecksum = "CgAQAg"
// Token management
tokenRefreshThreshold = 5 * time.Minute
// Rate limiting
minRequestInterval = 1 * time.Second
// Error messages
errRateLimited = "rate limited (429), please try again later"
errUnauthorized = "unauthorized access"
)
var (
// Global rate limiting - protects against concurrent API calls
lastRequestTime time.Time
requestMutex sync.Mutex
)
// JWT payload structure for token expiration checking
type JWTPayload struct {
UserID string `json:"userID"`
Exp int64 `json:"exp"`
Iat int64 `json:"iat"`
}
// Rate limiting helper functions
// applyRateLimit ensures minimum interval between API requests
func applyRateLimit() {
requestMutex.Lock()
defer requestMutex.Unlock()
if !lastRequestTime.IsZero() {
if elapsed := time.Since(lastRequestTime); elapsed < minRequestInterval {
time.Sleep(minRequestInterval - elapsed)
}
}
lastRequestTime = time.Now()
}
// HTTP request helper functions
// createJSONRequest creates a new HTTP request with JSON body
func createJSONRequest(ctx context.Context, method, url string, body interface{}) (*http.Request, error) {
jsonBody, err := json.Marshal(body)
if err != nil {
return nil, fmt.Errorf("failed to marshal request body: %w", err)
}
req, err := http.NewRequestWithContext(ctx, method, url, bytes.NewBuffer(jsonBody))
if err != nil {
return nil, fmt.Errorf("failed to create request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("User-Agent", base.UserAgent)
return req, nil
}
// checkHTTPResponse checks for common HTTP error conditions
func checkHTTPResponse(resp *http.Response, operation string) error {
if resp.StatusCode == http.StatusTooManyRequests {
return fmt.Errorf("%s %s", operation, errRateLimited)
}
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("%s failed: %s", operation, resp.Status)
}
return nil
}
// isTokenExpired checks if the JWT token is expired or will expire soon
func (d *Degoo) isTokenExpired() bool {
if d.AccessToken == "" {
return true
}
payload, err := extractJWTPayload(d.AccessToken)
if err != nil {
return true // Invalid token format
}
// Check if token expires within the threshold
expireTime := time.Unix(payload.Exp, 0)
return time.Now().Add(tokenRefreshThreshold).After(expireTime)
}
// extractJWTPayload extracts and parses JWT payload
func extractJWTPayload(token string) (*JWTPayload, error) {
parts := strings.Split(token, ".")
if len(parts) != 3 {
return nil, fmt.Errorf("invalid JWT format")
}
// Decode the payload (second part)
payload, err := base64.RawURLEncoding.DecodeString(parts[1])
if err != nil {
return nil, fmt.Errorf("failed to decode JWT payload: %w", err)
}
var jwtPayload JWTPayload
if err := json.Unmarshal(payload, &jwtPayload); err != nil {
return nil, fmt.Errorf("failed to parse JWT payload: %w", err)
}
return &jwtPayload, nil
}
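A self-contained sketch of what isTokenExpired and extractJWTPayload do above: build a token whose payload claims exp one second in the past, decode the middle base64url segment, and check it against the clock (header and signature are dummies here):
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
	"time"
)

func main() {
	payload, _ := json.Marshal(map[string]int64{"exp": time.Now().Add(-time.Second).Unix()})
	token := "h." + base64.RawURLEncoding.EncodeToString(payload) + ".s"
	parts := strings.Split(token, ".")
	raw, _ := base64.RawURLEncoding.DecodeString(parts[1]) // payload is the second segment
	var claims struct {
		Exp int64 `json:"exp"`
	}
	_ = json.Unmarshal(raw, &claims)
	// prints true: the token is already past its exp claim
	fmt.Println(time.Now().After(time.Unix(claims.Exp, 0)))
}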
// refreshToken attempts to refresh the access token using the refresh token
func (d *Degoo) refreshToken(ctx context.Context) error {
if d.RefreshToken == "" {
return fmt.Errorf("no refresh token available")
}
// Create request
tokenReq := DegooAccessTokenRequest{RefreshToken: d.RefreshToken}
req, err := createJSONRequest(ctx, "POST", accessTokenURL, tokenReq)
if err != nil {
return fmt.Errorf("failed to create refresh token request: %w", err)
}
// Execute request
resp, err := d.client.Do(req)
if err != nil {
return fmt.Errorf("refresh token request failed: %w", err)
}
defer resp.Body.Close()
// Check response
if err := checkHTTPResponse(resp, "refresh token"); err != nil {
return err
}
var accessTokenResp DegooAccessTokenResponse
if err := json.NewDecoder(resp.Body).Decode(&accessTokenResp); err != nil {
return fmt.Errorf("failed to parse access token response: %w", err)
}
if accessTokenResp.AccessToken == "" {
return fmt.Errorf("empty access token received")
}
d.AccessToken = accessTokenResp.AccessToken
// Save the updated token to storage
op.MustSaveDriverStorage(d)
return nil
}
// ensureValidToken ensures we have a valid, non-expired token
func (d *Degoo) ensureValidToken(ctx context.Context) error {
// Check if token is expired or will expire soon
if d.isTokenExpired() {
// Try to refresh token first if we have a refresh token
if d.RefreshToken != "" {
if refreshErr := d.refreshToken(ctx); refreshErr == nil {
return nil // Successfully refreshed
} else {
// If refresh failed, fall back to full login
fmt.Printf("Token refresh failed, falling back to full login: %v\n", refreshErr)
}
}
// Perform full login
if d.Username != "" && d.Password != "" {
return d.login(ctx)
}
}
return nil
}
// login performs the login process and retrieves the access token.
func (d *Degoo) login(ctx context.Context) error {
if d.Username == "" || d.Password == "" {
return fmt.Errorf("username or password not provided")
}
creds := DegooLoginRequest{
GenerateToken: true,
Username: d.Username,
Password: d.Password,
}
jsonCreds, err := json.Marshal(creds)
if err != nil {
return fmt.Errorf("failed to serialize login credentials: %w", err)
}
req, err := http.NewRequestWithContext(ctx, "POST", loginURL, bytes.NewBuffer(jsonCreds))
if err != nil {
return fmt.Errorf("failed to create login request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("User-Agent", base.UserAgent)
req.Header.Set("Origin", "https://app.degoo.com")
resp, err := d.client.Do(req)
if err != nil {
return fmt.Errorf("login request failed: %w", err)
}
defer resp.Body.Close()
// Handle rate limiting (429 Too Many Requests)
if resp.StatusCode == http.StatusTooManyRequests {
return fmt.Errorf("login rate limited (429), please try again later")
}
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("login failed: %s", resp.Status)
}
var loginResp DegooLoginResponse
if err := json.NewDecoder(resp.Body).Decode(&loginResp); err != nil {
return fmt.Errorf("failed to parse login response: %w", err)
}
if loginResp.RefreshToken != "" {
tokenReq := DegooAccessTokenRequest{RefreshToken: loginResp.RefreshToken}
jsonTokenReq, err := json.Marshal(tokenReq)
if err != nil {
return fmt.Errorf("failed to serialize access token request: %w", err)
}
tokenReqHTTP, err := http.NewRequestWithContext(ctx, "POST", accessTokenURL, bytes.NewBuffer(jsonTokenReq))
if err != nil {
return fmt.Errorf("failed to create access token request: %w", err)
}
tokenReqHTTP.Header.Set("User-Agent", base.UserAgent)
tokenResp, err := d.client.Do(tokenReqHTTP)
if err != nil {
return fmt.Errorf("failed to get access token: %w", err)
}
defer tokenResp.Body.Close()
var accessTokenResp DegooAccessTokenResponse
if err := json.NewDecoder(tokenResp.Body).Decode(&accessTokenResp); err != nil {
return fmt.Errorf("failed to parse access token response: %w", err)
}
d.AccessToken = accessTokenResp.AccessToken
d.RefreshToken = loginResp.RefreshToken // Save refresh token
} else if loginResp.Token != "" {
d.AccessToken = loginResp.Token
d.RefreshToken = "" // Direct token, no refresh token available
} else {
return fmt.Errorf("login failed, no valid token returned")
}
// Save the updated tokens to storage
op.MustSaveDriverStorage(d)
return nil
}
// apiCall performs a Degoo GraphQL API request.
func (d *Degoo) apiCall(ctx context.Context, operationName, query string, variables map[string]interface{}) (json.RawMessage, error) {
// Apply rate limiting
applyRateLimit()
// Ensure we have a valid token before making the API call
if err := d.ensureValidToken(ctx); err != nil {
return nil, fmt.Errorf("failed to ensure valid token: %w", err)
}
// Update the Token in variables if it exists (after potential refresh)
d.updateTokenInVariables(variables)
return d.executeGraphQLRequest(ctx, operationName, query, variables)
}
// updateTokenInVariables updates the Token field in GraphQL variables
func (d *Degoo) updateTokenInVariables(variables map[string]interface{}) {
if variables != nil {
if _, hasToken := variables["Token"]; hasToken {
variables["Token"] = d.AccessToken
}
}
}
// executeGraphQLRequest executes a GraphQL request with retry logic
func (d *Degoo) executeGraphQLRequest(ctx context.Context, operationName, query string, variables map[string]interface{}) (json.RawMessage, error) {
reqBody := map[string]interface{}{
"operationName": operationName,
"query": query,
"variables": variables,
}
// Create and configure request
req, err := createJSONRequest(ctx, "POST", apiURL, reqBody)
if err != nil {
return nil, err
}
// Set Degoo-specific headers
req.Header.Set("x-api-key", apiKey)
if d.AccessToken != "" {
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", d.AccessToken))
}
// Execute request
resp, err := d.client.Do(req)
if err != nil {
return nil, fmt.Errorf("GraphQL API request failed: %w", err)
}
defer resp.Body.Close()
// Check for HTTP errors
if err := checkHTTPResponse(resp, "GraphQL API"); err != nil {
return nil, err
}
// Parse GraphQL response
var degooResp DegooGraphqlResponse
if err := json.NewDecoder(resp.Body).Decode(&degooResp); err != nil {
return nil, fmt.Errorf("failed to decode GraphQL response: %w", err)
}
// Handle GraphQL errors
if len(degooResp.Errors) > 0 {
return d.handleGraphQLError(ctx, degooResp.Errors[0], operationName, query, variables)
}
return degooResp.Data, nil
}
// handleGraphQLError handles GraphQL-level errors with retry logic
func (d *Degoo) handleGraphQLError(ctx context.Context, gqlError DegooErrors, operationName, query string, variables map[string]interface{}) (json.RawMessage, error) {
if gqlError.ErrorType == "Unauthorized" {
// Re-login and retry
if err := d.login(ctx); err != nil {
return nil, fmt.Errorf("%s, login failed: %w", errUnauthorized, err)
}
// Update token in variables and retry
d.updateTokenInVariables(variables)
return d.apiCall(ctx, operationName, query, variables)
}
return nil, fmt.Errorf("GraphQL API error: %s", gqlError.Message)
}
// humanReadableTimes converts Degoo timestamps to Go time.Time.
func humanReadableTimes(creation, modification, upload string) (cTime, mTime, uTime time.Time) {
cTime, _ = time.Parse(time.RFC3339, creation)
if modification != "" {
modMillis, _ := strconv.ParseInt(modification, 10, 64)
mTime = time.Unix(0, modMillis*int64(time.Millisecond))
}
if upload != "" {
upMillis, _ := strconv.ParseInt(upload, 10, 64)
uTime = time.Unix(0, upMillis*int64(time.Millisecond))
}
return cTime, mTime, uTime
}
// getDevices fetches and caches top-level devices and folders.
func (d *Degoo) getDevices(ctx context.Context) error {
const query = `query GetFileChildren5($Token: String! $ParentID: String $AllParentIDs: [String] $Limit: Int! $Order: Int! $NextToken: String ) { getFileChildren5(Token: $Token ParentID: $ParentID AllParentIDs: $AllParentIDs Limit: $Limit Order: $Order NextToken: $NextToken) { Items { ParentID } NextToken } }`
variables := map[string]interface{}{
"Token": d.AccessToken,
"ParentID": "0",
"Limit": 10,
"Order": 3,
}
data, err := d.apiCall(ctx, "GetFileChildren5", query, variables)
if err != nil {
return err
}
var resp DegooGetChildren5Data
if err := json.Unmarshal(data, &resp); err != nil {
return fmt.Errorf("failed to parse device list: %w", err)
}
if d.RootFolderID == "0" {
if len(resp.GetFileChildren5.Items) > 0 {
d.RootFolderID = resp.GetFileChildren5.Items[0].ParentID
}
op.MustSaveDriverStorage(d)
}
return nil
}
// getAllFileChildren5 fetches all children of a directory with pagination.
func (d *Degoo) getAllFileChildren5(ctx context.Context, parentID string) ([]DegooFileItem, error) {
const query = `query GetFileChildren5($Token: String! $ParentID: String $AllParentIDs: [String] $Limit: Int! $Order: Int! $NextToken: String ) { getFileChildren5(Token: $Token ParentID: $ParentID AllParentIDs: $AllParentIDs Limit: $Limit Order: $Order NextToken: $NextToken) { Items { ID ParentID Name Category Size CreationTime LastModificationTime LastUploadTime FilePath IsInRecycleBin DeviceID MetadataID } NextToken } }`
var allItems []DegooFileItem
nextToken := ""
for {
variables := map[string]interface{}{
"Token": d.AccessToken,
"ParentID": parentID,
"Limit": 1000,
"Order": 3,
}
if nextToken != "" {
variables["NextToken"] = nextToken
}
data, err := d.apiCall(ctx, "GetFileChildren5", query, variables)
if err != nil {
return nil, err
}
var resp DegooGetChildren5Data
if err := json.Unmarshal(data, &resp); err != nil {
return nil, err
}
allItems = append(allItems, resp.GetFileChildren5.Items...)
if resp.GetFileChildren5.NextToken == "" {
break
}
nextToken = resp.GetFileChildren5.NextToken
}
return allItems, nil
}
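getAllFileChildren5 above is plain cursor pagination: request a page, append Items, follow NextToken until it comes back empty. A stripped-down sketch of the loop with a fake fetch function (nothing here is Degoo API):
package main

import "fmt"

type page struct {
	Items []int
	Next  string
}

// fetch simulates one API page; a real client would send the token upstream.
func fetch(token string) page {
	if token == "" {
		return page{Items: []int{1, 2}, Next: "t1"}
	}
	return page{Items: []int{3}, Next: ""} // empty Next ends the walk
}

func main() {
	var all []int
	token := ""
	for {
		p := fetch(token)
		all = append(all, p.Items...)
		if p.Next == "" {
			break
		}
		token = p.Next
	}
	fmt.Println(all) // [1 2 3]
}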
// getOverlay4 fetches metadata for a single item by ID.
func (d *Degoo) getOverlay4(ctx context.Context, id string) (DegooFileItem, error) {
const query = `query GetOverlay4($Token: String!, $ID: IDType!) { getOverlay4(Token: $Token, ID: $ID) { ID ParentID Name Category Size CreationTime LastModificationTime LastUploadTime URL FilePath IsInRecycleBin DeviceID MetadataID } }`
variables := map[string]interface{}{
"Token": d.AccessToken,
"ID": map[string]string{
"FileID": id,
},
}
data, err := d.apiCall(ctx, "GetOverlay4", query, variables)
if err != nil {
return DegooFileItem{}, err
}
var resp DegooGetOverlay4Data
if err := json.Unmarshal(data, &resp); err != nil {
return DegooFileItem{}, fmt.Errorf("failed to parse item metadata: %w", err)
}
return resp.GetOverlay4, nil
}

View File

@ -236,7 +236,7 @@ func (d *Doubao) Put(ctx context.Context, dstDir model.Obj, file model.FileStrea
// choose the upload method by file size
if file.GetSize() <= 1*utils.MB { // files no larger than 1MB use the plain upload
-return d.Upload(&uploadConfig, dstDir, file, up, dataType)
+return d.Upload(ctx, &uploadConfig, dstDir, file, up, dataType)
}
// larger files use multipart upload
return d.UploadByMultipart(ctx, &uploadConfig, file.GetSize(), dstDir, file, up, dataType)

View File

@ -129,7 +129,7 @@ type BuiAuditInfo struct {
AuditInfo AuditInfo `json:"audit_info"`
IsAuditing bool `json:"is_auditing"`
AuditStatus int `json:"audit_status"`
-LastUpdateTime int `json:"last_update_time"`
+LastUpdateTime int64 `json:"last_update_time"`
UnpassReason string `json:"unpass_reason"`
Details Details `json:"details"`
}
@ -184,7 +184,7 @@ type UserInfo struct {
SecUserID string `json:"sec_user_id"`
SessionKey string `json:"session_key"`
UseHmRegion bool `json:"use_hm_region"`
-UserCreateTime int `json:"user_create_time"`
+UserCreateTime int64 `json:"user_create_time"`
UserID int64 `json:"user_id"`
UserIDStr string `json:"user_id_str"`
UserVerified bool `json:"user_verified"`

View File

@ -24,6 +24,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
+"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
@ -447,41 +448,67 @@ func (d *Doubao) uploadNode(uploadConfig *UploadConfig, dir model.Obj, file mode
}
// Upload implements the plain (non-multipart) upload
-func (d *Doubao) Upload(config *UploadConfig, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, dataType string) (model.Obj, error) {
-data, err := io.ReadAll(file)
+func (d *Doubao) Upload(ctx context.Context, config *UploadConfig, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, dataType string) (model.Obj, error) {
+ss, err := stream.NewStreamSectionReader(file, int(file.GetSize()), &up)
+if err != nil {
+return nil, err
+}
+reader, err := ss.GetSectionReader(0, file.GetSize())
if err != nil {
return nil, err
}
// compute the CRC32
crc32Hash := crc32.NewIEEE()
-crc32Hash.Write(data)
+w, err := utils.CopyWithBuffer(crc32Hash, reader)
+if w != file.GetSize() {
+return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", file.GetSize(), w, err)
+}
crc32Value := hex.EncodeToString(crc32Hash.Sum(nil))
// build the request path
uploadNode := config.InnerUploadAddress.UploadNodes[0]
storeInfo := uploadNode.StoreInfos[0]
uploadUrl := fmt.Sprintf("https://%s/upload/v1/%s", uploadNode.UploadHost, storeInfo.StoreURI)
-uploadResp := UploadResp{}
-if _, err = d.uploadRequest(uploadUrl, http.MethodPost, storeInfo, func(req *resty.Request) {
-req.SetHeaders(map[string]string{
-"Content-Type": "application/octet-stream",
-"Content-Crc32": crc32Value,
-"Content-Length": fmt.Sprintf("%d", len(data)),
-"Content-Disposition": fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI)),
-})
-req.SetBody(data)
-}, &uploadResp); err != nil {
+rateLimitedRd := driver.NewLimitedUploadStream(ctx, reader)
+err = d._retryOperation("Upload", func() error {
+reader.Seek(0, io.SeekStart)
+req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadUrl, rateLimitedRd)
+if err != nil {
+return err
+}
+req.Header = map[string][]string{
+"Referer": {BaseURL + "/"},
+"Origin": {BaseURL},
+"User-Agent": {UserAgent},
+"X-Storage-U": {d.UserId},
+"Authorization": {storeInfo.Auth},
+"Content-Type": {"application/octet-stream"},
+"Content-Crc32": {crc32Value},
+"Content-Length": {fmt.Sprintf("%d", file.GetSize())},
+"Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))},
+}
+res, err := base.HttpClient.Do(req)
+if err != nil {
+return err
+}
+defer res.Body.Close()
+bytes, _ := io.ReadAll(res.Body)
+resp := UploadResp{}
+utils.Json.Unmarshal(bytes, &resp)
+if resp.Code != 2000 {
+return fmt.Errorf("upload part failed: %s", resp.Message)
+} else if resp.Data.Crc32 != crc32Value {
+return fmt.Errorf("upload part failed: crc32 mismatch, expected %s, got %s", crc32Value, resp.Data.Crc32)
+}
+return nil
+})
+ss.FreeSectionReader(reader)
+if err != nil {
return nil, err
}
-if uploadResp.Code != 2000 {
-return nil, fmt.Errorf("upload failed: %s", uploadResp.Message)
-}
uploadNodeResp, err := d.uploadNode(config, dstDir, file, dataType)
if err != nil {
return nil, err
@ -516,68 +543,107 @@ func (d *Doubao) UploadByMultipart(ctx context.Context, config *UploadConfig, fi
if config.InnerUploadAddress.AdvanceOption.SliceSize > 0 {
chunkSize = int64(config.InnerUploadAddress.AdvanceOption.SliceSize)
}
+ss, err := stream.NewStreamSectionReader(file, int(chunkSize), &up)
+if err != nil {
+return nil, err
+}
totalParts := (fileSize + chunkSize - 1) / chunkSize
// create the part info list
parts := make([]UploadPart, totalParts)
-// cache the file
-tempFile, err := file.CacheFullInTempFile()
-if err != nil {
-return nil, fmt.Errorf("failed to cache file: %w", err)
-}
up(10.0) // update progress
// configure the parallel upload
-threadG, uploadCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
-retry.Attempts(1),
+thread := min(int(totalParts), d.uploadThread)
+threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
+retry.Attempts(MaxRetryAttempts),
retry.Delay(time.Second),
-retry.DelayType(retry.BackOffDelay))
+retry.DelayType(retry.BackOffDelay),
+retry.MaxJitter(200*time.Millisecond),
+)
var partsMutex sync.Mutex
// upload all parts in parallel
-for partIndex := int64(0); partIndex < totalParts; partIndex++ {
+hash := crc32.NewIEEE()
+for partIndex := range totalParts {
if utils.IsCanceled(uploadCtx) {
break
}
-partIndex := partIndex
partNumber := partIndex + 1 // part numbers start at 1
-threadG.Go(func(ctx context.Context) error {
-// calculate this part's size and offset
-offset := partIndex * chunkSize
-size := chunkSize
-if partIndex == totalParts-1 {
-size = fileSize - offset
-}
+// calculate this part's size and offset
+offset := partIndex * chunkSize
+size := chunkSize
+if partIndex == totalParts-1 {
+size = fileSize - offset
+}
+var reader *stream.SectionReader
+var rateLimitedRd io.Reader
+crc32Value := ""
-limitedReader := driver.NewLimitedUploadStream(ctx, io.NewSectionReader(tempFile, offset, size))
-// read the data into memory
-data, err := io.ReadAll(limitedReader)
-if err != nil {
-return fmt.Errorf("failed to read part %d: %w", partNumber, err)
-}
-// compute the CRC32
-crc32Value := calculateCRC32(data)
-// upload the part via _retryOperation
-var uploadPart UploadPart
-if err = d._retryOperation(fmt.Sprintf("Upload part %d", partNumber), func() error {
-var err error
-uploadPart, err = d.uploadPart(config, uploadUrl, uploadID, partNumber, data, crc32Value)
-return err
-}); err != nil {
-return fmt.Errorf("part %d upload failed: %w", partNumber, err)
-}
-// record the successfully uploaded part
-partsMutex.Lock()
-parts[partIndex] = UploadPart{
-PartNumber: strconv.FormatInt(partNumber, 10),
-Etag: uploadPart.Etag,
-Crc32: crc32Value,
-}
-partsMutex.Unlock()
-// update progress
-progress := 10.0 + 90.0*float64(threadG.Success()+1)/float64(totalParts)
-up(math.Min(progress, 95.0))
-return nil
+threadG.GoWithLifecycle(errgroup.Lifecycle{
+Before: func(ctx context.Context) error {
+if reader == nil {
+var err error
+reader, err = ss.GetSectionReader(offset, size)
+if err != nil {
+return err
+}
+hash.Reset()
+w, err := utils.CopyWithBuffer(hash, reader)
+if w != size {
+return fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", size, w, err)
+}
+crc32Value = hex.EncodeToString(hash.Sum(nil))
+rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
+}
+return nil
+},
+Do: func(ctx context.Context) error {
+reader.Seek(0, io.SeekStart)
+req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("%s?uploadid=%s&part_number=%d&phase=transfer", uploadUrl, uploadID, partNumber), rateLimitedRd)
+if err != nil {
+return err
+}
+req.Header = map[string][]string{
+"Referer": {BaseURL + "/"},
+"Origin": {BaseURL},
+"User-Agent": {UserAgent},
+"X-Storage-U": {d.UserId},
+"Authorization": {storeInfo.Auth},
+"Content-Type": {"application/octet-stream"},
+"Content-Crc32": {crc32Value},
+"Content-Length": {fmt.Sprintf("%d", size)},
+"Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))},
+}
+res, err := base.HttpClient.Do(req)
+if err != nil {
+return err
+}
+defer res.Body.Close()
+bytes, _ := io.ReadAll(res.Body)
+uploadResp := UploadResp{}
+utils.Json.Unmarshal(bytes, &uploadResp)
+if uploadResp.Code != 2000 {
+return fmt.Errorf("upload part failed: %s", uploadResp.Message)
+} else if uploadResp.Data.Crc32 != crc32Value {
+return fmt.Errorf("upload part failed: crc32 mismatch, expected %s, got %s", crc32Value, uploadResp.Data.Crc32)
+}
+// record the successfully uploaded part
+partsMutex.Lock()
+parts[partIndex] = UploadPart{
+PartNumber: strconv.FormatInt(partNumber, 10),
+Etag: uploadResp.Data.Etag,
+Crc32: crc32Value,
+}
+partsMutex.Unlock()
+// update progress
+progress := 10.0 + 90.0*float64(threadG.Success()+1)/float64(totalParts)
+up(math.Min(progress, 95.0))
+return nil
+},
+After: func(err error) {
+ss.FreeSectionReader(reader)
+},
})
}
@ -680,42 +746,6 @@ func (d *Doubao) initMultipartUpload(config *UploadConfig, uploadUrl string, sto
return uploadResp.Data.UploadId, nil
}
-// part upload implementation
-func (d *Doubao) uploadPart(config *UploadConfig, uploadUrl, uploadID string, partNumber int64, data []byte, crc32Value string) (resp UploadPart, err error) {
-uploadResp := UploadResp{}
-storeInfo := config.InnerUploadAddress.UploadNodes[0].StoreInfos[0]
-_, err = d.uploadRequest(uploadUrl, http.MethodPost, storeInfo, func(req *resty.Request) {
-req.SetHeaders(map[string]string{
-"Content-Type": "application/octet-stream",
-"Content-Crc32": crc32Value,
-"Content-Length": fmt.Sprintf("%d", len(data)),
-"Content-Disposition": fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI)),
-})
-req.SetQueryParams(map[string]string{
-"uploadid": uploadID,
-"part_number": strconv.FormatInt(partNumber, 10),
-"phase": "transfer",
-})
-req.SetBody(data)
-req.SetContentLength(true)
-}, &uploadResp)
-if err != nil {
-return resp, err
-}
-if uploadResp.Code != 2000 {
-return resp, fmt.Errorf("upload part failed: %s", uploadResp.Message)
-} else if uploadResp.Data.Crc32 != crc32Value {
-return resp, fmt.Errorf("upload part failed: crc32 mismatch, expected %s, got %s", crc32Value, uploadResp.Data.Crc32)
-}
-return uploadResp.Data, nil
-}
// complete the multipart upload
func (d *Doubao) completeMultipartUpload(config *UploadConfig, uploadUrl, uploadID string, parts []UploadPart) error {
uploadResp := UploadResp{}
@ -784,13 +814,6 @@ func (d *Doubao) commitMultipartUpload(uploadConfig *UploadConfig) error {
return nil
}
-// compute the CRC32
-func calculateCRC32(data []byte) string {
-hash := crc32.NewIEEE()
-hash.Write(data)
-return hex.EncodeToString(hash.Sum(nil))
-}
// _retryOperation retries an operation
func (d *Doubao) _retryOperation(operation string, fn func() error) error {
return retry.Do(
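A note on the retry pattern in the reworked Doubao upload above: each attempt rewinds a seekable section reader before sending, so a failed POST can be retried without re-reading the source stream. A sketch of that idea using github.com/avast/retry-go (which the project already imports); the payload and error here are fabricated for illustration:
package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"

	"github.com/avast/retry-go"
)

func main() {
	section := bytes.NewReader([]byte("part payload"))
	attempts := 0
	err := retry.Do(func() error {
		section.Seek(0, io.SeekStart) // rewind so every attempt sends the full part
		body, _ := io.ReadAll(section)
		attempts++
		if attempts < 2 {
			return errors.New("transient upload error") // first attempt fails
		}
		fmt.Printf("sent %d bytes on attempt %d\n", len(body), attempts)
		return nil
	}, retry.Attempts(3))
	if err != nil {
		fmt.Println("upload failed:", err)
	}
}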

View File

@ -79,11 +79,11 @@ type ShareInfo struct {
RiskReviewStatus int `json:"risk_review_status"`
ConversationID string `json:"conversation_id"`
ParentID string `json:"parent_id"`
-CreateTime int `json:"create_time"`
-UpdateTime int `json:"update_time"`
+CreateTime int64 `json:"create_time"`
+UpdateTime int64 `json:"update_time"`
} `json:"first_node"`
NodeCount int `json:"node_count"`
-CreateTime int `json:"create_time"`
+CreateTime int64 `json:"create_time"`
Channel string `json:"channel"`
InfluencerType int `json:"influencer_type"`
}
@ -111,8 +111,8 @@ type FilePath []struct {
RiskReviewStatus int `json:"risk_review_status"`
ConversationID string `json:"conversation_id"`
ParentID string `json:"parent_id"`
-CreateTime int `json:"create_time"`
-UpdateTime int `json:"update_time"`
+CreateTime int64 `json:"create_time"`
+UpdateTime int64 `json:"update_time"`
}
type GetFileUrlResp struct {

View File

@ -192,12 +192,11 @@ func (d *Dropbox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
url := d.contentBase + "/2/files/upload_session/append_v2"
reader := driver.NewLimitedUploadStream(ctx, io.LimitReader(stream, PartSize))
-req, err := http.NewRequest(http.MethodPost, url, reader)
+req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, reader)
if err != nil {
log.Errorf("failed to update file when append to upload session, err: %+v", err)
return err
}
-req = req.WithContext(ctx)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Authorization", "Bearer "+d.AccessToken)

View File

@ -13,7 +13,7 @@ type Addition struct {
ClientSecret string `json:"client_secret" required:"false" help:"Keep it empty if you don't have one"`
AccessToken string
RefreshToken string `json:"refresh_token" required:"true"`
-RootNamespaceId string
+RootNamespaceId string `json:"RootNamespaceId" required:"false"`
}
var config = driver.Config{

View File

@ -169,13 +169,19 @@ func (d *Dropbox) getFiles(ctx context.Context, path string) ([]File, error) {
func (d *Dropbox) finishUploadSession(ctx context.Context, toPath string, offset int64, sessionId string) error {
url := d.contentBase + "/2/files/upload_session/finish"
-req, err := http.NewRequest(http.MethodPost, url, nil)
+req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil)
if err != nil {
return err
}
-req = req.WithContext(ctx)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Authorization", "Bearer "+d.AccessToken)
+if d.RootNamespaceId != "" {
+apiPathRootJson, err := d.buildPathRootHeader()
+if err != nil {
+return err
+}
+req.Header.Set("Dropbox-API-Path-Root", apiPathRootJson)
+}
uploadFinishArgs := UploadFinishArgs{
Commit: struct {
@ -214,13 +220,19 @@ func (d *Dropbox) finishUploadSession(ctx context.Context, toPath string, offset
func (d *Dropbox) startUploadSession(ctx context.Context) (string, error) {
url := d.contentBase + "/2/files/upload_session/start"
-req, err := http.NewRequest(http.MethodPost, url, nil)
+req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil)
if err != nil {
return "", err
}
-req = req.WithContext(ctx)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Authorization", "Bearer "+d.AccessToken)
+if d.RootNamespaceId != "" {
+apiPathRootJson, err := d.buildPathRootHeader()
+if err != nil {
+return "", err
+}
+req.Header.Set("Dropbox-API-Path-Root", apiPathRootJson)
+}
req.Header.Set("Dropbox-API-Arg", "{\"close\":false}")
res, err := base.HttpClient.Do(req)
@ -235,3 +247,11 @@ func (d *Dropbox) startUploadSession(ctx context.Context) (string, error) {
_ = res.Body.Close()
return sessionId, nil
}
+func (d *Dropbox) buildPathRootHeader() (string, error) {
+return utils.Json.MarshalToString(map[string]interface{}{
+".tag": "root",
+"root": d.RootNamespaceId,
+})
+}
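For reference, the header value buildPathRootHeader produces is a small two-key JSON object. A sketch assuming a RootNamespaceId of "2345" (the value itself is hypothetical):
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	b, _ := json.Marshal(map[string]interface{}{".tag": "root", "root": "2345"})
	// Prints {".tag":"root","root":"2345"}, sent as Dropbox-API-Path-Root so that
	// path-based calls resolve against this namespace instead of the user home.
	fmt.Println(string(b))
}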

View File

@ -31,13 +31,13 @@ func (c *customTokenSource) Token() (*oauth2.Token, error) {
v.Set("client_id", c.config.ClientID) v.Set("client_id", c.config.ClientID)
v.Set("client_secret", c.config.ClientSecret) v.Set("client_secret", c.config.ClientSecret)
req, err := http.NewRequest("POST", c.config.TokenURL, strings.NewReader(v.Encode())) req, err := http.NewRequestWithContext(c.ctx, http.MethodPost, c.config.TokenURL, strings.NewReader(v.Encode()))
if err != nil { if err != nil {
return nil, err return nil, err
} }
req.Header.Set("Content-Type", "application/x-www-form-urlencoded") req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
resp, err := http.DefaultClient.Do(req.WithContext(c.ctx)) resp, err := http.DefaultClient.Do(req)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -2,12 +2,15 @@ package ftp
import (
"context"
+"errors"
+"io"
stdpath "path"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
+"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/jlaffaye/ftp"
)
@ -16,6 +19,9 @@ type FTP struct {
model.Storage
Addition
conn *ftp.ServerConn
+ctx context.Context
+cancel context.CancelFunc
}
func (d *FTP) Config() driver.Config {
@ -27,12 +33,16 @@ func (d *FTP) GetAddition() driver.Additional {
}
func (d *FTP) Init(ctx context.Context) error {
-return d._login()
+d.ctx, d.cancel = context.WithCancel(context.Background())
+var err error
+d.conn, err = d._login(ctx)
+return err
}
func (d *FTP) Drop(ctx context.Context) error {
if d.conn != nil {
-_ = d.conn.Logout()
+_ = d.conn.Quit()
+d.cancel()
}
return nil
}
@ -62,25 +72,51 @@ func (d *FTP) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]m
}
func (d *FTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
-if err := d.login(); err != nil {
+conn, err := d._login(ctx)
+if err != nil {
return nil, err
}
-remoteFile := NewFileReader(d.conn, encode(file.GetPath(), d.Encoding), file.GetSize())
-if remoteFile != nil && !d.Config().OnlyLinkMFile {
-return &model.Link{
-RangeReader: &model.FileRangeReader{
-RangeReaderIF: stream.RateLimitRangeReaderFunc(stream.GetRangeReaderFromMFile(file.GetSize(), remoteFile)),
-},
-SyncClosers: utils.NewSyncClosers(remoteFile),
+path := encode(file.GetPath(), d.Encoding)
+size := file.GetSize()
+resultRangeReader := func(context context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
+length := httpRange.Length
+if length < 0 || httpRange.Start+length > size {
+length = size - httpRange.Start
+}
+var c *ftp.ServerConn
+if ctx == context {
+c = conn
+} else {
+var err error
+c, err = d._login(context)
+if err != nil {
+return nil, err
+}
+}
+resp, err := c.RetrFrom(path, uint64(httpRange.Start))
+if err != nil {
+return nil, err
+}
+var close utils.CloseFunc
+if context == ctx {
+close = resp.Close
+} else {
+close = func() error {
+return errors.Join(resp.Close(), c.Quit())
+}
+}
+return utils.ReadCloser{
+Reader: io.LimitReader(resp, length),
+Closer: close,
}, nil
}
return &model.Link{
-MFile: &stream.RateLimitFile{
-File: remoteFile,
-Limiter: stream.ServerDownloadLimit,
-Ctx: ctx,
+RangeReader: &model.FileRangeReader{
+RangeReaderIF: stream.RateLimitRangeReaderFunc(resultRangeReader),
},
+SyncClosers: utils.NewSyncClosers(utils.CloseFunc(conn.Quit)),
}, nil
}

View File

@ -33,7 +33,7 @@ type Addition struct {
var config = driver.Config{
Name: "FTP",
LocalSort: true,
-OnlyLinkMFile: true,
+OnlyLinkMFile: false,
DefaultRoot: "/",
NoLinkURL: true,
}

View File

@@ -1,11 +1,8 @@
 package ftp

 import (
+	"context"
 	"fmt"
-	"io"
-	"os"
-	"sync"
-	"sync/atomic"
 	"time"

 	"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
@@ -15,112 +12,32 @@ import (
 // do others that not defined in Driver interface

 func (d *FTP) login() error {
-	err, _, _ := singleflight.ErrorGroup.Do(fmt.Sprintf("FTP.login:%p", d), func() (error, error) {
-		return d._login(), nil
+	_, err, _ := singleflight.AnyGroup.Do(fmt.Sprintf("FTP.login:%p", d), func() (any, error) {
+		var err error
+		if d.conn != nil {
+			err = d.conn.NoOp()
+			if err != nil {
+				d.conn.Quit()
+				d.conn = nil
+			}
+		}
+		if d.conn == nil {
+			d.conn, err = d._login(d.ctx)
+		}
+		return nil, err
 	})
 	return err
 }

-func (d *FTP) _login() error {
-	if d.conn != nil {
-		_, err := d.conn.CurrentDir()
-		if err == nil {
-			return nil
-		}
-	}
-	conn, err := ftp.Dial(d.Address, ftp.DialWithShutTimeout(10*time.Second))
+func (d *FTP) _login(ctx context.Context) (*ftp.ServerConn, error) {
+	conn, err := ftp.Dial(d.Address, ftp.DialWithShutTimeout(10*time.Second), ftp.DialWithContext(ctx))
 	if err != nil {
-		return err
+		return nil, err
 	}
 	err = conn.Login(d.Username, d.Password)
 	if err != nil {
-		return err
+		conn.Quit()
+		return nil, err
 	}
-	d.conn = conn
-	return nil
+	return conn, nil
 }
-
-// FileReader An FTP file reader that implements io.MFile for seeking.
-type FileReader struct {
-	conn         *ftp.ServerConn
-	resp         *ftp.Response
-	offset       atomic.Int64
-	readAtOffset int64
-	mu           sync.Mutex
-	path         string
-	size         int64
-}
-
-func NewFileReader(conn *ftp.ServerConn, path string, size int64) *FileReader {
-	return &FileReader{
-		conn: conn,
-		path: path,
-		size: size,
-	}
-}
-
-func (r *FileReader) Read(buf []byte) (n int, err error) {
-	n, err = r.ReadAt(buf, r.offset.Load())
-	r.offset.Add(int64(n))
-	return
-}
-
-func (r *FileReader) ReadAt(buf []byte, off int64) (n int, err error) {
-	if off < 0 {
-		return -1, os.ErrInvalid
-	}
-	r.mu.Lock()
-	defer r.mu.Unlock()
-
-	if off != r.readAtOffset {
-		// have to restart the connection, to correct offset
-		_ = r.resp.Close()
-		r.resp = nil
-	}
-
-	if r.resp == nil {
-		r.resp, err = r.conn.RetrFrom(r.path, uint64(off))
-		r.readAtOffset = off
-		if err != nil {
-			return 0, err
-		}
-	}
-
-	n, err = r.resp.Read(buf)
-	r.readAtOffset += int64(n)
-	return
-}
-
-func (r *FileReader) Seek(offset int64, whence int) (int64, error) {
-	oldOffset := r.offset.Load()
-	var newOffset int64
-	switch whence {
-	case io.SeekStart:
-		newOffset = offset
-	case io.SeekCurrent:
-		newOffset = oldOffset + offset
-	case io.SeekEnd:
-		return r.size, nil
-	default:
-		return -1, os.ErrInvalid
-	}
-
-	if newOffset < 0 {
-		// offset out of range
-		return oldOffset, os.ErrInvalid
-	}
-	if newOffset == oldOffset {
-		// offset not changed, so return directly
-		return oldOffset, nil
-	}
-	r.offset.Store(newOffset)
-	return newOffset, nil
-}
-
-func (r *FileReader) Close() error {
-	if r.resp != nil {
-		return r.resp.Close()
-	}
-	return nil
-}
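For context, the new login() above funnels concurrent re-login attempts through a singleflight group keyed by the driver pointer, so a burst of requests triggers at most one NoOp probe or redial. The repo's pkg/singleflight is internal; the sketch below shows the same idea with the widely used golang.org/x/sync/singleflight, whose Do has the analogous signature.

package main

import (
	"fmt"
	"sync"

	"golang.org/x/sync/singleflight"
)

var g singleflight.Group

// login is deduplicated: concurrent callers sharing a key also share one
// underlying call and receive its result.
func login(key string) (string, error) {
	v, err, _ := g.Do("login:"+key, func() (interface{}, error) {
		fmt.Println("dialing...") // runs once per concurrent burst
		return "session-" + key, nil
	})
	if err != nil {
		return "", err
	}
	return v.(string), nil
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			s, _ := login("ftp")
			fmt.Println(s)
		}()
	}
	wg.Wait()
}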

View File

@@ -162,7 +162,7 @@ func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
 			SetBody(driver.NewLimitedUploadStream(ctx, stream))
 		}, nil)
 	} else {
-		err = d.chunkUpload(ctx, stream, putUrl)
+		err = d.chunkUpload(ctx, stream, putUrl, up)
 	}
 	return err
 }

View File

@@ -5,17 +5,20 @@ import (
 	"crypto/x509"
 	"encoding/pem"
 	"fmt"
-	"github.com/OpenListTeam/OpenList/v4/internal/op"
+	"io"
 	"net/http"
 	"os"
 	"regexp"
 	"strconv"
 	"time"

+	"github.com/OpenListTeam/OpenList/v4/internal/op"
+	"github.com/OpenListTeam/OpenList/v4/internal/stream"
+	"github.com/avast/retry-go"
 	"github.com/OpenListTeam/OpenList/v4/drivers/base"
 	"github.com/OpenListTeam/OpenList/v4/internal/driver"
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
-	"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
 	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
 	"github.com/go-resty/resty/v2"
 	"github.com/golang-jwt/jwt/v4"
@@ -251,28 +254,60 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {
 	return res, nil
 }

-func (d *GoogleDrive) chunkUpload(ctx context.Context, stream model.FileStreamer, url string) error {
+func (d *GoogleDrive) chunkUpload(ctx context.Context, file model.FileStreamer, url string, up driver.UpdateProgress) error {
 	var defaultChunkSize = d.ChunkSize * 1024 * 1024
+	ss, err := stream.NewStreamSectionReader(file, int(defaultChunkSize), &up)
+	if err != nil {
+		return err
+	}
 	var offset int64 = 0
-	for offset < stream.GetSize() {
+	url += "?includeItemsFromAllDrives=true&supportsAllDrives=true"
+	for offset < file.GetSize() {
 		if utils.IsCanceled(ctx) {
 			return ctx.Err()
 		}
-		chunkSize := stream.GetSize() - offset
-		if chunkSize > defaultChunkSize {
-			chunkSize = defaultChunkSize
-		}
-		reader, err := stream.RangeRead(http_range.Range{Start: offset, Length: chunkSize})
+		chunkSize := min(file.GetSize()-offset, defaultChunkSize)
+		reader, err := ss.GetSectionReader(offset, chunkSize)
 		if err != nil {
 			return err
 		}
-		reader = driver.NewLimitedUploadStream(ctx, reader)
-		_, err = d.request(url, http.MethodPut, func(req *resty.Request) {
-			req.SetHeaders(map[string]string{
-				"Content-Length": strconv.FormatInt(chunkSize, 10),
-				"Content-Range":  fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, stream.GetSize()),
-			}).SetBody(reader).SetContext(ctx)
-		}, nil)
+		limitedReader := driver.NewLimitedUploadStream(ctx, reader)
+		err = retry.Do(func() error {
+			reader.Seek(0, io.SeekStart)
+			req, err := http.NewRequestWithContext(ctx, http.MethodPut, url, limitedReader)
+			if err != nil {
+				return err
+			}
+			req.Header = map[string][]string{
+				"Authorization":  {"Bearer " + d.AccessToken},
+				"Content-Length": {strconv.FormatInt(chunkSize, 10)},
+				"Content-Range":  {fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, file.GetSize())},
+			}
+			res, err := base.HttpClient.Do(req)
+			if err != nil {
+				return err
+			}
+			defer res.Body.Close()
+			bytes, _ := io.ReadAll(res.Body)
+			var e Error
+			utils.Json.Unmarshal(bytes, &e)
+			if e.Error.Code != 0 {
+				if e.Error.Code == 401 {
+					err = d.refreshToken()
+					if err != nil {
+						return err
+					}
+				}
+				return fmt.Errorf("%s: %v", e.Error.Message, e.Error.Errors)
+			}
+			up(float64(offset+chunkSize) / float64(file.GetSize()) * 100)
+			return nil
+		},
+			retry.Attempts(3),
+			retry.DelayType(retry.BackOffDelay),
+			retry.Delay(time.Second))
+		ss.FreeSectionReader(reader)
 		if err != nil {
 			return err
 		}
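As a sanity check on the Content-Range arithmetic used above: each chunk covers bytes [offset, offset+chunkSize-1] of the total, and the final chunk is simply shorter. A standalone illustration:

package main

import "fmt"

// Prints the Content-Range header values a chunked upload would send for a
// 25 MiB file in 10 MiB chunks; the last chunk is truncated to what remains.
func main() {
	const chunkSize = int64(10 << 20)
	size := int64(25 << 20)
	for offset := int64(0); offset < size; {
		n := min(size-offset, chunkSize)
		fmt.Printf("Content-Range: bytes %d-%d/%d\n", offset, offset+n-1, size)
		offset += n
	}
}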

View File

@@ -276,9 +276,7 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreame
 	etag := s.GetHash().GetHash(utils.MD5)
 	var err error
 	if len(etag) != utils.MD5.Width {
-		cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
-		up = model.UpdateProgressWithRange(up, 50, 100)
-		_, etag, err = stream.CacheFullInTempFileAndHash(s, cacheFileProgress, utils.MD5)
+		_, etag, err = stream.CacheFullAndHash(s, &up, utils.MD5)
 		if err != nil {
 			return nil, err
 		}
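CacheFullAndHash presumably caches the stream and hashes it in a single pass (inferring from the name; the old helper was CacheFullInTempFileAndHash, split across two progress ranges). The classic way to do that in Go is an io.TeeReader feeding the hash while the copy writes to disk:

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"strings"
)

// Caches a stream to a temp file while hashing it in one pass: TeeReader
// feeds the hash as the copy to disk proceeds.
func main() {
	src := strings.NewReader("stream body")
	tmp, err := os.CreateTemp("", "cache-*")
	if err != nil {
		panic(err)
	}
	defer os.Remove(tmp.Name())
	defer tmp.Close()
	h := md5.New()
	if _, err := io.Copy(tmp, io.TeeReader(src, h)); err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(h.Sum(nil)))
}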
@@ -298,6 +296,23 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreame
 		return nil, err
 	}
 	upToken := utils.Json.Get(res, "upToken").ToString()
+	if upToken == "-1" {
+		// rapid upload (instant upload by hash) is supported
+		var resp UploadTokenRapidResp
+		err := utils.Json.Unmarshal(res, &resp)
+		if err != nil {
+			return nil, err
+		}
+		return &model.Object{
+			ID:       strconv.FormatInt(resp.Map.FileID, 10),
+			Name:     resp.Map.FileName,
+			Size:     s.GetSize(),
+			Modified: s.ModTime(),
+			Ctime:    s.CreateTime(),
+			IsFolder: false,
+			HashInfo: utils.NewHashInfo(utils.MD5, etag),
+		}, nil
+	}
 	now := time.Now()
 	key := fmt.Sprintf("disk/%d/%d/%d/%s/%016d", now.Year(), now.Month(), now.Day(), d.account, now.UnixMilli())
 	reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{

View File

@@ -29,9 +29,10 @@ func init() {
 	op.RegisterDriver(func() driver.Driver {
 		return &ILanZou{
 			config: driver.Config{
-				Name:        "ILanZou",
-				DefaultRoot: "0",
-				LocalSort:   true,
+				Name:              "ILanZou",
+				DefaultRoot:       "0",
+				LocalSort:         true,
+				NoOverwriteUpload: true,
 			},
 			conf: Conf{
 				base: "https://api.ilanzou.com",
@@ -47,9 +48,10 @@ func init() {
 	op.RegisterDriver(func() driver.Driver {
 		return &ILanZou{
 			config: driver.Config{
-				Name:        "FeijiPan",
-				DefaultRoot: "0",
-				LocalSort:   true,
+				Name:              "FeijiPan",
+				DefaultRoot:       "0",
+				LocalSort:         true,
+				NoOverwriteUpload: true,
 			},
 			conf: Conf{
 				base: "https://api.feijipan.com",

View File

@@ -43,6 +43,18 @@ type Part struct {
 	ETag string `json:"etag"`
 }

+type UploadTokenRapidResp struct {
+	Msg     string `json:"msg"`
+	Code    int    `json:"code"`
+	UpToken string `json:"upToken"`
+	Map     struct {
+		FileIconID int    `json:"fileIconId"`
+		FileName   string `json:"fileName"`
+		FileIcon   string `json:"fileIcon"`
+		FileID     int64  `json:"fileId"`
+	} `json:"map"`
+}
+
 type UploadResultResp struct {
 	Msg  string `json:"msg"`
 	Code int    `json:"code"`
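A quick check that the new struct decodes the rapid-upload payload as intended. The driver itself goes through utils.Json (json-iterator), but stdlib encoding/json behaves the same for this shape; the sample payload below is made up for illustration, not a captured server response.

package main

import (
	"encoding/json"
	"fmt"
)

type UploadTokenRapidResp struct {
	Msg     string `json:"msg"`
	Code    int    `json:"code"`
	UpToken string `json:"upToken"`
	Map     struct {
		FileName string `json:"fileName"`
		FileID   int64  `json:"fileId"`
	} `json:"map"`
}

func main() {
	raw := []byte(`{"msg":"ok","code":0,"upToken":"-1","map":{"fileName":"a.txt","fileId":42}}`)
	var resp UploadTokenRapidResp
	if err := json.Unmarshal(raw, &resp); err != nil {
		panic(err)
	}
	fmt.Println(resp.UpToken == "-1", resp.Map.FileID) // true 42
}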

View File

@@ -3,6 +3,7 @@ package LenovoNasShare
 import (
 	"context"
 	"net/http"
+	"net/url"
 	"strings"
 	"time"
@@ -71,7 +72,23 @@ func (d *LenovoNasShare) List(ctx context.Context, dir model.Obj, args model.Lis
 	files = append(files, resp.Data.List...)

 	return utils.SliceConvert(files, func(src File) (model.Obj, error) {
-		return src, nil
+		if src.IsDir() {
+			return src, nil
+		}
+		return &model.ObjThumb{
+			Object: model.Object{
+				Name:     src.GetName(),
+				Size:     src.GetSize(),
+				Modified: src.ModTime(),
+				IsFolder: src.IsDir(),
+			},
+			Thumbnail: model.Thumbnail{
+				Thumbnail: func() string {
+					thumbUrl := d.Host + "/oneproxy/api/share/v1/file/thumb?code=" + d.ShareId + "&stoken=" + d.stoken + "&path=" + url.QueryEscape(src.GetPath())
+					return thumbUrl
+				}(),
+			},
+		}, nil
 	})
 }
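The url.QueryEscape call matters here because share paths can contain slashes, spaces, and non-ASCII names; left unescaped they would break the query string. For instance:

package main

import (
	"fmt"
	"net/url"
)

// QueryEscape encodes slashes, spaces, and non-ASCII so the path value
// survives intact inside a query string.
func main() {
	fmt.Println(url.QueryEscape("/照片/my photo.jpg"))
	// Output: %2F%E7%85%A7%E7%89%87%2Fmy+photo.jpg
}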

View File

@@ -0,0 +1,92 @@
package local
// BenchmarkCalculateDirSize benchmarks the directory size calculation.
// It should be run with the local driver enabled and directory size calculation set to true.
import (
"os"
"path/filepath"
"strconv"
"testing"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
)
func generatedTestDir(dir string, dep, filecount int) {
if dep == 0 {
return
}
for i := 0; i < dep; i++ {
subDir := dir + "/dir" + strconv.Itoa(i)
os.Mkdir(subDir, 0755)
generatedTestDir(subDir, dep-1, filecount)
generatedFiles(subDir, filecount)
}
}
func generatedFiles(path string, count int) error {
for i := 0; i < count; i++ {
filePath := filepath.Join(path, "file"+strconv.Itoa(i)+".txt")
file, err := os.Create(filePath)
if err != nil {
return err
}
// fill the file with repeating ASCII characters
content := make([]byte, 1024) // 1KB file
for j := range content {
content[j] = byte('a' + j%26) // Fill with 'a' to 'z'
}
_, err = file.Write(content)
if err != nil {
return err
}
file.Close()
}
return nil
}
// performance tests for directory size calculation
func BenchmarkCalculateDirSize(t *testing.B) {
// log the start of the benchmark
t.Logf("Starting performance test for directory size calculation")
// skip the expensive setup in short mode
if testing.Short() {
t.Skip("Skipping performance test in short mode")
}
// create a temp directory for testing
testTempDir := t.TempDir()
err := os.MkdirAll(testTempDir, 0755)
if err != nil {
t.Fatalf("Failed to create test directory: %v", err)
}
defer os.RemoveAll(testTempDir) // Clean up after test
// build a directory tree of depth 5 with 10 subdirectories and 10 files per level
generatedTestDir(testTempDir, 5, 10)
// Initialize the local driver with directory size calculation enabled
d := &Local{
directoryMap: DirectoryMap{
root: testTempDir,
},
Addition: Addition{
DirectorySize: true,
RootPath: driver.RootPath{
RootFolderPath: testTempDir,
},
},
}
// record the start time
t.StartTimer()
// Calculate the directory size
err = d.directoryMap.RecalculateDirSize()
if err != nil {
t.Fatalf("Failed to calculate directory size: %v", err)
}
// record the end time
t.StopTimer()
// Print the size and duration
node, ok := d.directoryMap.Get(d.directoryMap.root)
if !ok {
t.Fatalf("Failed to get root node from directory map")
}
t.Logf("Directory size: %d bytes", node.fileSum+node.directorySum)
t.Logf("Performance test completed successfully")
}
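To try the benchmark locally, something like the following should work (the package path is assumed from the package local clause; -run '^$' skips regular tests so only the benchmark executes):

go test -run '^$' -bench BenchmarkCalculateDirSize ./drivers/local/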

Some files were not shown because too many files have changed in this diff.