Compare commits


81 Commits

Author SHA1 Message Date
e97f0a289e feat(cloudreve_v4): enhance token management (#1171)
* fix(cloudreve_v4): improve error handling in request method

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* feat(cloudreve_v4): enhance token management with expiration checks and refresh logic

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* feat(cloudreve_v4): add JWT structures for access and refresh tokens; validate access token on initialization

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(cloudreve_v4): improve error messages

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
2025-09-04 19:41:41 +08:00
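For context on the expiration checks this change describes, a minimal Go sketch of reading a JWT's `exp` claim before deciding to refresh; the `tokenExpired` helper and its one-minute margin are illustrative assumptions, not the driver's actual code.

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"strings"
	"time"
)

// jwtClaims keeps only the claim needed for an expiration check.
type jwtClaims struct {
	Exp int64 `json:"exp"`
}

// tokenExpired reports whether the token's exp claim is within margin of now.
// Hypothetical helper: the real driver defines richer JWT structures.
func tokenExpired(token string, margin time.Duration) (bool, error) {
	parts := strings.Split(token, ".")
	if len(parts) != 3 {
		return true, errors.New("malformed JWT")
	}
	payload, err := base64.RawURLEncoding.DecodeString(parts[1])
	if err != nil {
		return true, err
	}
	var claims jwtClaims
	if err := json.Unmarshal(payload, &claims); err != nil {
		return true, err
	}
	return time.Now().Add(margin).Unix() >= claims.Exp, nil
}

func main() {
	// The payload segment "eyJleHAiOjB9" decodes to {"exp":0}: long expired.
	// A driver would refresh the access token here instead of waiting for a 401.
	expired, err := tokenExpired("aGVhZGVy.eyJleHAiOjB9.c2ln", time.Minute)
	fmt.Println(expired, err)
}
```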
89f35170b3 fix(fs): clear cache after directory rename to ensure consistency (#1193)
Clear cache after renaming the directory.
2025-09-01 18:47:54 +08:00
8188fb2d7d fix(123open): get direct link (#1185)
* fix(123open): correct query parameter name from 'fileId' to 'fileID' in getDirectLink function

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(123open): change SpaceTempExpr type from 'string' to 'int64' in UserInfoResp struct

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(123open): comment out unused fields in UserInfoResp struct

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(123open): add getUID method and cache UID

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-31 15:47:38 +08:00
87cf95f50b fix(139): refactor part upload logic (#1184)
* fix(139): refactor part upload logic

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(139): handle upload errors

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(139): sort upload parts by PartNumber before uploading

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(139): improve error handling

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(139): add validation for upload part index to prevent out of bounds errors

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-31 15:47:12 +08:00
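A rough sketch of the sorting and index validation these commits describe; the `Part` struct and its fields are hypothetical, not the 139 driver's real types.

```go
package main

import (
	"fmt"
	"sort"
)

// Part is a hypothetical upload-part descriptor.
type Part struct {
	PartNumber int
	UploadURL  string
}

// uploadParts sends chunks in PartNumber order and validates each index
// before slicing, the two guards the commits above describe.
func uploadParts(parts []Part, chunks [][]byte) error {
	sort.Slice(parts, func(i, j int) bool {
		return parts[i].PartNumber < parts[j].PartNumber
	})
	for _, p := range parts {
		idx := p.PartNumber - 1 // part numbers are 1-based
		if idx < 0 || idx >= len(chunks) {
			return fmt.Errorf("part %d out of range (have %d chunks)", p.PartNumber, len(chunks))
		}
		// ... PUT chunks[idx] to p.UploadURL and surface any upload error ...
	}
	return nil
}

func main() {
	err := uploadParts([]Part{{PartNumber: 2}, {PartNumber: 1}}, make([][]byte, 2))
	fmt.Println(err) // <nil>
}
```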
8ab26cb823 fix(123open): change DirectLink type from 'boolean' to 'bool' (#1180)
Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-29 19:06:37 +08:00
5880c8e1af fix(189tv): use rate-limited upload stream in OldUpload function (#1176)
* fix(189tv): use rate-limited upload stream in OldUpload function

* fix(189tv): wrap tempFile with io.NopCloser to prevent premature closure in OldUpload function

* .
2025-08-29 16:01:50 +08:00
14bf4ecb4c fix(share): support custom proxy url (#1130)
* feat(share): support custom proxy url

* fix(share): count access

* fix: maybe a path traversal vulnerability?
2025-08-28 22:11:19 +08:00
04a5e58781 fix(server): can't edit .md source files (#1159)
* fix(server): can't edit .md source files

* chore

* add ignore direct link args
2025-08-28 16:19:57 +08:00
bbd4389345 fix(wopan): use fixed timezone for parsing time (#1170)
fix(wopan): update getTime function to use fixed timezone for parsing

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-28 13:02:02 +08:00
f350ccdf95 fix(189pc): sliceSize must not be equal to fileSize (#1169)
* fix(189pc): sliceSize not equal to fileSize

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* Update comment for sliceSize parameter

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-28 11:32:40 +08:00
4f2de9395e feat(degoo): token improvement (#1149)
* Update driver.go

Signed-off-by: Caspian <app@caspian.im>

* Update meta.go

Signed-off-by: Caspian <app@caspian.im>

* Update util.go

Signed-off-by: Caspian <app@caspian.im>

* Update util.go

Signed-off-by: Caspian <app@caspian.im>

* Update util.go

Signed-off-by: Caspian <app@caspian.im>

* Update util.go

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* make account optional

* ensure username and password

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

---------

Signed-off-by: Caspian <app@caspian.im>
Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-26 01:22:59 +08:00
b0dbbebfb0 feat(drivers): add Teldrive driver (#1116)
https://github.com/tgdrive/teldrive

https://teldrive-docs.pages.dev/docs/api

Implemented:
* copy
* move
* link (302 share and local proxy)
* chunked uploads
* rename

Not implemented:
- OpenList QR code login
- refresh token

https://github.com/OpenListTeam/OpenList-Docs/pull/155


* feat(Teldrive): Add driver Teldrive

* fix(teldrive): force webproxy and memory optimized

* chore(teldrive): go fmt

* chore(teldrive): remove TODO

* chore(teldrive): organize code

* feat(teldrive): add UseShareLink option and support 302

* fix(teldrive): standardize API path construction

* fix(teldrive): trim trailing slash from Address in Init method

* chore(teldrive): update help text for UseShareLink field in Addition struct

* fix(teldrive): set 10 MiB as default chunk size

---------

Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
Co-authored-by: ILoveScratch <ilovescratch@foxmail.com>
2025-08-25 01:34:08 +08:00
0c27b4bd47 docs(contributing): update guidelines (#983)
[skip ci]

* docs(contributing): update guidelines

* docs(contributing): clarify fork

* docs(contributing): sync translation

Co-authored-by: Yinan Qin <39023210+elysia-best@users.noreply.github.com>
Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* docs(contributing): add label and cc reminder

* docs(contributing): remove ensure new branch from checklist

* docs(contributing): replace generic GitHub URLs with user-specific ones

* docs(contributing): make branch deletion after PR merge optional

* docs(contributing): keep --recurse-submodules

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
Co-authored-by: Yinan Qin <39023210+elysia-best@users.noreply.github.com>
2025-08-24 20:13:11 +08:00
736cd9e5f2 fix(quark): fix getTranscodingLink (#1136)
The first video info may not contain a URL

* fix(quark): fix getTranscodingLink

* fix(quark_tv): fix getTranscodingLink
2025-08-24 19:55:10 +08:00
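The gist of the fix, as a hedged sketch: scan past variants without a URL instead of blindly taking the first one. The `VideoInfo` fields are illustrative, not Quark's real response schema.

```go
package main

import (
	"errors"
	"fmt"
)

// VideoInfo is a stand-in for one transcoding variant in the response.
type VideoInfo struct {
	Resolution string
	URL        string
}

// pickTranscodingURL returns the first variant that actually carries a URL,
// since the first entry in the list may not contain one.
func pickTranscodingURL(videos []VideoInfo) (string, error) {
	for _, v := range videos {
		if v.URL != "" {
			return v.URL, nil
		}
	}
	return "", errors.New("no transcoding link available")
}

func main() {
	u, err := pickTranscodingURL([]VideoInfo{
		{Resolution: "4k"}, // no URL on the first entry
		{Resolution: "1080p", URL: "https://example.com/v.m3u8"},
	})
	fmt.Println(u, err)
}
```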
c7a603c926 fix(115): fix get 115 app version (#1137) 2025-08-24 19:50:21 +08:00
a28d6d5693 fix(123_open): fix token refresh (#1121) 2025-08-23 23:01:41 +08:00
e59d2233e2 feat(drivers): add Degoo driver (#1097)
* Create driver.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Create util.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Create types.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Create meta.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Update drivers/degoo/driver.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: CaspianGUAN <app@caspian.im>

* Update drivers/degoo/driver.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: CaspianGUAN <app@caspian.im>

* Update driver.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Update meta.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Update types.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Update util.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Update driver.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Update util.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Update drivers/degoo/util.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: CaspianGUAN <app@caspian.im>

* Update util.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* refactor(degoo): add Degoo driver integration and update API handling

* fix(degoo): apply suggestions

---------

Signed-off-by: CaspianGUAN <app@caspian.im>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-23 22:47:02 +08:00
01914a06ef refactor(ci): add permissions check at docker's entrypoint (#1128)
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-22 19:35:48 +08:00
6499374d1c fix(deps): update 115driver to v1.1.1 (close SheltonZhu/115driver#57) (#1115) 2025-08-20 21:33:21 +08:00
b054919d5c feat(ilanzou): add support for rapid upload and fix duplication handling (#1065)
* feat(ilanzou): add support for rapid upload token handling

* feat(ilanzou): add NoOverwriteUpload option
2025-08-19 19:19:44 +08:00
048ee9c2e5 feat(server): adapting #1099 to #991 (#1102) 2025-08-19 15:48:59 +08:00
23394548ca feat(123_open): add DirectLink option (#1045)
* feat(123_open): add `UseDirectLink` option

* feat(123_open): update rate limit rules

* fix(123_open): update api

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* feat(123_open): enhance direct link functionality with private key and expiration

* refactor(123_open): use UUID for random generation

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-19 15:23:10 +08:00
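As a generic illustration of direct links with a private key and expiration, a sketch that signs the path and expiry with an HMAC; this scheme is an assumption for illustration only, not 123open's actual algorithm.

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"time"
)

// signDirectLink appends an expiry and an HMAC signature to a download path.
// Generic illustration only; 123open's real scheme and parameters differ.
func signDirectLink(privateKey, path string, ttl time.Duration) string {
	expires := time.Now().Add(ttl).Unix()
	mac := hmac.New(sha256.New, []byte(privateKey))
	fmt.Fprintf(mac, "%s:%d", path, expires)
	sig := hex.EncodeToString(mac.Sum(nil))
	return fmt.Sprintf("%s?expires=%d&sig=%s", path, expires, sig)
}

func main() {
	fmt.Println(signDirectLink("private-key", "/d/file.bin", 30*time.Minute))
}
```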
b04677b806 feat(server): add error page and status code (#1099) 2025-08-19 15:18:12 +08:00
e4c902dd93 feat(share): support more secure file sharing (#991)
Provides a file-sharing flow similar to most cloud drives: a share can hide the source file path by forcing the Web proxy, and supports a share code, a maximum access count, and an expiration time, without requiring the guest user to be enabled.

Adjustable in global settings:
- whether to force the Web proxy
- whether to allow preview
- whether to allow previewing archive files
- what the "Copy link" button copies after sharing a file

Frontend: OpenListTeam/OpenList-Frontend#156
Docs: OpenListTeam/OpenList-Docs#130

Close #183
Close #526
Close #860
Close #892
Close #1079


* feat(share): support more secure file sharing

* feat(share): add archive preview

* fix(share): fix some bugs

* feat(openlist_share): add openlist share driver

* fix(share): lack unwrap when get virtual path

* fix: use unwrapPath instead of path for virtual file name comparison

* fix(share): change request method of /api/share/list from GET to Any

* fix(share): path traversal vulnerability in sharing path check

* fix(share): files shared via the alias driver could not get a URL when the Web proxy was disabled

* fix(sharing): update error message for sharing root link extraction

---------

Co-authored-by: Suyunmeng <69945917+Suyunmeng@users.noreply.github.com>
Co-authored-by: j2rong4cn <j2rong@qq.com>
2025-08-19 15:10:02 +08:00
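The path-traversal check mentioned above typically joins the requested path onto the shared root, normalizes it, and verifies it is still under the root. A minimal sketch; the helper name is assumed, not OpenList's actual function.

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// withinShare joins the requested path onto the shared root, normalizes it,
// and verifies it is still under the root: the usual ../-traversal defense.
func withinShare(root, req string) bool {
	root = path.Clean("/" + root)
	p := path.Clean(path.Join(root, req))
	return p == root || strings.HasPrefix(p, root+"/")
}

func main() {
	fmt.Println(withinShare("share/docs", "a/b.txt"))             // true
	fmt.Println(withinShare("share/docs", "../../../etc/passwd")) // false
}
```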
5d8bd258c0 refactor(docker): reduce docker image size (#1091)
* fix(docker): reduce image size

* refactor(docker): update user and group creation

* Update Dockerfile

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-19 10:27:33 +08:00
08c5283c8c feat(docker): Update docker-compose configuration (#1081)
* feat(docker): Update docker-compose configuration

* Update docker-compose.yml

Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
Signed-off-by: huancun _- <huancun@hc26.org>

---------

Signed-off-by: huancun _- <huancun@hc26.org>
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-18 14:29:59 +08:00
10a14f10cd fix(docker): improve startup process and SIGTERM handling (#1089)
* fix(ci): Modify the way of starting OpenList.

* fix(ci): start runsvdir
2025-08-18 11:13:05 +08:00
f86ebc52a0 refactor(123_open): improve upload (#1076)
* refactor(123_open): improve upload

* optimize buffer initialization for multipart form

* generate a fresh multipart form for each retry

* .
2025-08-17 14:25:23 +08:00
016ed90efa feat(stream): fast buffer freeing for large cache (#1053)
Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-16 17:19:52 +08:00
d76407b201 fix(dropbox): incorrect path error during upload (#1052)
* Fix incorrect path error during upload on Dropbox

* Add RootNamespaceId to the config for direct modification

* Refactor Dropbox header logic: extract JSON marshaling into helper method

* Fix Dropbox: replace marshalToJSONString with utils.Json.MarshalToString
2025-08-16 14:18:02 +08:00
5de6b660f2 fix(terabox): user not exists error (#1056)
* fix user location error when uploading files
2025-08-15 21:25:57 +08:00
71ada3b656 fix(ci-sync): fix workflow for syncing Repository (#1062) 2025-08-15 18:48:55 +08:00
dc42f0e226 [skip ci]fix(ci): update sync workflow (#1061) 2025-08-15 18:36:52 +08:00
74bf9f6467 [skip ci]feat(sync): add workflow to sync GitHub repository (#1060)
feat(sync): add workflow to sync GitHub repository to Gitee
2025-08-15 18:12:29 +08:00
d0c22a1ecb feat(ci): add the default user for docker image (#1036)
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-12 09:51:40 +08:00
57fceabcf4 perf(stream): improve file stream range reading and caching mechanism (#1001)
* perf(stream): improve file stream range reading and caching mechanism

* .

* add bytes_test.go

* fix(stream): handle EOF and buffer reading more gracefully

* add comments

* refactor: update CacheFullAndWriter to accept pointer for UpdateProgress

* update tests

* Update drivers/google_drive/util.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>

* clone Link more elegantly

* fix streams that are cached but could not be re-read

* rename the Bytes type to Reader

* fix a stack overflow

* update tests

---------

Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-11 23:41:22 +08:00
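One of the subtleties in range reading is the short final chunk: `io.ReadFull` reports it as `io.ErrUnexpectedEOF` even though the bytes are valid data. A small sketch of the pattern; the helper name is assumed.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// readChunk fills buf from r, treating a short final chunk as data, not error.
func readChunk(r io.Reader, buf []byte) ([]byte, error) {
	n, err := io.ReadFull(r, buf)
	if err == io.EOF || err == io.ErrUnexpectedEOF {
		return buf[:n], nil // last (possibly short) chunk
	}
	return buf[:n], err
}

func main() {
	r := bytes.NewReader([]byte("hello"))
	for {
		chunk, err := readChunk(r, make([]byte, 4))
		if err != nil {
			fmt.Println("err:", err)
			return
		}
		if len(chunk) == 0 {
			return // clean EOF
		}
		fmt.Printf("%q\n", chunk) // "hell", then "o"
	}
}
```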
8c244a984d refactor(assets): migrate to resource domain (#975)
* refactor(assets): migrate to resource domain

* feat(bootstrap): add migration value for logo and favicon settings
2025-08-10 09:57:33 +08:00
df479ba806 fix(aliyundrive_open): limit rate for every request (close #724) (#1011)
* fix(aliyundrive_open): limit rate for `Remove` and `MakeDir`; reduce limit for `List` and `Link` (close #724)

* Update drivers/aliyundrive_open/driver.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: 火星大王 <34576789+huoxingdawang@users.noreply.github.com>

* Update drivers/aliyundrive_open/driver.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: 火星大王 <34576789+huoxingdawang@users.noreply.github.com>

* fix(aliyundrive_open): limit rate for every request

* fix(aliyundrive_open): fix limiter not work on reference driver

* fix(aliyundrive_open): typo

* fix(aliyundrive_open): limiter not set to nil after free

* fix(aliyundrive_share): limit rate for every request

---------

Signed-off-by: 火星大王 <34576789+huoxingdawang@users.noreply.github.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-10 09:55:20 +08:00
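A minimal sketch of per-request rate limiting with the `golang.org/x/time/rate` module, the general pattern behind these commits; the client struct, the chosen rate, and the endpoint string are illustrative, not the driver's actual code.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

// limitedClient gates every API call through one shared limiter.
type limitedClient struct {
	limiter *rate.Limiter
}

func (c *limitedClient) request(ctx context.Context, endpoint string) error {
	// Wait blocks until the limiter allows one more event or ctx is done.
	if err := c.limiter.Wait(ctx); err != nil {
		return err
	}
	fmt.Println("calling", endpoint, time.Now().Format("15:04:05.000"))
	return nil // ... perform the real HTTP request here ...
}

func main() {
	// At most 4 requests per second, no bursting.
	c := &limitedClient{limiter: rate.NewLimiter(rate.Every(time.Second/4), 1)}
	for i := 0; i < 3; i++ {
		_ = c.request(context.Background(), "/adrive/v1.0/openFile/list")
	}
}
```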
5ae8e96237 feat(123_open): update Put method to return model.Obj (#1008)
* feat(123_open): update Put method to return model.Obj

* fix(123_open): declare time zones

* chore(123_open): fix typo

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(123_open): use fixed timezone

* fix(123_open): implement PutResult interface for Open123 driver

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: Suyunmeng <69945917+Suyunmeng@users.noreply.github.com>
2025-08-09 15:09:12 +08:00
aa0ced47b0 fix(webdav): Handle HEAD requests for directories with appropriate headers (#1015)
Implement handling of HEAD requests for directories by setting the correct Content-Type and Content-Length headers.
2025-08-09 13:57:09 +08:00
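A hedged sketch of the idea: answer a HEAD request on a directory with headers only, instead of falling through to a download path. The handler shape, root path, and header values are illustrative, not OpenList's actual WebDAV code.

```go
package main

import (
	"net/http"
	"os"
	"path/filepath"
)

func main() {
	root := "/srv/dav" // illustrative WebDAV root
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		name := filepath.Join(root, filepath.FromSlash(r.URL.Path))
		info, err := os.Stat(name)
		if err != nil {
			http.NotFound(w, r)
			return
		}
		// The fix in a nutshell: HEAD on a directory gets headers only.
		if r.Method == http.MethodHead && info.IsDir() {
			w.Header().Set("Content-Type", "text/html; charset=utf-8")
			w.Header().Set("Content-Length", "0")
			w.WriteHeader(http.StatusOK)
			return
		}
		http.ServeFile(w, r, name)
	})
	_ = http.ListenAndServe(":8080", nil)
}
```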
ab747d9052 feat(config): Add PWA manifest.json endpoint for web app installation (#990)
* feat(config): Add PWA manifest.json endpoint for web app installation

* fix: Update comment to English in manifest handler

* fix: fix EOL

* fix: Remove unused fmt import from manifest handler

* feat: use site settings for manifest name and icon

* fix(manifest): Move manifest.json route to static handler for proper CDN handling

* feat: move manifest.json handler to static package and improve path handling

* feat: Add custom static file handler to prevent manifest.json conflicts

* fix: Integrate manifest.json handling into static file serving routes

* fix: Simplify PWA manifest scope handling and static file serving

- Remove CDN-specific logic for PWA manifest scope and start_url
- Always use base path for PWA scope regardless of CDN configuration
- Replace manual file serving logic with http.FileServer for static assets

* fix: Ensure consistent base path handling in site configuration and manifest path construction

* fix: Refactor trailing slash handling in site configuration

* feat(static): update manifest path handling and add route for manifest.json
2025-08-08 20:07:51 +08:00
93c06213d4 feat(local): add directory size support (#624)
* feat(local): add directory size support

* fix(local): fix and improve directory size calculation

* style(local): fix code style

* style(local): fix code style

* style(local): fix code style

* fix(local): refresh directory size when force refresh

Signed-off-by: 我怎么就不是一只猫呢? <26274059+dezhishen@users.noreply.github.com>

* fix(local): Avoid traversing the parent's parent, which leads to an endless loop

Signed-off-by: 我怎么就不是一只猫呢? <26274059+dezhishen@users.noreply.github.com>

* fix(local): refresh dir size only when enabled

Signed-off-by: 我怎么就不是一只猫呢? <26274059+dezhishen@users.noreply.github.com>

* fix(local): logical error && add RecalculateDirSize && cleaner code for int64

* feat(local): add Benchmark for CalculateDirSize

* refactor(local): improve error checking during moves

---------

Signed-off-by: 我怎么就不是一只猫呢? <26274059+dezhishen@users.noreply.github.com>
Co-authored-by: 我怎么就不是一只猫呢? <26274059+dezhishen@users.noreply.github.com>
2025-08-08 16:59:16 +08:00
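A simplified sketch of a directory-size walk: `filepath.WalkDir` does not follow symlinks, which sidesteps the parent-pointing-link loops these commits guard against. This is not the driver's actual `CalculateDirSize`.

```go
package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
)

// dirSize sums regular-file sizes under root, skipping anything that is not
// a regular file (symlinks, devices), so links back to a parent cannot loop.
func dirSize(root string) (int64, error) {
	var total int64
	err := filepath.WalkDir(root, func(_ string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.Type().IsRegular() {
			info, err := d.Info()
			if err != nil {
				return err
			}
			total += info.Size()
		}
		return nil
	})
	return total, err
}

func main() {
	size, err := dirSize(".")
	fmt.Println(size, err)
}
```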
b9b8eed285 [skip ci]feat(ci): add FRONTEND_REPO variable to workflows and build script (#1006) 2025-08-08 16:36:22 +08:00
317d190b77 fix(ftp): create a new connection for each download (#989) 2025-08-06 20:35:01 +08:00
52d7d819ad feat(lenovonas_share): add thumb (#986) 2025-08-06 17:34:43 +08:00
0483e0f868 feat(driver_strm): also show some files with strm (#969)
* feat(driver_strm): Also show some files with strm

Allow users to set file types that should also be shown alongside strm files, usually subtitles

Most of the code was copied and adapted from drivers/alias

* improvements

* improvements

* .

* add comments

---------

Co-authored-by: j2rong4cn <j2rong@qq.com>
Co-authored-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>
2025-08-06 15:40:48 +08:00
08dae4f55f feat(123_open): update upload api v2 (#976) 2025-08-06 15:27:13 +08:00
9ac0484bc0 perf(ftp): improve concurrent Link response; fix alias/local driver issues (#974) 2025-08-06 13:32:37 +08:00
8cf15183a0 perf: optimize upload (#554)
* perf(115,123): optimize upload

* chore

* aliyun_open, google_drive

* fix bug

* chore

* cloudreve, cloudreve_v4, onedrive, onedrive_app

* chore(conf): add `max_buffer_limit` option

* 123pan multithread upload

* doubao

* google_drive

* chore

* chore

* chore: code for calculating the number of chunks

* automatic MaxBufferLimit

* automatic MaxBufferLimit

* 189pc

* add Lifecycle to errorgroup

* fill in gaps

* Conf.MaxBufferLimit is in MB

* .

---------

Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-05 21:42:54 +08:00
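A sketch of the buffer-capping idea behind the `max_buffer_limit` option: keep per-upload memory bounded regardless of part size. The function and parameter names here are assumptions, not the actual configuration code.

```go
package main

import "fmt"

// bufferSize caps a per-upload buffer at maxBufferLimitMB (0 means no cap).
func bufferSize(partSize int64, maxBufferLimitMB int64) int64 {
	limit := maxBufferLimitMB << 20 // MB -> bytes
	if limit > 0 && partSize > limit {
		return limit
	}
	return partSize
}

func main() {
	fmt.Println(bufferSize(64<<20, 16)) // a 64 MiB part buffered at 16 MiB
}
```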
c8f2aaaa55 feat(cmd): add delete command for storage (#952) 2025-08-04 17:30:43 +08:00
1208bd0a83 fix(fs): nil interface not equal to nil (#971)
https://go.dev/doc/faq#nil_error
2025-08-03 23:51:11 +08:00
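The linked FAQ entry in miniature: an interface holding a typed nil pointer is itself non-nil, so `err != nil` can be true even when the pointer inside is nil.

```go
package main

import "fmt"

type myError struct{}

func (*myError) Error() string { return "boom" }

// lookup returns a typed nil pointer; the error interface it is wrapped in
// carries (type=*myError, value=nil) and therefore compares non-nil.
func lookup() error {
	var e *myError
	return e
}

func main() {
	err := lookup()
	fmt.Println(err == nil) // false, even though the pointer inside is nil
}
```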
6b096bcad4 fix(fs): deadlock when get link error (#963) 2025-08-02 17:49:53 +08:00
58dbf088f9 fix(fs): forget cache when get link error (#956) 2025-08-02 11:03:34 +08:00
05ff7908f2 fix(strm): encoded path is ineffective (#951) 2025-08-02 00:23:18 +08:00
a703b736c9 feat(offline_download): filter empty URLs in offline download requests (#948) 2025-08-01 16:12:21 +08:00
e458f2ab53 fix(bootstrap): add newline after initial admin password output (#943)
fix(bootstrap): add newline after initial admin password output
2025-08-01 13:43:41 +08:00
a5a22e7085 fix(local): Treat junction as directory in Windows. (#809)
Treat junction as directory in Windows.
2025-07-31 13:54:56 +08:00
9469c95b14 fix(security): potential XSS vulnerabilities (#896) 2025-07-31 12:57:20 +08:00
cf912dcf7a fix(cmd): output to console (#920)
fix(cmd): output to terminal
2025-07-31 11:44:00 +08:00
ccd4af26e5 feat(patch): add migration from Alist V3 driver to OpenList (#919)
* feat(patch): add migration from Alist V3 driver to OpenList

* chore(patch): improve logging
2025-07-31 11:43:21 +08:00
1682e873d6 feat(search): enhanced meilisearch search experience (#864)
* feat(search): enhanced `meilisearch` search experience
- upgrade `meilisearch` dependency
- support subdirectory search
- optimize searchDocument fields for subdirectory search
- specify full index uid instead of index prefix

* fix(search): more fixes to `meilisearch`
- make use of context where it was not used before
- remove the code that waits for tasks in the deletion process; tasks are queued and executed in order (when submitted to the queue successfully), which can improve `AutoUpdate` performance
2025-07-31 11:24:22 +08:00
54ae7e6d9b feat(115_open): Add GetObjInfo to accelerate getting link (#888)
* feat(115_open): Add GetObjInfo to accelerate getting link

* feat(fs): use cache directly when cache exists
2025-07-31 11:20:02 +08:00
991da7d87f feat(strm): add local mode (#885)
* feat(strm): add local mode

* Update drivers/strm/meta.go

Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
Signed-off-by: Seven <53081179+Seven66677731@users.noreply.github.com>

* feat(strm): local mode add sign

---------

Signed-off-by: Seven <53081179+Seven66677731@users.noreply.github.com>
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-07-31 11:18:59 +08:00
a498091aef fix(123&&123_share): fix link request header referer (#915) 2025-07-31 10:10:38 +08:00
976c82bb2b fix(drivers): update time-related fields to int64 (#913)
- In doubao/types.go:
  - Change LastUpdateTime from int to int64
  - Change UserCreateTime from int to int64
- In doubao_share/types.go:
  - Change CreateTime and UpdateTime from int to int64 in ShareInfo and FilePath
- In quark_uc/types.go:
  - Change UpdateTime from int to int64 in TranscodingResp

These changes ensure consistent and accurate representation of timestamp data across the project.
2025-07-31 10:10:32 +08:00
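Why int64 matters here: millisecond epoch timestamps are around 1.7e12, far beyond the 32-bit int maximum of about 2.1e9, so on 32-bit builds an `int` field silently breaks. A small sketch; the struct below only mirrors the kind of fields the commit adjusts.

```go
package main

import (
	"fmt"
	"math"
	"time"
)

// ShareInfo mirrors the kind of struct this commit adjusts: millisecond
// epoch timestamps need int64 to be portable across architectures.
type ShareInfo struct {
	CreateTime int64
	UpdateTime int64
}

func main() {
	now := time.Now().UnixMilli()
	fmt.Println(now > math.MaxInt32) // true: the value cannot fit a 32-bit int
	info := ShareInfo{CreateTime: now, UpdateTime: now}
	fmt.Println(time.UnixMilli(info.CreateTime).UTC())
}
```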
5b41a3bdff feat(ci): Add support for LoongArch64 architecture builds (#907) 2025-07-31 10:10:19 +08:00
19d1a3b785 refactor(ci): Refactor Docker build to use base images and dynamic Dockerfile generation (#904) 2025-07-30 15:04:29 +08:00
3c7b0c4999 fix(qb): Configure HTTP client with connection pooling and fix resource leaks in qBittorrent client. (#898) 2025-07-29 21:56:36 +08:00
d6867b4ab6 fix(user): show admin password on first start (#883)
* fix: fix admin password not shown in first start
* chore: add time dependence

Co-authored-by: Yinan Qin <39023210+elysia-best@users.noreply.github.com>
Signed-off-by: ILoveScratch <ilovescratch@foxmail.com>

* fix: fix log format

Co-authored-by: Yinan Qin <39023210+elysia-best@users.noreply.github.com>
Signed-off-by: ILoveScratch <ilovescratch@foxmail.com>

---------

Signed-off-by: ILoveScratch <ilovescratch@foxmail.com>
Co-authored-by: Yinan Qin <39023210+elysia-best@users.noreply.github.com>
2025-07-29 21:36:27 +08:00
11cf561307 fix(security): potential XSS vulnerabilities (#880)
* fix(security): potential XSS vulnerabilities

* chore: replace alist identifier with openlist identifier

Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
Signed-off-by: ILoveScratch <ilovescratch@foxmail.com>

---------

Signed-off-by: ILoveScratch <ilovescratch@foxmail.com>
Co-authored-by: ILoveScratch <ilovescratch@foxmail.com>
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-07-29 20:17:11 +08:00
239b58f63e fix(ci): Disable linux/s390x Docker builds (#887) 2025-07-29 16:22:50 +08:00
7da06655cb feat(setting): add site version information (#859)
* feat(setting): add site version information

* feat(conf): update conf.WebVersion to rolling

* fix(static): update condition to check conf.Version instead of conf.WebVersion

* fix(build.sh): use rolling release for web frontend in dev and beta builds

* chore(build.sh): update GitAuthor to The OpenList Projects Contributors

* fix(static): update condition to check conf.WebVersion
2025-07-29 09:49:33 +08:00
e0b3a611ba feat(thunderx,pikpak): add offline download support for ThunderX; add ctx to specific PikPak functions (#879)
* feat(thunderx,pikpak): add offline download support for ThunderX; add ctx to specific PikPak functions

* Update internal/offline_download/tool/download.go

Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
Signed-off-by: 花月喵梦 <152958106+nekohy@users.noreply.github.com>

---------

Signed-off-by: 花月喵梦 <152958106+nekohy@users.noreply.github.com>
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-07-29 09:46:28 +08:00
be1ad08a83 feat(ci): Add Windows 7 and LoongArch Release build support (#857)
* feat: Add Windows 7 and LoongArch old world build support (#30)

* feat: Add Windows 7 and Loongson old world build support

- Add BuildWin7() function with patched Go compiler for Windows 7 compatibility
- Add BuildLoongOldWorld() function for linux-loong64-abi1.0 target
- Create Zig-based wrapper scripts for Windows 7 cross-compilation
- Integrate new build functions into existing release workflows

* fix(win7): Add MinGW-w64 toolchain and improve LoongArch ABI isolation

- Install MinGW-w64 cross-compilation toolchain for Win7 compatibility
- Replace Zig compiler wrappers with MinGW-w64 for Windows 7 builds
- Add Go build cache cleaning to prevent LoongArch ABI1.0/ABI2.0 cross-contamination
- Force clean rebuilds (-a flag) for LoongArch builds to ensure ABI compatibility

* feat: add Windows 7 build support to beta release workflow

* feat: add LoongArch ABI2.0 support alongside existing ABI1.0 build (#31)

- Add BuildWin7() function with patched Go compiler for Windows 7 compatibility
- Add BuildLoongOldWorld() function for linux-loong64-abi1.0 target
- Create Zig-based wrapper scripts for Windows 7 cross-compilation
- Integrate new build functions into existing release workflows
- Install MinGW-w64 cross-compilation toolchain for Win7 compatibility
- Replace Zig compiler wrappers with MinGW-w64 for Windows 7 builds
- Add Go build cache cleaning to prevent LoongArch ABI1.0/ABI2.0 cross-contamination
- Force clean rebuilds (-a flag) for LoongArch builds to ensure ABI compatibility

* [skip ci]refactor: Refactor LoongArch builds to separate glibc from musl compilation

* fix(go-cache): Improve error handling for Go module cache cleaning in LoongArch builds

* feat(build): Enhance LoongArch build process with improved toolchain setup and cache management

* fix(build): Update Windows 7 target naming in build scripts and workflows

* refactor(build): Replace MinGW-w64 with Zig for Windows 7 toolchain in build scripts

* chore(cgo): remove cgo-actions subproject
2025-07-27 00:27:31 +08:00
4e9c30f49d feat(fs): full support webdav cross-driver copy and move (#823)
* fix(fs): restore webdav cross-driver copy and move

* fix bug

* WebDAV: support copying and moving folders

* improvements

* .
2025-07-26 00:27:46 +08:00
0ee31a3f36 fix(crypt): wrong ContentLength 2025-07-25 19:55:22 +08:00
23bddf991e feat(drivers): enable local sorting for cloudreve, ilanzou (#840)
* feat(cloudreve): enable local sorting

* feat(ilanzou): enable local sorting
2025-07-25 18:01:19 +08:00
da8d6607cf fix(static): support logo replacement (#834 Close #754) 2025-07-25 17:12:51 +08:00
6134574dac fix(fs): rename bug (#832)
* fix(fs): rename bug

* chore

* fix bug

* .

---------

Co-authored-by: j2rong4cn <j2rong@qq.com>
2025-07-25 13:42:39 +08:00
b273232f87 refactor(log): redir utils.Log to logrus after init (#833) 2025-07-25 13:38:45 +08:00
358e4d851e refactor(log): filter (#816) 2025-07-25 11:33:27 +08:00
211 changed files with 8869 additions and 2780 deletions

.github/PULL_REQUEST_TEMPLATE.md (new file)

@ -0,0 +1,56 @@
<!--
Provide a general summary of your changes in the Title above.
The PR title must start with `feat(): `, `docs(): `, `fix(): `, `style(): `, or `refactor(): `, `chore(): `. For example: `feat(component): add new feature`.
If it spans multiple components, use the main component as the prefix and enumerate in the title, describe in the body.
-->
<!--
在上方标题中提供您更改的总体摘要。
PR 标题需以 `feat(): `, `docs(): `, `fix(): `, `style(): `, `refactor(): `, `chore(): ` 其中之一开头,例如:`feat(component): 新增功能`
如果跨多个组件,请使用主要组件作为前缀,并在标题中枚举、描述中说明。
-->
## Description / 描述
<!-- Describe your changes in detail -->
<!-- 详细描述您的更改 -->
## Motivation and Context / 背景
<!-- Why is this change required? What problem does it solve? -->
<!-- 为什么需要此更改?它解决了什么问题? -->
<!-- If it fixes an open issue, please link to the issue here. -->
<!-- 如果修复了一个打开的issue请在此处链接到该issue -->
Closes #XXXX
<!-- or -->
<!-- 或者 -->
Relates to #XXXX
## How Has This Been Tested? / 测试
<!-- Please describe in detail how you tested your changes. -->
<!-- 请详细描述您如何测试更改 -->
## Checklist / 检查清单
<!-- Go over all the following points, and put an `x` in all the boxes that apply. -->
<!-- 检查以下所有要点,并在所有适用的框中打`x` -->
<!-- If you're unsure about any of these, don't hesitate to ask. We're here to help! -->
<!-- 如果您对其中任何一项不确定,请不要犹豫提问。我们会帮助您! -->
- [ ] I have read the [CONTRIBUTING](https://github.com/OpenListTeam/OpenList/blob/main/CONTRIBUTING.md) document.
我已阅读 [CONTRIBUTING](https://github.com/OpenListTeam/OpenList/blob/main/CONTRIBUTING.md) 文档。
- [ ] I have formatted my code with `go fmt` or [prettier](https://prettier.io/).
我已使用 `go fmt` 或 [prettier](https://prettier.io/) 格式化提交的代码。
- [ ] I have added appropriate labels to this PR (or mentioned needed labels in the description if lacking permissions).
我已为此 PR 添加了适当的标签(如无权限或需要的标签不存在,请在描述中说明,管理员将后续处理)。
- [ ] I have requested review from relevant code authors using the "Request review" feature when applicable.
我已在适当情况下使用"Request review"功能请求相关代码作者进行审查。
- [ ] I have updated the repository accordingly (If its needed).
我已相应更新了相关仓库(若适用)。
- [ ] [OpenList-Frontend](https://github.com/OpenListTeam/OpenList-Frontend) #XXXX
- [ ] [OpenList-Docs](https://github.com/OpenListTeam/OpenList-Docs) #XXXX


@ -61,7 +61,7 @@ jobs:
strategy:
matrix:
include:
- target: "!(*musl*|*windows-arm64*|*android*|*freebsd*)" # xgo and loongarch
- target: "!(*musl*|*windows-arm64*|*windows7-*|*android*|*freebsd*)" # xgo and loongarch
hash: "md5"
- target: "linux-!(arm*)-musl*" #musl-not-arm
hash: "md5-linux-musl"
@ -69,6 +69,8 @@ jobs:
hash: "md5-linux-musl-arm"
- target: "windows-arm64" #win-arm64
hash: "md5-windows-arm64"
- target: "windows7-*" #win7
hash: "md5-windows7"
- target: "android-*" #android
hash: "md5-android"
- target: "freebsd-*" #freebsd
@ -91,6 +93,7 @@ jobs:
run: bash build.sh dev web
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
- name: Build
uses: OpenListTeam/cgo-actions@v1.2.2
@ -103,10 +106,10 @@ jobs:
musl-base-url: "https://github.com/OpenListTeam/musl-compilers/releases/latest/download/"
x-flags: |
github.com/OpenListTeam/OpenList/v4/internal/conf.BuiltAt=$built_at
github.com/OpenListTeam/OpenList/v4/internal/conf.GitAuthor=OpenList
github.com/OpenListTeam/OpenList/v4/internal/conf.GitAuthor=The OpenList Projects Contributors <noreply@openlist.team>
github.com/OpenListTeam/OpenList/v4/internal/conf.GitCommit=$git_commit
github.com/OpenListTeam/OpenList/v4/internal/conf.Version=$tag
github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=dev
github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=rolling
- name: Compress
run: |


@ -39,6 +39,7 @@ jobs:
run: bash build.sh dev web
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
- name: Build
uses: OpenListTeam/cgo-actions@v1.2.2
@ -49,10 +50,10 @@ jobs:
out-dir: build
x-flags: |
github.com/OpenListTeam/OpenList/v4/internal/conf.BuiltAt=$built_at
github.com/OpenListTeam/OpenList/v4/internal/conf.GitAuthor=OpenList
github.com/OpenListTeam/OpenList/v4/internal/conf.GitAuthor=The OpenList Projects Contributors <noreply@openlist.team>
github.com/OpenListTeam/OpenList/v4/internal/conf.GitCommit=$git_commit
github.com/OpenListTeam/OpenList/v4/internal/conf.Version=$tag
github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=dev
github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=rolling
output: openlist$ext
- name: Upload artifact


@ -66,6 +66,7 @@ jobs:
bash build.sh release ${{ matrix.build-type == 'lite' && 'lite' || '' }} ${{ matrix.target-platform }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
- name: Upload assets
uses: softprops/action-gh-release@v2


@ -31,7 +31,7 @@ env:
REGISTRY: ghcr.io
ARTIFACT_NAME: 'binaries_docker_release'
ARTIFACT_NAME_LITE: 'binaries_docker_release_lite'
RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64'
RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/ppc64le,linux/riscv64,linux/loong64' ### Temporarily disable Docker builds for linux/s390x architectures for unknown reasons.
IMAGE_PUSH: ${{ github.event_name == 'push' || github.event_name == 'workflow_dispatch' }}
permissions:
@ -66,6 +66,7 @@ jobs:
run: bash build.sh release docker-multiplatform
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
- name: Upload artifacts
uses: actions/upload-artifact@v4
@ -105,6 +106,7 @@ jobs:
run: bash build.sh release lite docker-multiplatform
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
- name: Upload artifacts
uses: actions/upload-artifact@v4
@ -125,15 +127,19 @@ jobs:
image: ["latest", "ffmpeg", "aria2", "aio"]
include:
- image: "latest"
base_image_tag: "base"
build_arg: ""
tag_favor: ""
- image: "ffmpeg"
base_image_tag: "ffmpeg"
build_arg: INSTALL_FFMPEG=true
tag_favor: "suffix=-ffmpeg,onlatest=true"
- image: "aria2"
base_image_tag: "aria2"
build_arg: INSTALL_ARIA2=true
tag_favor: "suffix=-aria2,onlatest=true"
- image: "aio"
base_image_tag: "aio"
build_arg: |
INSTALL_FFMPEG=true
INSTALL_ARIA2=true
@ -189,7 +195,9 @@ jobs:
context: .
file: Dockerfile.ci
push: ${{ env.IMAGE_PUSH == 'true' }}
build-args: ${{ matrix.build_arg }}
build-args: |
BASE_IMAGE_TAG=${{ matrix.base_image_tag }}
${{ matrix.build_arg }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
platforms: ${{ env.RELEASE_PLATFORMS }}
@ -203,15 +211,19 @@ jobs:
image: ["latest", "ffmpeg", "aria2", "aio"]
include:
- image: "latest"
base_image_tag: "base"
build_arg: ""
tag_favor: "suffix=-lite,onlatest=true"
- image: "ffmpeg"
base_image_tag: "ffmpeg"
build_arg: INSTALL_FFMPEG=true
tag_favor: "suffix=-lite-ffmpeg,onlatest=true"
- image: "aria2"
base_image_tag: "aria2"
build_arg: INSTALL_ARIA2=true
tag_favor: "suffix=-lite-aria2,onlatest=true"
- image: "aio"
base_image_tag: "aio"
build_arg: |
INSTALL_FFMPEG=true
INSTALL_ARIA2=true
@ -267,7 +279,9 @@ jobs:
context: .
file: Dockerfile.ci
push: ${{ env.IMAGE_PUSH == 'true' }}
build-args: ${{ matrix.build_arg }}
build-args: |
BASE_IMAGE_TAG=${{ matrix.base_image_tag }}
${{ matrix.build_arg }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
platforms: ${{ env.RELEASE_PLATFORMS }}

.github/workflows/sync_repo.yml (new file)

@ -0,0 +1,38 @@
name: Sync to Gitee
on:
push:
branches:
- main
workflow_dispatch:
jobs:
sync:
runs-on: ubuntu-latest
name: Sync GitHub to Gitee
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Setup SSH
run: |
mkdir -p ~/.ssh
echo "${{ secrets.GITEE_SSH_PRIVATE_KEY }}" > ~/.ssh/id_rsa
chmod 600 ~/.ssh/id_rsa
ssh-keyscan gitee.com >> ~/.ssh/known_hosts
- name: Create single commit and push
run: |
git config user.name "GitHub Actions"
git config user.email "actions@github.com"
# Create a new branch
git checkout --orphan new-main
git add .
git commit -m "Sync from GitHub: $(date)"
# Add Gitee remote and force push
git remote add gitee ${{ vars.GITEE_REPO_URL }}
git push --force gitee new-main:main


@ -20,7 +20,7 @@ env:
IMAGE_NAME_DOCKERHUB: openlist
REGISTRY: ghcr.io
ARTIFACT_NAME: 'binaries_docker_release'
RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64'
RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/ppc64le,linux/riscv64,linux/loong64' ### Temporarily disable Docker builds for linux/s390x architectures for unknown reasons.
IMAGE_PUSH: ${{ github.event_name == 'push' }}
IMAGE_TAGS_BETA: |
type=ref,event=pr
@ -55,6 +55,7 @@ jobs:
run: bash build.sh beta docker-multiplatform
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
- name: Upload artifacts
uses: actions/upload-artifact@v4
@ -77,15 +78,19 @@ jobs:
image: ["latest", "ffmpeg", "aria2", "aio"]
include:
- image: "latest"
base_image_tag: "base"
build_arg: ""
tag_favor: ""
- image: "ffmpeg"
base_image_tag: "ffmpeg"
build_arg: INSTALL_FFMPEG=true
tag_favor: "suffix=-ffmpeg,onlatest=true"
- image: "aria2"
base_image_tag: "aria2"
build_arg: INSTALL_ARIA2=true
tag_favor: "suffix=-aria2,onlatest=true"
- image: "aio"
base_image_tag: "aio"
build_arg: |
INSTALL_FFMPEG=true
INSTALL_ARIA2=true
@ -137,7 +142,9 @@ jobs:
context: .
file: Dockerfile.ci
push: ${{ env.IMAGE_PUSH == 'true' }}
build-args: ${{ matrix.build_arg }}
build-args: |
BASE_IMAGE_TAG=${{ matrix.base_image_tag }}
${{ matrix.build_arg }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
platforms: ${{ env.RELEASE_PLATFORMS }}

CONTRIBUTING.md

@ -2,106 +2,76 @@
## Setup your machine
`OpenList` is written in [Go](https://golang.org/) and [React](https://reactjs.org/).
`OpenList` is written in [Go](https://golang.org/) and [SolidJS](https://www.solidjs.com/).
Prerequisites:
- [git](https://git-scm.com)
- [Go 1.20+](https://golang.org/doc/install)
- [Go 1.24+](https://golang.org/doc/install)
- [gcc](https://gcc.gnu.org/)
- [nodejs](https://nodejs.org/)
Clone `OpenList` and `OpenList-Frontend` anywhere:
## Cloning a fork
Fork and clone `OpenList` and `OpenList-Frontend` anywhere:
```shell
$ git clone https://github.com/OpenListTeam/OpenList.git
$ git clone --recurse-submodules https://github.com/OpenListTeam/OpenList-Frontend.git
$ git clone https://github.com/<your-username>/OpenList.git
$ git clone --recurse-submodules https://github.com/<your-username>/OpenList-Frontend.git
```
## Creating a branch
Create a new branch from the `main` branch, with an appropriate name.
```shell
$ git checkout -b <branch-name>
```
You should switch to the `main` branch for development.
## Preview your change
### backend
```shell
$ go run main.go
```
### frontend
```shell
$ pnpm dev
```
## Add a new driver
Copy `drivers/template` folder and rename it, and follow the comments in it.
## Create a commit
Commit messages should be well formatted, and to make that "standardized".
### Commit Message Format
Each commit message consists of a **header**, a **body** and a **footer**. The header has a special
format that includes a **type**, a **scope** and a **subject**:
Submit your pull request. For PR titles, follow [Conventional Commits](https://www.conventionalcommits.org).
```
<type>(<scope>): <subject>
<BLANK LINE>
<body>
<BLANK LINE>
<footer>
```
https://github.com/OpenListTeam/OpenList/issues/376
The **header** is mandatory and the **scope** of the header is optional.
Any line of the commit message cannot be longer than 100 characters! This allows the message to be easier
to read on GitHub as well as in various git tools.
### Revert
If the commit reverts a previous commit, it should begin with `revert: `, followed by the header
of the reverted commit.
In the body it should say: `This reverts commit <hash>.`, where the hash is the SHA of the commit
being reverted.
### Type
Must be one of the following:
* **feat**: A new feature
* **fix**: A bug fix
* **docs**: Documentation only changes
* **style**: Changes that do not affect the meaning of the code (white-space, formatting, missing
semi-colons, etc)
* **refactor**: A code change that neither fixes a bug nor adds a feature
* **perf**: A code change that improves performance
* **test**: Adding missing or correcting existing tests
* **build**: Affects project builds or dependency modifications
* **revert**: Restore the previous commit
* **ci**: Continuous integration of related file modifications
* **chore**: Changes to the build process or auxiliary tools and libraries such as documentation
generation
* **release**: Release a new version
### Scope
The scope could be anything specifying place of the commit change. For example `$location`,
`$browser`, `$compile`, `$rootScope`, `ngHref`, `ngClick`, `ngView`, etc...
You can use `*` when the change affects more than a single scope.
### Subject
The subject contains succinct description of the change:
* use the imperative, present tense: "change" not "changed" nor "changes"
* don't capitalize first letter
* no dot (.) at the end
### Body
Just as in the **subject**, use the imperative, present tense: "change" not "changed" nor "changes".
The body should include the motivation for the change and contrast this with previous behavior.
### Footer
The footer should contain any information about **Breaking Changes** and is also the place to
[reference GitHub issues that this commit closes](https://help.github.com/articles/closing-issues-via-commit-messages/).
**Breaking Changes** should start with the word `BREAKING CHANGE:` with a space or two newlines.
The rest of the commit message is then used for this.
It's suggested to sign your commits. See: [How to sign commits](https://docs.github.com/en/authentication/managing-commit-signature-verification/signing-commits)
## Submit a pull request
Push your branch to your `openlist` fork and open a pull request against the
`main` branch.
Please make sure your code has been formatted with `go fmt` or [prettier](https://prettier.io/) before submitting.
Push your branch to your `openlist` fork and open a pull request against the `main` branch.
## Merge your pull request
Your pull request will be merged after review. Please wait for the maintainer to merge your pull request after review.
At least 1 approving review is required by reviewers with write access. You can also request a review from maintainers.
## Delete your branch
(Optional) After your pull request is merged, you can delete your branch.
---
Thank you for your contribution! Let's make OpenList better together!

Dockerfile

@ -1,4 +1,7 @@
FROM docker.io/library/alpine:edge AS builder
### Default image is base. You can add other support by modifying BASE_IMAGE_TAG. The following parameters are supported: base (default), aria2, ffmpeg, aio
ARG BASE_IMAGE_TAG=base
FROM alpine:edge AS builder
LABEL stage=go-builder
WORKDIR /app/
RUN apk add --no-cache bash curl jq gcc git go musl-dev
@ -7,51 +10,27 @@ RUN go mod download
COPY ./ ./
RUN bash build.sh release docker
FROM alpine:edge
FROM openlistteam/openlist-base-image:${BASE_IMAGE_TAG}
LABEL MAINTAINER="OpenList"
ARG INSTALL_FFMPEG=false
ARG INSTALL_ARIA2=false
LABEL MAINTAINER="OpenList"
ARG USER=openlist
ARG UID=1001
ARG GID=1001
WORKDIR /opt/openlist/
RUN apk update && \
apk upgrade --no-cache && \
apk add --no-cache bash ca-certificates su-exec tzdata runit; \
[ "$INSTALL_FFMPEG" = "true" ] && apk add --no-cache ffmpeg; \
[ "$INSTALL_ARIA2" = "true" ] && apk add --no-cache curl aria2 && \
mkdir -p /opt/aria2/.aria2 && \
wget https://github.com/P3TERX/aria2.conf/archive/refs/heads/master.tar.gz -O /tmp/aria-conf.tar.gz && \
tar -zxvf /tmp/aria-conf.tar.gz -C /opt/aria2/.aria2 --strip-components=1 && rm -f /tmp/aria-conf.tar.gz && \
sed -i 's|rpc-secret|#rpc-secret|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/script.conf && \
sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/script.conf && \
mkdir -p /opt/service/stop/aria2/log && \
echo '#!/bin/sh' > /opt/service/stop/aria2/run && \
echo 'exec 2>&1' >> /opt/service/stop/aria2/run && \
echo 'exec aria2c --enable-rpc --rpc-allow-origin-all --conf-path=/opt/aria2/.aria2/aria2.conf' >> /opt/service/stop/aria2/run && \
echo '#!/bin/sh' > /opt/service/stop/aria2/log/run && \
echo 'mkdir -p /opt/openlist/data/log/aria2 2>/dev/null' >> /opt/service/stop/aria2/log/run && \
echo 'exec svlogd /opt/openlist/data/log/aria2' >> /opt/service/stop/aria2/log/run && \
chmod +x /opt/service/stop/aria2/run /opt/service/stop/aria2/log/run && \
touch /opt/aria2/.aria2/aria2.session && \
/opt/aria2/.aria2/tracker.sh ; \
rm -rf /var/cache/apk/*
RUN mkdir -p /opt/service/start/openlist && \
echo '#!/bin/sh' > /opt/service/start/openlist/run && \
echo 'exec 2>&1' >> /opt/service/start/openlist/run && \
echo 'cd /opt/openlist' >> /opt/service/start/openlist/run && \
echo 'exec ./openlist server --no-prefix' >> /opt/service/start/openlist/run && \
chmod +x /opt/service/start/openlist/run
RUN addgroup -g ${GID} ${USER} && \
adduser -D -u ${UID} -G ${USER} ${USER} && \
mkdir -p /opt/openlist/data
COPY --chmod=755 --from=builder /app/bin/openlist ./
COPY --chmod=755 entrypoint.sh /entrypoint.sh
COPY --from=builder --chmod=755 --chown=${UID}:${GID} /app/bin/openlist ./
COPY --chmod=755 --chown=${UID}:${GID} entrypoint.sh /entrypoint.sh
USER ${USER}
RUN /entrypoint.sh version
ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
ENV UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
VOLUME /opt/openlist/data/
EXPOSE 5244 5245
CMD [ "/entrypoint.sh" ]

Dockerfile.ci

@ -1,49 +1,26 @@
FROM docker.io/library/alpine:edge
ARG BASE_IMAGE_TAG=base
FROM ghcr.io/openlistteam/openlist-base-image:${BASE_IMAGE_TAG}
LABEL MAINTAINER="OpenList"
ARG TARGETPLATFORM
ARG INSTALL_FFMPEG=false
ARG INSTALL_ARIA2=false
LABEL MAINTAINER="OpenList"
ARG USER=openlist
ARG UID=1001
ARG GID=1001
WORKDIR /opt/openlist/
RUN apk update && \
apk upgrade --no-cache && \
apk add --no-cache bash ca-certificates su-exec tzdata runit; \
[ "$INSTALL_FFMPEG" = "true" ] && apk add --no-cache ffmpeg; \
[ "$INSTALL_ARIA2" = "true" ] && apk add --no-cache curl aria2 && \
mkdir -p /opt/aria2/.aria2 && \
wget https://github.com/P3TERX/aria2.conf/archive/refs/heads/master.tar.gz -O /tmp/aria-conf.tar.gz && \
tar -zxvf /tmp/aria-conf.tar.gz -C /opt/aria2/.aria2 --strip-components=1 && rm -f /tmp/aria-conf.tar.gz && \
sed -i 's|rpc-secret|#rpc-secret|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/script.conf && \
sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/script.conf && \
mkdir -p /opt/service/stop/aria2/log && \
echo '#!/bin/sh' > /opt/service/stop/aria2/run && \
echo 'exec 2>&1' >> /opt/service/stop/aria2/run && \
echo 'exec aria2c --enable-rpc --rpc-allow-origin-all --conf-path=/opt/aria2/.aria2/aria2.conf' >> /opt/service/stop/aria2/run && \
echo '#!/bin/sh' > /opt/service/stop/aria2/log/run && \
echo 'mkdir -p /opt/openlist/data/log/aria2 2>/dev/null' >> /opt/service/stop/aria2/log/run && \
echo 'exec svlogd /opt/openlist/data/log/aria2' >> /opt/service/stop/aria2/log/run && \
chmod +x /opt/service/stop/aria2/run /opt/service/stop/aria2/log/run && \
touch /opt/aria2/.aria2/aria2.session && \
/opt/aria2/.aria2/tracker.sh ; \
rm -rf /var/cache/apk/*
RUN mkdir -p /opt/service/start/openlist && \
echo '#!/bin/sh' > /opt/service/start/openlist/run && \
echo 'exec 2>&1' >> /opt/service/start/openlist/run && \
echo 'cd /opt/openlist' >> /opt/service/start/openlist/run && \
echo 'exec ./openlist server --no-prefix' >> /opt/service/start/openlist/run && \
chmod +x /opt/service/start/openlist/run
RUN addgroup -g ${GID} ${USER} && \
adduser -D -u ${UID} -G ${USER} ${USER} && \
mkdir -p /opt/openlist/data
COPY --chmod=755 /build/${TARGETPLATFORM}/openlist ./
COPY --chmod=755 entrypoint.sh /entrypoint.sh
COPY --chmod=755 --chown=${UID}:${GID} /build/${TARGETPLATFORM}/openlist ./
COPY --chmod=755 --chown=${UID}:${GID} entrypoint.sh /entrypoint.sh
USER ${USER}
RUN /entrypoint.sh version
ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
ENV UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
VOLUME /opt/openlist/data/
EXPOSE 5244 5245
CMD [ "/entrypoint.sh" ]

build.sh

@ -4,6 +4,9 @@ builtAt="$(date +'%F %T %z')"
gitAuthor="The OpenList Projects Contributors <noreply@openlist.team>"
gitCommit=$(git log --pretty=format:"%h" -1)
# Set frontend repository, default to OpenListTeam/OpenList-Frontend
frontendRepo="${FRONTEND_REPO:-OpenListTeam/OpenList-Frontend}"
githubAuthArgs=""
if [ -n "$GITHUB_TOKEN" ]; then
githubAuthArgs="--header \"Authorization: Bearer $GITHUB_TOKEN\""
@ -17,15 +20,15 @@ fi
if [ "$1" = "dev" ]; then
version="dev"
webVersion="dev"
webVersion="rolling"
elif [ "$1" = "beta" ]; then
version="beta"
webVersion="dev"
webVersion="rolling"
else
git tag -d beta || true
# Always true if there's no tag
version=$(git describe --abbrev=0 --tags 2>/dev/null || echo "v0.0.0")
webVersion=$(eval "curl -fsSL --max-time 2 $githubAuthArgs \"https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases/latest\"" | grep "tag_name" | head -n 1 | awk -F ":" '{print $2}' | sed 's/\"//g;s/,//g;s/ //g')
webVersion=$(eval "curl -fsSL --max-time 2 $githubAuthArgs \"https://api.github.com/repos/$frontendRepo/releases/latest\"" | grep "tag_name" | head -n 1 | awk -F ":" '{print $2}' | sed 's/\"//g;s/,//g;s/ //g')
fi
echo "backend version: $version"
@ -45,30 +48,21 @@ ldflags="\
-X 'github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=$webVersion' \
"
FetchWebDev() {
pre_release_tag=$(eval "curl -fsSL --max-time 2 $githubAuthArgs https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases" | jq -r 'map(select(.prerelease)) | first | .tag_name')
if [ -z "$pre_release_tag" ] || [ "$pre_release_tag" == "null" ]; then
# fall back to latest release
pre_release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases/latest\"")
else
pre_release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases/tags/$pre_release_tag\"")
fi
FetchWebRolling() {
pre_release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/$frontendRepo/releases/tags/rolling\"")
pre_release_assets=$(echo "$pre_release_json" | jq -r '.assets[].browser_download_url')
if [ "$useLite" = true ]; then
pre_release_tar_url=$(echo "$pre_release_assets" | grep "openlist-frontend-dist-lite" | grep "\.tar\.gz$")
else
pre_release_tar_url=$(echo "$pre_release_assets" | grep "openlist-frontend-dist" | grep -v "lite" | grep "\.tar\.gz$")
fi
curl -fsSL "$pre_release_tar_url" -o web-dist-dev.tar.gz
# There is no lite for rolling
pre_release_tar_url=$(echo "$pre_release_assets" | grep "openlist-frontend-dist" | grep -v "lite" | grep "\.tar\.gz$")
curl -fsSL "$pre_release_tar_url" -o dist.tar.gz
rm -rf public/dist && mkdir -p public/dist
tar -zxvf web-dist-dev.tar.gz -C public/dist
rm -rf web-dist-dev.tar.gz
tar -zxvf dist.tar.gz -C public/dist
rm -rf dist.tar.gz
}
FetchWebRelease() {
release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases/latest\"")
release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/$frontendRepo/releases/latest\"")
release_assets=$(echo "$release_json" | jq -r '.assets[].browser_download_url')
if [ "$useLite" = true ]; then
@ -95,6 +89,45 @@ BuildWinArm64() {
go build -o "$1" -ldflags="$ldflags" -tags=jsoniter .
}
BuildWin7() {
# Setup Win7 Go compiler (patched version that supports Windows 7)
go_version=$(go version | grep -o 'go[0-9]\+\.[0-9]\+\.[0-9]\+' | sed 's/go//')
echo "Detected Go version: $go_version"
curl -fsSL --retry 3 -o go-win7.zip -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/XTLS/go-win7/releases/download/patched-${go_version}/go-for-win7-linux-amd64.zip"
rm -rf go-win7
unzip go-win7.zip -d go-win7
rm go-win7.zip
# Set permissions for all wrapper files
chmod +x ./wrapper/zcc-win7
chmod +x ./wrapper/zcxx-win7
chmod +x ./wrapper/zcc-win7-386
chmod +x ./wrapper/zcxx-win7-386
# Build for both 386 and amd64 architectures
for arch in "386" "amd64"; do
echo "building for windows7-${arch}"
export GOOS=windows
export GOARCH=${arch}
export CGO_ENABLED=1
# Use architecture-specific wrapper files
if [ "$arch" = "386" ]; then
export CC=$(pwd)/wrapper/zcc-win7-386
export CXX=$(pwd)/wrapper/zcxx-win7-386
else
export CC=$(pwd)/wrapper/zcc-win7
export CXX=$(pwd)/wrapper/zcxx-win7
fi
# Use the patched Go compiler for Win7 compatibility
$(pwd)/go-win7/bin/go build -o "${1}-${arch}.exe" -ldflags="$ldflags" -tags=jsoniter .
done
}
BuildDev() {
rm -rf .git/
mkdir -p "dist"
@ -134,7 +167,7 @@ BuildDocker() {
PrepareBuildDockerMusl() {
mkdir -p build/musl-libs
BASE="https://github.com/OpenListTeam/musl-compilers/releases/latest/download/"
FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross i486-linux-musl-cross s390x-linux-musl-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross riscv64-linux-musl-cross powerpc64le-linux-musl-cross)
FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross i486-linux-musl-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross riscv64-linux-musl-cross powerpc64le-linux-musl-cross loongarch64-linux-musl-cross) ## Disable s390x-linux-musl-cross builds
for i in "${FILES[@]}"; do
url="${BASE}${i}.tgz"
lib_tgz="build/${i}.tgz"
@ -153,8 +186,8 @@ BuildDockerMultiplatform() {
docker_lflags="--extldflags '-static -fpic' $ldflags"
export CGO_ENABLED=1
OS_ARCHES=(linux-amd64 linux-arm64 linux-386 linux-s390x linux-riscv64 linux-ppc64le)
CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc i486-linux-musl-gcc s390x-linux-musl-gcc riscv64-linux-musl-gcc powerpc64le-linux-musl-gcc)
OS_ARCHES=(linux-amd64 linux-arm64 linux-386 linux-riscv64 linux-ppc64le linux-loong64) ## Disable linux-s390x builds
CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc i486-linux-musl-gcc riscv64-linux-musl-gcc powerpc64le-linux-musl-gcc loongarch64-linux-musl-gcc) ## Disable s390x-linux-musl-gcc builds
for i in "${!OS_ARCHES[@]}"; do
os_arch=${OS_ARCHES[$i]}
cgo_cc=${CGO_ARGS[$i]}
@ -186,12 +219,171 @@ BuildRelease() {
rm -rf .git/
mkdir -p "build"
BuildWinArm64 ./build/"$appName"-windows-arm64.exe
BuildWin7 ./build/"$appName"-windows7
xgo -out "$appName" -ldflags="$ldflags" -tags=jsoniter .
# why? Because some target platforms seem to have issues with upx compression
# upx -9 ./"$appName"-linux-amd64
# cp ./"$appName"-windows-amd64.exe ./"$appName"-windows-amd64-upx.exe
# upx -9 ./"$appName"-windows-amd64-upx.exe
mv "$appName"-* build
# Build LoongArch with glibc (both old world abi1.0 and new world abi2.0)
# Separate from musl builds to avoid cache conflicts
BuildLoongGLIBC ./build/$appName-linux-loong64-abi1.0 abi1.0
BuildLoongGLIBC ./build/$appName-linux-loong64 abi2.0
}
BuildLoongGLIBC() {
local target_abi="$2"
local output_file="$1"
local oldWorldGoVersion="1.24.3"
if [ "$target_abi" = "abi1.0" ]; then
echo building for linux-loong64-abi1.0
else
echo building for linux-loong64-abi2.0
target_abi="abi2.0" # Default to abi2.0 if not specified
fi
# Note: No longer need global cache cleanup since ABI1.0 uses isolated cache directory
echo "Using optimized cache strategy: ABI1.0 has isolated cache, ABI2.0 uses standard cache"
if [ "$target_abi" = "abi1.0" ]; then
# Setup abi1.0 toolchain and patched Go compiler similar to cgo-action implementation
echo "Setting up Loongson old-world ABI1.0 toolchain and patched Go compiler..."
# Download and setup patched Go compiler for old-world
if ! curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/go${oldWorldGoVersion}.linux-amd64.tar.gz" \
-o go-loong64-abi1.0.tar.gz; then
echo "Error: Failed to download patched Go compiler for old-world ABI1.0"
if [ -n "$GITHUB_TOKEN" ]; then
echo "Error output from curl:"
curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/go${oldWorldGoVersion}.linux-amd64.tar.gz" \
-o go-loong64-abi1.0.tar.gz || true
fi
return 1
fi
rm -rf go-loong64-abi1.0
mkdir go-loong64-abi1.0
if ! tar -xzf go-loong64-abi1.0.tar.gz -C go-loong64-abi1.0 --strip-components=1; then
echo "Error: Failed to extract patched Go compiler"
return 1
fi
rm go-loong64-abi1.0.tar.gz
# Download and setup GCC toolchain for old-world
if ! curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/loongson-gnu-toolchain-8.3.novec-x86_64-loongarch64-linux-gnu-rc1.1.tar.xz" \
-o gcc8-loong64-abi1.0.tar.xz; then
echo "Error: Failed to download GCC toolchain for old-world ABI1.0"
if [ -n "$GITHUB_TOKEN" ]; then
echo "Error output from curl:"
curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/loongson-gnu-toolchain-8.3.novec-x86_64-loongarch64-linux-gnu-rc1.1.tar.xz" \
-o gcc8-loong64-abi1.0.tar.xz || true
fi
return 1
fi
rm -rf gcc8-loong64-abi1.0
mkdir gcc8-loong64-abi1.0
if ! tar -Jxf gcc8-loong64-abi1.0.tar.xz -C gcc8-loong64-abi1.0 --strip-components=1; then
echo "Error: Failed to extract GCC toolchain"
return 1
fi
rm gcc8-loong64-abi1.0.tar.xz
# Setup separate cache directory for ABI1.0 to avoid cache pollution
abi1_cache_dir="$(pwd)/go-loong64-abi1.0-cache"
mkdir -p "$abi1_cache_dir"
echo "Using separate cache directory for ABI1.0: $abi1_cache_dir"
# Use patched Go compiler for old-world build (critical for ABI1.0 compatibility)
echo "Building with patched Go compiler for old-world ABI1.0..."
echo "Using isolated cache directory: $abi1_cache_dir"
# Use env command to set environment variables locally without affecting global environment
if ! env GOOS=linux GOARCH=loong64 \
CC="$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-gcc" \
CXX="$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-g++" \
CGO_ENABLED=1 \
GOCACHE="$abi1_cache_dir" \
$(pwd)/go-loong64-abi1.0/bin/go build -a -o "$output_file" -ldflags="$ldflags" -tags=jsoniter .; then
echo "Error: Build failed with patched Go compiler"
echo "Attempting retry with cache cleanup..."
env GOCACHE="$abi1_cache_dir" $(pwd)/go-loong64-abi1.0/bin/go clean -cache
if ! env GOOS=linux GOARCH=loong64 \
CC="$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-gcc" \
CXX="$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-g++" \
CGO_ENABLED=1 \
GOCACHE="$abi1_cache_dir" \
$(pwd)/go-loong64-abi1.0/bin/go build -a -o "$output_file" -ldflags="$ldflags" -tags=jsoniter .; then
echo "Error: Build failed again after cache cleanup"
echo "Build environment details:"
echo "GOOS=linux"
echo "GOARCH=loong64"
echo "CC=$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-gcc"
echo "CXX=$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-g++"
echo "CGO_ENABLED=1"
echo "GOCACHE=$abi1_cache_dir"
echo "Go version: $($(pwd)/go-loong64-abi1.0/bin/go version)"
echo "GCC version: $($(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-gcc --version | head -1)"
return 1
fi
fi
else
# Setup abi2.0 toolchain for new world glibc build
echo "Setting up new-world ABI2.0 toolchain..."
if ! curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/cross-tools/releases/download/20250507/x86_64-cross-tools-loongarch64-unknown-linux-gnu-legacy.tar.xz" \
-o gcc12-loong64-abi2.0.tar.xz; then
echo "Error: Failed to download GCC toolchain for new-world ABI2.0"
if [ -n "$GITHUB_TOKEN" ]; then
echo "Error output from curl:"
curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/cross-tools/releases/download/20250507/x86_64-cross-tools-loongarch64-unknown-linux-gnu-legacy.tar.xz" \
-o gcc12-loong64-abi2.0.tar.xz || true
fi
return 1
fi
rm -rf gcc12-loong64-abi2.0
mkdir gcc12-loong64-abi2.0
if ! tar -Jxf gcc12-loong64-abi2.0.tar.xz -C gcc12-loong64-abi2.0 --strip-components=1; then
echo "Error: Failed to extract GCC toolchain"
return 1
fi
rm gcc12-loong64-abi2.0.tar.xz
export GOOS=linux
export GOARCH=loong64
export CC=$(pwd)/gcc12-loong64-abi2.0/bin/loongarch64-unknown-linux-gnu-gcc
export CXX=$(pwd)/gcc12-loong64-abi2.0/bin/loongarch64-unknown-linux-gnu-g++
export CGO_ENABLED=1
# Use standard Go compiler for new-world build
echo "Building with standard Go compiler for new-world ABI2.0..."
if ! go build -a -o "$output_file" -ldflags="$ldflags" -tags=jsoniter .; then
echo "Error: Build failed with standard Go compiler"
echo "Attempting retry with cache cleanup..."
go clean -cache
if ! go build -a -o "$output_file" -ldflags="$ldflags" -tags=jsoniter .; then
echo "Error: Build failed again after cache cleanup"
echo "Build environment details:"
echo "GOOS=$GOOS"
echo "GOARCH=$GOARCH"
echo "CC=$CC"
echo "CXX=$CXX"
echo "CGO_ENABLED=$CGO_ENABLED"
echo "Go version: $(go version)"
echo "GCC version: $($CC --version | head -1)"
return 1
fi
fi
fi
}
BuildReleaseLinuxMusl() {
@ -249,6 +441,7 @@ BuildReleaseLinuxMuslArm() {
done
}
BuildReleaseAndroid() {
rm -rf .git/
mkdir -p "build"
@ -344,7 +537,7 @@ MakeRelease() {
tar -czvf compress/"$i$liteSuffix".tar.gz "$appName"
rm -f "$appName"
done
for i in $(find . -type f -name "$appName-windows-*"); do
for i in $(find . -type f \( -name "$appName-windows-*" -o -name "$appName-windows7-*" \)); do
cp "$i" "$appName".exe
zip compress/$(echo $i | sed 's/\.[^.]*$//')$liteSuffix.zip "$appName".exe
rm -f "$appName".exe
@ -391,7 +584,7 @@ for arg in "$@"; do
done
if [ "$buildType" = "dev" ]; then
FetchWebDev
FetchWebRolling
if [ "$dockerType" = "docker" ]; then
BuildDocker
elif [ "$dockerType" = "docker-multiplatform" ]; then
@ -403,7 +596,7 @@ if [ "$buildType" = "dev" ]; then
fi
elif [ "$buildType" = "release" -o "$buildType" = "beta" ]; then
if [ "$buildType" = "beta" ]; then
FetchWebDev
FetchWebRolling
else
FetchWebRelease
fi
@ -484,4 +677,5 @@ else
echo -e " $0 release"
echo -e " $0 release lite"
echo -e " $0 release docker lite"
echo -e " $0 release linux_musl"
fi

View File

@ -4,6 +4,8 @@ Copyright © 2022 NAME HERE <EMAIL ADDRESS>
package cmd
import (
"fmt"
"github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/setting"
@ -24,10 +26,11 @@ var AdminCmd = &cobra.Command{
if err != nil {
utils.Log.Errorf("failed get admin user: %+v", err)
} else {
utils.Log.Infof("Admin user's username: %s", admin.Username)
utils.Log.Infof("The password can only be output at the first startup, and then stored as a hash value, which cannot be reversed")
utils.Log.Infof("You can reset the password with a random string by running [openlist admin random]")
utils.Log.Infof("You can also set a new password by running [openlist admin set NEW_PASSWORD]")
utils.Log.Infof("get admin user from CLI")
fmt.Println("Admin user's username:", admin.Username)
fmt.Println("The password can only be output at the first startup, and then stored as a hash value, which cannot be reversed")
fmt.Println("You can reset the password with a random string by running [openlist admin random]")
fmt.Println("You can also set a new password by running [openlist admin set NEW_PASSWORD]")
}
},
}
@ -36,6 +39,7 @@ var RandomPasswordCmd = &cobra.Command{
Use: "random",
Short: "Reset admin user's password to a random string",
Run: func(cmd *cobra.Command, args []string) {
utils.Log.Infof("reset admin user's password to a random string from CLI")
newPwd := random.String(8)
setAdminPassword(newPwd)
},
@ -44,12 +48,12 @@ var RandomPasswordCmd = &cobra.Command{
var SetPasswordCmd = &cobra.Command{
Use: "set",
Short: "Set admin user's password",
Run: func(cmd *cobra.Command, args []string) {
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
utils.Log.Errorf("Please enter the new password")
return
return fmt.Errorf("Please enter the new password")
}
setAdminPassword(args[0])
return nil
},
}
@ -60,7 +64,8 @@ var ShowTokenCmd = &cobra.Command{
Init()
defer Release()
token := setting.GetStr(conf.Token)
utils.Log.Infof("Admin token: %s", token)
utils.Log.Infof("show admin token from CLI")
fmt.Println("Admin token:", token)
},
}
@ -77,9 +82,10 @@ func setAdminPassword(pwd string) {
utils.Log.Errorf("failed update admin user: %+v", err)
return
}
utils.Log.Infof("admin user has been updated:")
utils.Log.Infof("username: %s", admin.Username)
utils.Log.Infof("password: %s", pwd)
utils.Log.Infof("admin user has been update from CLI")
fmt.Println("admin user has been updated:")
fmt.Println("username:", admin.Username)
fmt.Println("password:", pwd)
DelAdminCacheOnline()
}

View File

@ -4,6 +4,8 @@ Copyright © 2022 NAME HERE <EMAIL ADDRESS>
package cmd
import (
"fmt"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/spf13/cobra"
@ -24,7 +26,8 @@ var Cancel2FACmd = &cobra.Command{
if err != nil {
utils.Log.Errorf("failed to cancel 2FA: %+v", err)
} else {
utils.Log.Info("2FA canceled")
utils.Log.Infof("2FA is canceled from CLI")
fmt.Println("2FA canceled")
DelAdminCacheOnline()
}
}

View File

@ -48,7 +48,15 @@ the address is defined in config file`,
gin.SetMode(gin.ReleaseMode)
}
r := gin.New()
r.Use(middlewares.HTTPFilteredLogger(), gin.RecoveryWithWriter(log.StandardLogger().Out))
// gin log
if conf.Conf.Log.Filter.Enable {
r.Use(middlewares.FilteredLogger())
} else {
r.Use(gin.LoggerWithWriter(log.StandardLogger().Out))
}
r.Use(gin.RecoveryWithWriter(log.StandardLogger().Out))
server.Init(r)
var httpHandler http.Handler = r
if conf.Conf.Scheme.EnableH2c {
@ -57,6 +65,7 @@ the address is defined in config file`,
var httpSrv, httpsSrv, unixSrv *http.Server
if conf.Conf.Scheme.HttpPort != -1 {
httpBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpPort)
fmt.Printf("start HTTP server @ %s\n", httpBase)
utils.Log.Infof("start HTTP server @ %s", httpBase)
httpSrv = &http.Server{Addr: httpBase, Handler: httpHandler}
go func() {
@ -68,6 +77,7 @@ the address is defined in config file`,
}
if conf.Conf.Scheme.HttpsPort != -1 {
httpsBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpsPort)
fmt.Printf("start HTTPS server @ %s\n", httpsBase)
utils.Log.Infof("start HTTPS server @ %s", httpsBase)
httpsSrv = &http.Server{Addr: httpsBase, Handler: r}
go func() {
@ -78,6 +88,7 @@ the address is defined in config file`,
}()
}
if conf.Conf.Scheme.UnixFile != "" {
fmt.Printf("start unix server @ %s\n", conf.Conf.Scheme.UnixFile)
utils.Log.Infof("start unix server @ %s", conf.Conf.Scheme.UnixFile)
unixSrv = &http.Server{Handler: httpHandler}
go func() {
@ -103,9 +114,10 @@ the address is defined in config file`,
}
if conf.Conf.S3.Port != -1 && conf.Conf.S3.Enable {
s3r := gin.New()
s3r.Use(middlewares.S3FilteredLogger(), gin.RecoveryWithWriter(log.StandardLogger().Out))
s3r.Use(gin.LoggerWithWriter(log.StandardLogger().Out), gin.RecoveryWithWriter(log.StandardLogger().Out))
server.InitS3(s3r)
s3Base := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.S3.Port)
fmt.Printf("start S3 server @ %s\n", s3Base)
utils.Log.Infof("start S3 server @ %s", s3Base)
go func() {
var err error
@ -130,6 +142,7 @@ the address is defined in config file`,
if err != nil {
utils.Log.Fatalf("failed to start ftp driver: %s", err.Error())
} else {
fmt.Printf("start ftp server on %s\n", conf.Conf.FTP.Listen)
utils.Log.Infof("start ftp server on %s", conf.Conf.FTP.Listen)
go func() {
ftpServer = ftpserver.NewFtpServer(ftpDriver)
@ -148,6 +161,7 @@ the address is defined in config file`,
if err != nil {
utils.Log.Fatalf("failed to start sftp driver: %s", err.Error())
} else {
fmt.Printf("start sftp server on %s", conf.Conf.SFTP.Listen)
utils.Log.Infof("start sftp server on %s", conf.Conf.SFTP.Listen)
go func() {
sftpServer = sftpd.NewSftpServer(sftpDriver)

View File

@ -4,6 +4,7 @@ Copyright © 2023 NAME HERE <EMAIL ADDRESS>
package cmd
import (
"fmt"
"os"
"strconv"
@ -22,28 +23,61 @@ var storageCmd = &cobra.Command{
}
var disableStorageCmd = &cobra.Command{
Use: "disable",
Short: "Disable a storage",
Run: func(cmd *cobra.Command, args []string) {
Use: "disable [mount path]",
Short: "Disable a storage by mount path",
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) < 1 {
utils.Log.Errorf("mount path is required")
return
return fmt.Errorf("mount path is required")
}
mountPath := args[0]
Init()
defer Release()
storage, err := db.GetStorageByMountPath(mountPath)
if err != nil {
utils.Log.Errorf("failed to query storage: %+v", err)
} else {
storage.Disabled = true
err = db.UpdateStorage(storage)
if err != nil {
utils.Log.Errorf("failed to update storage: %+v", err)
} else {
utils.Log.Infof("Storage with mount path [%s] have been disabled", mountPath)
return fmt.Errorf("failed to query storage: %+v", err)
}
storage.Disabled = true
err = db.UpdateStorage(storage)
if err != nil {
return fmt.Errorf("failed to update storage: %+v", err)
}
utils.Log.Infof("Storage with mount path [%s] has been disabled from CLI", mountPath)
fmt.Printf("Storage with mount path [%s] has been disabled\n", mountPath)
return nil
},
}
var deleteStorageCmd = &cobra.Command{
Use: "delete [id]",
Short: "Delete a storage by id",
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) < 1 {
return fmt.Errorf("id is required")
}
id, err := strconv.Atoi(args[0])
if err != nil {
return fmt.Errorf("id must be a number")
}
if force, _ := cmd.Flags().GetBool("force"); !force {
fmt.Printf("Are you sure you want to delete storage with id [%d]? [y/N]: ", id)
var confirm string
fmt.Scanln(&confirm)
if confirm != "y" && confirm != "Y" {
fmt.Println("Delete operation cancelled.")
return nil
}
}
Init()
defer Release()
err = db.DeleteStorageById(uint(id))
if err != nil {
return fmt.Errorf("failed to delete storage by id: %+v", err)
}
utils.Log.Infof("Storage with id [%d] have been deleted from CLI", id)
fmt.Printf("Storage with id [%d] have been deleted\n", id)
return nil
},
}
@ -88,14 +122,14 @@ var storageTableHeight int
var listStorageCmd = &cobra.Command{
Use: "list",
Short: "List all storages",
Run: func(cmd *cobra.Command, args []string) {
RunE: func(cmd *cobra.Command, args []string) error {
Init()
defer Release()
storages, _, err := db.GetStorages(1, -1)
if err != nil {
utils.Log.Errorf("failed to query storages: %+v", err)
return fmt.Errorf("failed to query storages: %+v", err)
} else {
utils.Log.Infof("Found %d storages", len(storages))
fmt.Printf("Found %d storages\n", len(storages))
columns := []table.Column{
{Title: "ID", Width: 4},
{Title: "Driver", Width: 16},
@ -138,10 +172,11 @@ var listStorageCmd = &cobra.Command{
m := model{t}
if _, err := tea.NewProgram(m).Run(); err != nil {
utils.Log.Errorf("failed to run program: %+v", err)
fmt.Printf("failed to run program: %+v\n", err)
os.Exit(1)
}
}
return nil
},
}
@ -151,6 +186,8 @@ func init() {
storageCmd.AddCommand(disableStorageCmd)
storageCmd.AddCommand(listStorageCmd)
storageCmd.PersistentFlags().IntVarP(&storageTableHeight, "height", "H", 10, "Table height")
storageCmd.AddCommand(deleteStorageCmd)
deleteStorageCmd.Flags().BoolP("force", "f", false, "Force delete without confirmation")
// Here you will define your flags and configuration settings.
// Cobra supports Persistent Flags which will work for this command

View File

@ -6,10 +6,9 @@ services:
ports:
- '5244:5244'
- '5245:5245'
user: '0:0'
environment:
- PUID=0
- PGID=0
- UMASK=022
- TZ=UTC
- TZ=Asia/Shanghai
container_name: openlist
image: 'openlistteam/openlist:latest'

View File

@ -1,43 +1,60 @@
package _115
import (
"errors"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
driver115 "github.com/SheltonZhu/115driver/pkg/driver"
log "github.com/sirupsen/logrus"
)
var (
md5Salt = "Qclm8MGWUv59TnrR0XPg"
appVer = "27.0.5.7"
appVer = "35.6.0.3"
)
func (d *Pan115) getAppVersion() ([]driver115.AppVersion, error) {
result := driver115.VersionResp{}
resp, err := base.RestyClient.R().Get(driver115.ApiGetVersion)
err = driver115.CheckErr(err, &result, resp)
func (d *Pan115) getAppVersion() (string, error) {
result := VersionResp{}
res, err := base.RestyClient.R().Get(driver115.ApiGetVersion)
if err != nil {
return nil, err
return "", err
}
return result.Data.GetAppVersions(), nil
err = utils.Json.Unmarshal(res.Body(), &result)
if err != nil {
return "", err
}
if len(result.Error) > 0 {
return "", errors.New(result.Error)
}
return result.Data.Win.Version, nil
}
func (d *Pan115) getAppVer() string {
// TODO: add some cache
vers, err := d.getAppVersion()
ver, err := d.getAppVersion()
if err != nil {
log.Warnf("[115] get app version failed: %v", err)
return appVer
}
for _, ver := range vers {
if ver.AppName == "win" {
return ver.Version
}
if len(ver) > 0 {
return ver
}
return appVer
}
func (d *Pan115) initAppVer() {
appVer = d.getAppVer()
log.Debugf("use app version: %v", appVer)
}
type VersionResp struct {
Error string `json:"error,omitempty"`
Data Versions `json:"data"`
}
type Versions struct {
Win Version `json:"win"`
}
type Version struct {
Version string `json:"version_code"`
}

View File

@ -186,9 +186,7 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
preHash = strings.ToUpper(preHash)
fullHash := stream.GetHash().GetHash(utils.SHA1)
if len(fullHash) != utils.SHA1.Width {
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, fullHash, err = streamPkg.CacheFullInTempFileAndHash(stream, cacheFileProgress, utils.SHA1)
_, fullHash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA1)
if err != nil {
return nil, err
}

View File

@ -321,7 +321,7 @@ func (d *Pan115) UploadByMultipart(ctx context.Context, params *driver115.Upload
err error
)
tmpF, err := s.CacheFullInTempFile()
tmpF, err := s.CacheFullAndWriter(&up, nil)
if err != nil {
return nil, err
}

View File

@ -131,6 +131,23 @@ func (d *Open115) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
}, nil
}
func (d *Open115) GetObjInfo(ctx context.Context, path string) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
resp, err := d.client.GetFolderInfoByPath(ctx, path)
if err != nil {
return nil, err
}
return &Obj{
Fid: resp.FileID,
Fn: resp.FileName,
Fc: resp.FileCategory,
Sha1: resp.Sha1,
Pc: resp.PickCode,
}, nil
}
func (d *Open115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
@ -222,9 +239,7 @@ func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
}
sha1 := file.GetHash().GetHash(utils.SHA1)
if len(sha1) != utils.SHA1.Width {
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, sha1, err = stream.CacheFullInTempFileAndHash(file, cacheFileProgress, utils.SHA1)
_, sha1, err = stream.CacheFullAndHash(file, &up, utils.SHA1)
if err != nil {
return err
}

View File

@ -9,6 +9,7 @@ import (
sdk "github.com/OpenListTeam/115-sdk-go"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
"github.com/avast/retry-go"
@ -69,9 +70,6 @@ func (d *Open115) singleUpload(ctx context.Context, tempF model.File, tokenResp
// }
func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress, tokenResp *sdk.UploadGetTokenResp, initResp *sdk.UploadInitResp) error {
fileSize := stream.GetSize()
chunkSize := calPartSize(fileSize)
ossClient, err := oss.New(tokenResp.Endpoint, tokenResp.AccessKeyId, tokenResp.AccessKeySecret, oss.SecurityToken(tokenResp.SecurityToken))
if err != nil {
return err
@ -86,6 +84,13 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
return err
}
fileSize := stream.GetSize()
chunkSize := calPartSize(fileSize)
ss, err := streamPkg.NewStreamSectionReader(stream, int(chunkSize), &up)
if err != nil {
return err
}
partNum := (stream.GetSize() + chunkSize - 1) / chunkSize
parts := make([]oss.UploadPart, partNum)
offset := int64(0)
@ -98,10 +103,13 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
if i == partNum {
partSize = fileSize - (i-1)*chunkSize
}
rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
rd, err := ss.GetSectionReader(offset, partSize)
if err != nil {
return err
}
rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
err = retry.Do(func() error {
_ = rd.Reset()
rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
rd.Seek(0, io.SeekStart)
part, err := bucket.UploadPart(imur, rateLimitedRd, partSize, int(i))
if err != nil {
return err
@ -112,6 +120,7 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second))
ss.FreeSectionReader(rd)
if err != nil {
return err
}

View File

@ -64,14 +64,6 @@ func (d *Pan123) List(ctx context.Context, dir model.Obj, args model.ListArgs) (
func (d *Pan123) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if f, ok := file.(File); ok {
//var resp DownResp
var headers map[string]string
if !utils.IsLocalIPAddr(args.IP) {
headers = map[string]string{
//"X-Real-IP": "1.1.1.1",
"X-Forwarded-For": args.IP,
}
}
data := base.Json{
"driveId": 0,
"etag": f.Etag,
@ -83,25 +75,27 @@ func (d *Pan123) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
}
resp, err := d.Request(DownloadInfo, http.MethodPost, func(req *resty.Request) {
req.SetBody(data).SetHeaders(headers)
req.SetBody(data)
}, nil)
if err != nil {
return nil, err
}
downloadUrl := utils.Json.Get(resp, "data", "DownloadUrl").ToString()
u, err := url.Parse(downloadUrl)
ou, err := url.Parse(downloadUrl)
if err != nil {
return nil, err
}
nu := u.Query().Get("params")
u_ := ou.String()
nu := ou.Query().Get("params")
if nu != "" {
du, _ := base64.StdEncoding.DecodeString(nu)
u, err = url.Parse(string(du))
u, err := url.Parse(string(du))
if err != nil {
return nil, err
}
u_ = u.String()
}
u_ := u.String()
log.Debug("download url: ", u_)
res, err := base.NoRedirectClient.R().SetHeader("Referer", "https://www.123pan.com/").Get(u_)
if err != nil {
@ -118,7 +112,7 @@ func (d *Pan123) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
link.URL = utils.Json.Get(res.Body(), "data", "redirect_url").ToString()
}
link.Header = http.Header{
"Referer": []string{"https://www.123pan.com/"},
"Referer": []string{fmt.Sprintf("%s://%s/", ou.Scheme, ou.Host)},
}
return &link, nil
} else {
@ -188,9 +182,7 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, file model.FileStrea
etag := file.GetHash().GetHash(utils.MD5)
var err error
if len(etag) < utils.MD5.Width {
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, etag, err = stream.CacheFullInTempFileAndHash(file, cacheFileProgress, utils.MD5)
_, etag, err = stream.CacheFullAndHash(file, &up, utils.MD5)
if err != nil {
return err
}

View File

@ -11,7 +11,8 @@ type Addition struct {
driver.RootID
//OrderBy string `json:"order_by" type:"select" options:"file_id,file_name,size,update_at" default:"file_name"`
//OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
AccessToken string
AccessToken string
UploadThread int `json:"UploadThread" type:"number" default:"3" help:"number of upload threads"`
}
var config = driver.Config{
@ -22,6 +23,11 @@ var config = driver.Config{
func init() {
op.RegisterDriver(func() driver.Driver {
return &Pan123{}
// Newly added default options must be set during RegisterDriver initialization to take effect for existing users
return &Pan123{
Addition: Addition{
UploadThread: 3,
},
}
})
}

View File

@ -6,11 +6,16 @@ import (
"io"
"net/http"
"strconv"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
)
@ -69,18 +74,21 @@ func (d *Pan123) completeS3(ctx context.Context, upReq *UploadResp, file model.F
}
func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, up driver.UpdateProgress) error {
tmpF, err := file.CacheFullInTempFile()
// fetch s3 pre signed urls
size := file.GetSize()
chunkSize := int64(16 * utils.MB)
chunkCount := 1
if size > chunkSize {
chunkCount = int((size + chunkSize - 1) / chunkSize)
}
ss, err := stream.NewStreamSectionReader(file, int(chunkSize), &up)
if err != nil {
return err
}
// fetch s3 pre signed urls
size := file.GetSize()
chunkSize := min(size, 16*utils.MB)
chunkCount := int(size / chunkSize)
lastChunkSize := size % chunkSize
if lastChunkSize > 0 {
chunkCount++
} else {
if lastChunkSize == 0 {
lastChunkSize = chunkSize
}
// only 1 batch is allowed
@ -90,73 +98,99 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
batchSize = 10
getS3UploadUrl = d.getS3PreSignedUrls
}
thread := min(int(chunkCount), d.UploadThread)
threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
for i := 1; i <= chunkCount; i += batchSize {
if utils.IsCanceled(ctx) {
return ctx.Err()
if utils.IsCanceled(uploadCtx) {
break
}
start := i
end := min(i+batchSize, chunkCount+1)
s3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, start, end)
s3PreSignedUrls, err := getS3UploadUrl(uploadCtx, upReq, start, end)
if err != nil {
return err
}
// upload each chunk
for j := start; j < end; j++ {
if utils.IsCanceled(ctx) {
return ctx.Err()
for cur := start; cur < end; cur++ {
if utils.IsCanceled(uploadCtx) {
break
}
offset := int64(cur-1) * chunkSize
curSize := chunkSize
if j == chunkCount {
if cur == chunkCount {
curSize = lastChunkSize
}
err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.NewSectionReader(tmpF, chunkSize*int64(j-1), curSize), curSize, false, getS3UploadUrl)
if err != nil {
return err
}
up(float64(j) * 100 / float64(chunkCount))
var reader *stream.SectionReader
var rateLimitedRd io.Reader
threadG.GoWithLifecycle(errgroup.Lifecycle{
Before: func(ctx context.Context) error {
if reader == nil {
var err error
reader, err = ss.GetSectionReader(offset, curSize)
if err != nil {
return err
}
rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
}
return nil
},
Do: func(ctx context.Context) error {
reader.Seek(0, io.SeekStart)
uploadUrl := s3PreSignedUrls.Data.PreSignedUrls[strconv.Itoa(cur)]
if uploadUrl == "" {
return fmt.Errorf("upload url is empty, s3PreSignedUrls: %+v", s3PreSignedUrls)
}
reader.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, rateLimitedRd)
if err != nil {
return err
}
req.ContentLength = curSize
//req.Header.Set("Content-Length", strconv.FormatInt(curSize, 10))
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode == http.StatusForbidden {
singleflight.AnyGroup.Do(fmt.Sprintf("Pan123.newUpload_%p", threadG), func() (any, error) {
newS3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, cur, end)
if err != nil {
return nil, err
}
s3PreSignedUrls.Data.PreSignedUrls = newS3PreSignedUrls.Data.PreSignedUrls
return nil, nil
})
if err != nil {
return err
}
return fmt.Errorf("upload s3 chunk %d failed, status code: %d", cur, res.StatusCode)
}
if res.StatusCode != http.StatusOK {
body, err := io.ReadAll(res.Body)
if err != nil {
return err
}
return fmt.Errorf("upload s3 chunk %d failed, status code: %d, body: %s", cur, res.StatusCode, body)
}
progress := 10.0 + 85.0*float64(threadG.Success())/float64(chunkCount)
up(progress)
return nil
},
After: func(err error) {
ss.FreeSectionReader(reader)
},
})
}
}
if err := threadG.Wait(); err != nil {
return err
}
defer up(100)
// complete s3 upload
return d.completeS3(ctx, upReq, file, chunkCount > 1)
}
func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSignedUrls *S3PreSignedURLs, cur, end int, reader *io.SectionReader, curSize int64, retry bool, getS3UploadUrl func(ctx context.Context, upReq *UploadResp, start int, end int) (*S3PreSignedURLs, error)) error {
uploadUrl := s3PreSignedUrls.Data.PreSignedUrls[strconv.Itoa(cur)]
if uploadUrl == "" {
return fmt.Errorf("upload url is empty, s3PreSignedUrls: %+v", s3PreSignedUrls)
}
req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, reader))
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = curSize
//req.Header.Set("Content-Length", strconv.FormatInt(curSize, 10))
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode == http.StatusForbidden {
if retry {
return fmt.Errorf("upload s3 chunk %d failed, status code: %d", cur, res.StatusCode)
}
// refresh s3 pre signed urls
newS3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, cur, end)
if err != nil {
return err
}
s3PreSignedUrls.Data.PreSignedUrls = newS3PreSignedUrls.Data.PreSignedUrls
// retry
reader.Seek(0, io.SeekStart)
return d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, cur, end, reader, curSize, true, getS3UploadUrl)
}
if res.StatusCode != http.StatusOK {
body, err := io.ReadAll(res.Body)
if err != nil {
return err
}
return fmt.Errorf("upload s3 chunk %d failed, status code: %d, body: %s", cur, res.StatusCode, body)
}
return nil
}

View File

@ -2,7 +2,9 @@ package _123_open
import (
"context"
"fmt"
"strconv"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
@ -15,6 +17,7 @@ import (
type Open123 struct {
model.Storage
Addition
UID uint64
}
func (d *Open123) Config() driver.Config {
@ -67,13 +70,45 @@ func (d *Open123) List(ctx context.Context, dir model.Obj, args model.ListArgs)
func (d *Open123) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
fileId, _ := strconv.ParseInt(file.GetID(), 10, 64)
if d.DirectLink {
res, err := d.getDirectLink(fileId)
if err != nil {
return nil, err
}
if d.DirectLinkPrivateKey == "" {
duration := 365 * 24 * time.Hour // cache for one year
return &model.Link{
URL: res.Data.URL,
Expiration: &duration,
}, nil
}
uid, err := d.getUID()
if err != nil {
return nil, err
}
duration := time.Duration(d.DirectLinkValidDuration) * time.Minute
newURL, err := d.SignURL(res.Data.URL, d.DirectLinkPrivateKey,
uid, duration)
if err != nil {
return nil, err
}
return &model.Link{
URL: newURL,
Expiration: &duration,
}, nil
}
res, err := d.getDownloadInfo(fileId)
if err != nil {
return nil, err
}
link := model.Link{URL: res.Data.DownloadUrl}
return &link, nil
return &model.Link{URL: res.Data.DownloadUrl}, nil
}
func (d *Open123) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
@ -95,6 +130,22 @@ func (d *Open123) Rename(ctx context.Context, srcObj model.Obj, newName string)
}
func (d *Open123) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
// Try to implement copy via upload + MD5 rapid upload (instant transfer)
// 1. Create the file
// parentFileID: ID of the parent directory; use 0 when uploading to the root directory
parentFileId, err := strconv.ParseInt(dstDir.GetID(), 10, 64)
if err != nil {
return fmt.Errorf("parse parentFileID error: %v", err)
}
etag := srcObj.(File).Etag
createResp, err := d.create(parentFileId, srcObj.GetName(), etag, srcObj.GetSize(), 2, false)
if err != nil {
return err
}
// whether this was a rapid upload
if createResp.Data.Reuse {
return nil
}
return errs.NotSupport
}
@ -104,27 +155,64 @@ func (d *Open123) Remove(ctx context.Context, obj model.Obj) error {
return d.trash(fileId)
}
func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
// 1. Create the file
// parentFileID: ID of the parent directory; use 0 when uploading to the root directory
parentFileId, err := strconv.ParseInt(dstDir.GetID(), 10, 64)
if err != nil {
return nil, fmt.Errorf("parse parentFileID error: %v", err)
}
// etag: the file's MD5
etag := file.GetHash().GetHash(utils.MD5)
if len(etag) < utils.MD5.Width {
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, etag, err = stream.CacheFullInTempFileAndHash(file, cacheFileProgress, utils.MD5)
_, etag, err = stream.CacheFullAndHash(file, &up, utils.MD5)
if err != nil {
return err
return nil, err
}
}
createResp, err := d.create(parentFileId, file.GetName(), etag, file.GetSize(), 2, false)
if err != nil {
return err
return nil, err
}
// whether this was a rapid upload
if createResp.Data.Reuse {
return nil
// a valid FileID is returned only when rapid upload succeeds; otherwise it is 0
if createResp.Data.FileID != 0 {
return File{
FileName: file.GetName(),
Size: file.GetSize(),
FileId: createResp.Data.FileID,
Type: 2,
Etag: etag,
}, nil
}
}
return d.Upload(ctx, file, createResp, up)
// 2. Upload the parts
err = d.Upload(ctx, file, createResp, up)
if err != nil {
return nil, err
}
// 3. Finalize the upload
for range 60 {
uploadCompleteResp, err := d.complete(createResp.Data.PreuploadID)
// may return the undocumented error code 20103; the docs give no details
if err == nil && uploadCompleteResp.Data.Completed && uploadCompleteResp.Data.FileID != 0 {
up(100)
return File{
FileName: file.GetName(),
Size: file.GetSize(),
FileId: uploadCompleteResp.Data.FileID,
Type: 2,
Etag: etag,
}, nil
}
// if the API returns completed=false, keep polling this endpoint at 1-second intervals for the final upload result
time.Sleep(time.Second)
}
return nil, fmt.Errorf("upload complete timeout")
}
var _ driver.Driver = (*Open123)(nil)
var _ driver.PutResult = (*Open123)(nil)

View File

@ -23,6 +23,11 @@ type Addition struct {
// number of upload threads
UploadThread int `json:"UploadThread" type:"number" default:"3" help:"number of upload threads"`
// use direct links for downloads
DirectLink bool `json:"DirectLink" type:"bool" default:"false" required:"false" help:"use direct link when downloading files"`
DirectLinkPrivateKey string `json:"DirectLinkPrivateKey" required:"false" help:"private key for direct link, if URL authentication is enabled"`
DirectLinkValidDuration int64 `json:"DirectLinkValidDuration" type:"number" default:"30" required:"false" help:"minutes, if URL authentication is enabled"`
driver.RootID
}

View File

@ -73,7 +73,9 @@ func (f File) GetName() string {
}
func (f File) CreateTime() time.Time {
parsedTime, err := time.Parse("2006-01-02 15:04:05", f.CreateAt)
// the returned time carries no timezone info; assume UTC+8
loc := time.FixedZone("UTC+8", 8*60*60)
parsedTime, err := time.ParseInLocation("2006-01-02 15:04:05", f.CreateAt, loc)
if err != nil {
return time.Now()
}
@ -81,7 +83,9 @@ func (f File) CreateTime() time.Time {
}
func (f File) ModTime() time.Time {
parsedTime, err := time.Parse("2006-01-02 15:04:05", f.UpdateAt)
// the returned time carries no timezone info; assume UTC+8
loc := time.FixedZone("UTC+8", 8*60*60)
parsedTime, err := time.ParseInLocation("2006-01-02 15:04:05", f.UpdateAt, loc)
if err != nil {
return time.Now()
}
@ -123,19 +127,19 @@ type RefreshTokenResp struct {
type UserInfoResp struct {
BaseResp
Data struct {
UID int64 `json:"uid"`
Username string `json:"username"`
DisplayName string `json:"displayName"`
HeadImage string `json:"headImage"`
Passport string `json:"passport"`
Mail string `json:"mail"`
SpaceUsed int64 `json:"spaceUsed"`
SpacePermanent int64 `json:"spacePermanent"`
SpaceTemp int64 `json:"spaceTemp"`
SpaceTempExpr string `json:"spaceTempExpr"`
Vip bool `json:"vip"`
DirectTraffic int64 `json:"directTraffic"`
IsHideUID bool `json:"isHideUID"`
UID uint64 `json:"uid"`
// Username string `json:"username"`
// DisplayName string `json:"displayName"`
// HeadImage string `json:"headImage"`
// Passport string `json:"passport"`
// Mail string `json:"mail"`
// SpaceUsed int64 `json:"spaceUsed"`
// SpacePermanent int64 `json:"spacePermanent"`
// SpaceTemp int64 `json:"spaceTemp"`
// SpaceTempExpr int64 `json:"spaceTempExpr"`
// Vip bool `json:"vip"`
// DirectTraffic int64 `json:"directTraffic"`
// IsHideUID bool `json:"isHideUID"`
} `json:"data"`
}
@ -154,52 +158,30 @@ type DownloadInfoResp struct {
} `json:"data"`
}
type DirectLinkResp struct {
BaseResp
Data struct {
URL string `json:"url"`
} `json:"data"`
}
// response of the create-file V2 API
type UploadCreateResp struct {
BaseResp
Data struct {
FileID int64 `json:"fileID"`
PreuploadID string `json:"preuploadID"`
Reuse bool `json:"reuse"`
SliceSize int64 `json:"sliceSize"`
FileID int64 `json:"fileID"`
PreuploadID string `json:"preuploadID"`
Reuse bool `json:"reuse"`
SliceSize int64 `json:"sliceSize"`
Servers []string `json:"servers"`
} `json:"data"`
}
type UploadUrlResp struct {
BaseResp
Data struct {
PresignedURL string `json:"presignedURL"`
}
}
// response of the upload-complete V2 API
type UploadCompleteResp struct {
BaseResp
Data struct {
Async bool `json:"async"`
Completed bool `json:"completed"`
FileID int64 `json:"fileID"`
} `json:"data"`
}
type UploadAsyncResp struct {
BaseResp
Data struct {
Completed bool `json:"completed"`
FileID int64 `json:"fileID"`
} `json:"data"`
}
type UploadResp struct {
BaseResp
Data struct {
AccessKeyId string `json:"AccessKeyId"`
Bucket string `json:"Bucket"`
Key string `json:"Key"`
SecretAccessKey string `json:"SecretAccessKey"`
SessionToken string `json:"SessionToken"`
FileId int64 `json:"FileId"`
Reuse bool `json:"Reuse"`
EndPoint string `json:"EndPoint"`
StorageNode string `json:"StorageNode"`
UploadId string `json:"UploadId"`
} `json:"data"`
}

View File

@ -1,21 +1,28 @@
package _123_open
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"mime/multipart"
"net/http"
"strconv"
"strings"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
)
// create file (V2)
func (d *Open123) create(parentFileID int64, filename string, etag string, size int64, duplicate int, containDir bool) (*UploadCreateResp, error) {
var resp UploadCreateResp
_, err := d.Request(UploadCreate, http.MethodPost, func(req *resty.Request) {
@ -34,21 +41,136 @@ func (d *Open123) create(parentFileID int64, filename string, etag string, size
return &resp, nil
}
func (d *Open123) url(preuploadID string, sliceNo int64) (string, error) {
// get upload url
var resp UploadUrlResp
_, err := d.Request(UploadUrl, http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"preuploadId": preuploadID,
"sliceNo": sliceNo,
})
}, &resp)
// upload parts (V2)
func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createResp *UploadCreateResp, up driver.UpdateProgress) error {
uploadDomain := createResp.Data.Servers[0]
size := file.GetSize()
chunkSize := createResp.Data.SliceSize
ss, err := stream.NewStreamSectionReader(file, int(chunkSize), &up)
if err != nil {
return "", err
return err
}
return resp.Data.PresignedURL, nil
uploadNums := (size + chunkSize - 1) / chunkSize
thread := min(int(uploadNums), d.UploadThread)
threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
for partIndex := range uploadNums {
if utils.IsCanceled(uploadCtx) {
break
}
partIndex := partIndex
partNumber := partIndex + 1 // part numbers start at 1
offset := partIndex * chunkSize
size := min(chunkSize, size-offset)
var reader *stream.SectionReader
var rateLimitedRd io.Reader
sliceMD5 := ""
// multipart form buffer
b := bytes.NewBuffer(make([]byte, 0, 2048))
threadG.GoWithLifecycle(errgroup.Lifecycle{
Before: func(ctx context.Context) error {
if reader == nil {
var err error
// one reader per part
reader, err = ss.GetSectionReader(offset, size)
if err != nil {
return err
}
// compute this part's MD5
sliceMD5, err = utils.HashReader(utils.MD5, reader)
if err != nil {
return err
}
}
return nil
},
Do: func(ctx context.Context) error {
// reset the part reader to the start: HashReader (or a previous failed attempt) has already read it to EOF
reader.Seek(0, io.SeekStart)
b.Reset()
w := multipart.NewWriter(b)
// add the form fields
err = w.WriteField("preuploadID", createResp.Data.PreuploadID)
if err != nil {
return err
}
err = w.WriteField("sliceNo", strconv.FormatInt(partNumber, 10))
if err != nil {
return err
}
err = w.WriteField("sliceMD5", sliceMD5)
if err != nil {
return err
}
// add the file part (its content is streamed via MultiReader below)
_, err = w.CreateFormFile("slice", fmt.Sprintf("%s.part%d", file.GetName(), partNumber))
if err != nil {
return err
}
headSize := b.Len()
err = w.Close()
if err != nil {
return err
}
head := bytes.NewReader(b.Bytes()[:headSize])
tail := bytes.NewReader(b.Bytes()[headSize:])
rateLimitedRd = driver.NewLimitedUploadStream(ctx, io.MultiReader(head, reader, tail))
// build the request and set headers
req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadDomain+"/upload/v2/file/slice", rateLimitedRd)
if err != nil {
return err
}
// set the request headers
req.Header.Add("Authorization", "Bearer "+d.AccessToken)
req.Header.Add("Content-Type", w.FormDataContentType())
req.Header.Add("Platform", "open_platform")
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != 200 {
return fmt.Errorf("slice %d upload failed, status code: %d", partNumber, res.StatusCode)
}
var resp BaseResp
respBody, err := io.ReadAll(res.Body)
if err != nil {
return err
}
err = json.Unmarshal(respBody, &resp)
if err != nil {
return err
}
if resp.Code != 0 {
return fmt.Errorf("slice %d upload failed: %s", partNumber, resp.Message)
}
progress := 10.0 + 85.0*float64(threadG.Success())/float64(uploadNums)
up(progress)
return nil
},
After: func(err error) {
ss.FreeSectionReader(reader)
},
})
}
if err := threadG.Wait(); err != nil {
return err
}
return nil
}
// complete the upload
func (d *Open123) complete(preuploadID string) (*UploadCompleteResp, error) {
var resp UploadCompleteResp
_, err := d.Request(UploadComplete, http.MethodPost, func(req *resty.Request) {
@ -61,91 +183,3 @@ func (d *Open123) complete(preuploadID string) (*UploadCompleteResp, error) {
}
return &resp, nil
}
func (d *Open123) async(preuploadID string) (*UploadAsyncResp, error) {
var resp UploadAsyncResp
_, err := d.Request(UploadAsync, http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"preuploadID": preuploadID,
})
}, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}
func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createResp *UploadCreateResp, up driver.UpdateProgress) error {
size := file.GetSize()
chunkSize := createResp.Data.SliceSize
uploadNums := (size + chunkSize - 1) / chunkSize
threadG, uploadCtx := errgroup.NewGroupWithContext(ctx, d.UploadThread,
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
for partIndex := int64(0); partIndex < uploadNums; partIndex++ {
if utils.IsCanceled(uploadCtx) {
return ctx.Err()
}
partIndex := partIndex
partNumber := partIndex + 1 // part numbers start at 1
offset := partIndex * chunkSize
size := min(chunkSize, size-offset)
limitedReader, err := file.RangeRead(http_range.Range{
Start: offset,
Length: size})
if err != nil {
return err
}
limitedReader = driver.NewLimitedUploadStream(ctx, limitedReader)
threadG.Go(func(ctx context.Context) error {
uploadPartUrl, err := d.url(createResp.Data.PreuploadID, partNumber)
if err != nil {
return err
}
req, err := http.NewRequestWithContext(ctx, "PUT", uploadPartUrl, limitedReader)
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = size
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
_ = res.Body.Close()
progress := 10.0 + 85.0*float64(threadG.Success())/float64(uploadNums)
up(progress)
return nil
})
}
if err := threadG.Wait(); err != nil {
return err
}
uploadCompleteResp, err := d.complete(createResp.Data.PreuploadID)
if err != nil {
return err
}
if uploadCompleteResp.Data.Async == false || uploadCompleteResp.Data.Completed {
return nil
}
for {
uploadAsyncResp, err := d.async(createResp.Data.PreuploadID)
if err != nil {
return err
}
if uploadAsyncResp.Data.Completed {
break
}
}
up(100)
return nil
}

View File

@ -1,15 +1,20 @@
package _123_open
import (
"crypto/md5"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/go-resty/resty/v2"
"github.com/google/uuid"
log "github.com/sirupsen/logrus"
)
@ -19,16 +24,15 @@ var ( // QPS limits on the AccessToken differ by how it was obtained; modularized like this for easier maintenance
AccessToken = InitApiInfo(Api+"/api/v1/access_token", 1)
RefreshToken = InitApiInfo(Api+"/api/v1/oauth2/access_token", 1)
UserInfo = InitApiInfo(Api+"/api/v1/user/info", 1)
FileList = InitApiInfo(Api+"/api/v2/file/list", 4)
DownloadInfo = InitApiInfo(Api+"/api/v1/file/download_info", 0)
FileList = InitApiInfo(Api+"/api/v2/file/list", 3)
DownloadInfo = InitApiInfo(Api+"/api/v1/file/download_info", 5)
DirectLink = InitApiInfo(Api+"/api/v1/direct-link/url", 5)
Mkdir = InitApiInfo(Api+"/upload/v1/file/mkdir", 2)
Move = InitApiInfo(Api+"/api/v1/file/move", 1)
Rename = InitApiInfo(Api+"/api/v1/file/name", 1)
Trash = InitApiInfo(Api+"/api/v1/file/trash", 2)
UploadCreate = InitApiInfo(Api+"/upload/v1/file/create", 2)
UploadUrl = InitApiInfo(Api+"/upload/v1/file/get_upload_url", 0)
UploadComplete = InitApiInfo(Api+"/upload/v1/file/upload_complete", 0)
UploadAsync = InitApiInfo(Api+"/upload/v1/file/upload_async_result", 1)
UploadCreate = InitApiInfo(Api+"/upload/v2/file/create", 2)
UploadComplete = InitApiInfo(Api+"/upload/v2/file/upload_complete", 0)
)
func (d *Open123) Request(apiInfo *ApiInfo, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
@ -82,8 +86,24 @@ func (d *Open123) Request(apiInfo *ApiInfo, method string, callback base.ReqCall
}
func (d *Open123) flushAccessToken() error {
if d.Addition.ClientID != "" {
if d.Addition.ClientSecret != "" {
if d.ClientID != "" {
if d.RefreshToken != "" {
var resp RefreshTokenResp
_, err := d.Request(RefreshToken, http.MethodPost, func(req *resty.Request) {
req.SetQueryParam("client_id", d.ClientID)
if d.ClientSecret != "" {
req.SetQueryParam("client_secret", d.ClientSecret)
}
req.SetQueryParam("grant_type", "refresh_token")
req.SetQueryParam("refresh_token", d.RefreshToken)
}, &resp)
if err != nil {
return err
}
d.AccessToken = resp.AccessToken
d.RefreshToken = resp.RefreshToken
op.MustSaveDriverStorage(d)
} else if d.ClientSecret != "" {
var resp AccessTokenResp
_, err := d.Request(AccessToken, http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
@ -96,24 +116,38 @@ func (d *Open123) flushAccessToken() error {
}
d.AccessToken = resp.Data.AccessToken
op.MustSaveDriverStorage(d)
} else if d.Addition.RefreshToken != "" {
var resp RefreshTokenResp
_, err := d.Request(RefreshToken, http.MethodPost, func(req *resty.Request) {
req.SetQueryParam("client_id", d.ClientID)
req.SetQueryParam("grant_type", "refresh_token")
req.SetQueryParam("refresh_token", d.Addition.RefreshToken)
}, &resp)
if err != nil {
return err
}
d.AccessToken = resp.AccessToken
d.RefreshToken = resp.RefreshToken
op.MustSaveDriverStorage(d)
}
}
return nil
}
func (d *Open123) SignURL(originURL, privateKey string, uid uint64, validDuration time.Duration) (newURL string, err error) {
// generate the Unix expiry timestamp
ts := time.Now().Add(validDuration).Unix()
// generate a random string (a UUID is recommended); it must not contain hyphens
rand := strings.ReplaceAll(uuid.New().String(), "-", "")
// parse the URL
objURL, err := url.Parse(originURL)
if err != nil {
return "", err
}
// string to sign, format: path-timestamp-rand-uid-privateKey
unsignedStr := fmt.Sprintf("%s-%d-%s-%d-%s", objURL.Path, ts, rand, uid, privateKey)
md5Hash := md5.Sum([]byte(unsignedStr))
// auth parameter, format: timestamp-rand-uid-md5hash
authKey := fmt.Sprintf("%d-%s-%d-%x", ts, rand, uid, md5Hash)
// append the auth parameter to the URL query
v := objURL.Query()
v.Add("auth_key", authKey)
objURL.RawQuery = v.Encode()
return objURL.String(), nil
}
func (d *Open123) getUserInfo() (*UserInfoResp, error) {
var resp UserInfoResp
@ -124,6 +158,18 @@ func (d *Open123) getUserInfo() (*UserInfoResp, error) {
return &resp, nil
}
func (d *Open123) getUID() (uint64, error) {
if d.UID != 0 {
return d.UID, nil
}
resp, err := d.getUserInfo()
if err != nil {
return 0, err
}
d.UID = resp.Data.UID
return resp.Data.UID, nil
}
func (d *Open123) getFiles(parentFileId int64, limit int, lastFileId int64) (*FileListResp, error) {
var resp FileListResp
@ -161,6 +207,21 @@ func (d *Open123) getDownloadInfo(fileId int64) (*DownloadInfoResp, error) {
return &resp, nil
}
func (d *Open123) getDirectLink(fileId int64) (*DirectLinkResp, error) {
var resp DirectLinkResp
_, err := d.Request(DirectLink, http.MethodGet, func(req *resty.Request) {
req.SetQueryParams(map[string]string{
"fileID": strconv.FormatInt(fileId, 10),
})
}, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}
func (d *Open123) mkdir(parentID int64, name string) error {
_, err := d.Request(Mkdir, http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{

View File

@ -70,14 +70,6 @@ func (d *Pan123Share) List(ctx context.Context, dir model.Obj, args model.ListAr
func (d *Pan123Share) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
// TODO return link of file, required
if f, ok := file.(File); ok {
//var resp DownResp
var headers map[string]string
if !utils.IsLocalIPAddr(args.IP) {
headers = map[string]string{
//"X-Real-IP": "1.1.1.1",
"X-Forwarded-For": args.IP,
}
}
data := base.Json{
"shareKey": d.ShareKey,
"SharePwd": d.SharePwd,
@ -87,25 +79,27 @@ func (d *Pan123Share) Link(ctx context.Context, file model.Obj, args model.LinkA
"size": f.Size,
}
resp, err := d.request(DownloadInfo, http.MethodPost, func(req *resty.Request) {
req.SetBody(data).SetHeaders(headers)
req.SetBody(data)
}, nil)
if err != nil {
return nil, err
}
downloadUrl := utils.Json.Get(resp, "data", "DownloadURL").ToString()
u, err := url.Parse(downloadUrl)
ou, err := url.Parse(downloadUrl)
if err != nil {
return nil, err
}
nu := u.Query().Get("params")
u_ := ou.String()
nu := ou.Query().Get("params")
if nu != "" {
du, _ := base64.StdEncoding.DecodeString(nu)
u, err = url.Parse(string(du))
u, err := url.Parse(string(du))
if err != nil {
return nil, err
}
u_ = u.String()
}
u_ := u.String()
log.Debug("download url: ", u_)
res, err := base.NoRedirectClient.R().SetHeader("Referer", "https://www.123pan.com/").Get(u_)
if err != nil {
@ -122,7 +116,7 @@ func (d *Pan123Share) Link(ctx context.Context, file model.Obj, args model.LinkA
link.URL = utils.Json.Get(res.Body(), "data", "redirect_url").ToString()
}
link.Header = http.Header{
"Referer": []string{"https://www.123pan.com/"},
"Referer": []string{fmt.Sprintf("%s://%s/", ou.Scheme, ou.Host)},
}
return &link, nil
}

View File

@ -522,32 +522,27 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
var err error
fullHash := stream.GetHash().GetHash(utils.SHA256)
if len(fullHash) != utils.SHA256.Width {
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, fullHash, err = streamPkg.CacheFullInTempFileAndHash(stream, cacheFileProgress, utils.SHA256)
_, fullHash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA256)
if err != nil {
return err
}
}
size := stream.GetSize()
var partSize = d.getPartSize(size)
part := size / partSize
if size%partSize > 0 {
part++
} else if part == 0 {
part = 1
partSize := d.getPartSize(size)
part := int64(1)
if size > partSize {
part = (size + partSize - 1) / partSize
}
// generate all partInfos
partInfos := make([]PartInfo, 0, part)
for i := int64(0); i < part; i++ {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
start := i * partSize
byteSize := size - start
if byteSize > partSize {
byteSize = partSize
}
byteSize := min(size-start, partSize)
partNumber := i + 1
partInfo := PartInfo{
PartNumber: partNumber,
@ -595,17 +590,20 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
// resp.Data.RapidUpload == true means rapid upload is supported, but here we simply check whether part upload URLs were returned
// conflicts still need to be handled manually in the rapid-upload case
if resp.Data.PartInfos != nil {
// read the upload URLs of the first 100 parts
uploadPartInfos := resp.Data.PartInfos
// Progress
p := driver.NewProgress(size, up)
rateLimited := driver.NewLimitedUploadStream(ctx, stream)
// fetch upload URLs for the subsequent parts
for i := 101; i < len(partInfos); i += 100 {
end := i + 100
if end > len(partInfos) {
end = len(partInfos)
}
// upload the first 100 parts first
err = d.uploadPersonalParts(ctx, partInfos, resp.Data.PartInfos, rateLimited, p)
if err != nil {
return err
}
// if parts remain, fetch their upload URLs in batches and upload them
for i := 100; i < len(partInfos); i += 100 {
end := min(i+100, len(partInfos))
batchPartInfos := partInfos[i:end]
moredata := base.Json{
"fileId": resp.Data.FileId,
"uploadId": resp.Data.UploadId,
@ -621,45 +619,13 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
if err != nil {
return err
}
uploadPartInfos = append(uploadPartInfos, moreresp.Data.PartInfos...)
}
// Progress
p := driver.NewProgress(size, up)
rateLimited := driver.NewLimitedUploadStream(ctx, stream)
// upload all parts
for _, uploadPartInfo := range uploadPartInfos {
index := uploadPartInfo.PartNumber - 1
partSize := partInfos[index].PartSize
log.Debugf("[139] uploading part %+v/%+v", index, len(uploadPartInfos))
limitReader := io.LimitReader(rateLimited, partSize)
// Update Progress
r := io.TeeReader(limitReader, p)
req, err := http.NewRequest("PUT", uploadPartInfo.UploadUrl, r)
err = d.uploadPersonalParts(ctx, partInfos, moreresp.Data.PartInfos, rateLimited, p)
if err != nil {
return err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Content-Length", fmt.Sprint(partSize))
req.Header.Set("Origin", "https://yun.139.com")
req.Header.Set("Referer", "https://yun.139.com/")
req.ContentLength = partSize
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
_ = res.Body.Close()
log.Debugf("[139] uploaded: %+v", res)
if res.StatusCode != http.StatusOK {
return fmt.Errorf("unexpected status code: %d", res.StatusCode)
}
}
// complete the upload once all parts are uploaded
data = base.Json{
"contentHash": fullHash,
"contentHashAlgorithm": "SHA256",
@ -788,12 +754,10 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
size := stream.GetSize()
// Progress
p := driver.NewProgress(size, up)
var partSize = d.getPartSize(size)
part := size / partSize
if size%partSize > 0 {
part++
} else if part == 0 {
part = 1
partSize := d.getPartSize(size)
part := int64(1)
if size > partSize {
part = (size + partSize - 1) / partSize
}
rateLimited := driver.NewLimitedUploadStream(ctx, stream)
for i := int64(0); i < part; i++ {
@ -807,12 +771,10 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
limitReader := io.LimitReader(rateLimited, byteSize)
// Update Progress
r := io.TeeReader(limitReader, p)
req, err := http.NewRequest("POST", resp.Data.UploadResult.RedirectionURL, r)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, resp.Data.UploadResult.RedirectionURL, r)
if err != nil {
return err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "text/plain;name="+unicode(stream.GetName()))
req.Header.Set("contentSize", strconv.FormatInt(size, 10))
req.Header.Set("range", fmt.Sprintf("bytes=%d-%d", start, start+byteSize-1))

View File

@ -1,9 +1,11 @@
package _139
import (
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"path"
@ -13,6 +15,7 @@ import (
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
@ -623,3 +626,47 @@ func (d *Yun139) getPersonalCloudHost() string {
}
return d.PersonalCloudHost
}
func (d *Yun139) uploadPersonalParts(ctx context.Context, partInfos []PartInfo, uploadPartInfos []PersonalPartInfo, rateLimited *driver.RateLimitReader, p *driver.Progress) error {
// ensure the slice is sorted by PartNumber in ascending order
sort.Slice(uploadPartInfos, func(i, j int) bool {
return uploadPartInfos[i].PartNumber < uploadPartInfos[j].PartNumber
})
for _, uploadPartInfo := range uploadPartInfos {
index := uploadPartInfo.PartNumber - 1
if index < 0 || index >= len(partInfos) {
return fmt.Errorf("invalid PartNumber %d: index out of bounds (partInfos length: %d)", uploadPartInfo.PartNumber, len(partInfos))
}
partSize := partInfos[index].PartSize
log.Debugf("[139] uploading part %+v/%+v", index, len(partInfos))
limitReader := io.LimitReader(rateLimited, partSize)
r := io.TeeReader(limitReader, p)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadPartInfo.UploadUrl, r)
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Content-Length", fmt.Sprint(partSize))
req.Header.Set("Origin", "https://yun.139.com")
req.Header.Set("Referer", "https://yun.139.com/")
req.ContentLength = partSize
err = func() error {
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
log.Debugf("[139] uploaded: %+v", res)
if res.StatusCode != http.StatusOK {
body, _ := io.ReadAll(res.Body)
return fmt.Errorf("unexpected status code: %d, body: %s", res.StatusCode, string(body))
}
return nil
}()
if err != nil {
return err
}
}
return nil
}
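uploadPersonalParts wraps each request in an immediately invoked closure so that defer res.Body.Close() runs at the end of every loop iteration instead of piling up until the function returns. A stdlib-only sketch of the same pattern (hypothetical URLs):

package main

import (
	"fmt"
	"io"
	"net/http"
)

// fetchOnce owns exactly one response body; the deferred Close
// runs when this function returns, i.e. once per loop iteration.
func fetchOnce(url string) error {
	res, err := http.Get(url)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(res.Body)
		return fmt.Errorf("unexpected status code: %d, body: %s", res.StatusCode, string(body))
	}
	return nil
}

func main() {
	for _, u := range []string{"https://example.com/a", "https://example.com/b"} { // hypothetical
		if err := fetchOnce(u); err != nil {
			fmt.Println(err)
			return
		}
	}
}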

View File

@ -365,11 +365,10 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
log.Debugf("uploadData: %+v", uploadData)
requestURL := uploadData.RequestURL
uploadHeaders := strings.Split(decodeURIComponent(uploadData.RequestHeader), "&")
req, err := http.NewRequest(http.MethodPut, requestURL, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
req, err := http.NewRequestWithContext(ctx, http.MethodPut, requestURL, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
if err != nil {
return err
}
req = req.WithContext(ctx)
for _, v := range uploadHeaders {
i := strings.Index(v, "=")
req.Header.Set(v[0:i], v[i+1:])

View File

@ -5,17 +5,19 @@ import (
"encoding/base64"
"encoding/xml"
"fmt"
"github.com/skip2/go-qrcode"
"io"
"net/http"
"strconv"
"strings"
"time"
"github.com/skip2/go-qrcode"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/go-resty/resty/v2"
@ -129,6 +131,7 @@ func (y *Cloud189TV) put(ctx context.Context, url string, headers map[string]str
}
}
// http.Client will Close Request.Body once the request completes
resp, err := base.HttpClient.Do(req)
if err != nil {
return nil, err
@ -311,11 +314,14 @@ func (y *Cloud189TV) RapidUpload(ctx context.Context, dstDir model.Obj, stream m
// Legacy upload; the family cloud does not support overwriting
func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
tempFile, err := file.CacheFullInTempFile()
if err != nil {
return nil, err
fileMd5 := file.GetHash().GetHash(utils.MD5)
var tempFile = file.GetFile()
var err error
if len(fileMd5) != utils.MD5.Width {
tempFile, fileMd5, err = stream.CacheFullAndHash(file, &up, utils.MD5)
} else if tempFile == nil {
tempFile, err = file.CacheFullAndWriter(&up, nil)
}
fileMd5, err := utils.HashFile(utils.MD5, tempFile)
if err != nil {
return nil, err
}
@ -328,6 +334,10 @@ func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model
// The file does not exist in the cloud drive; start uploading
status := GetUploadFileStatusResp{CreateUploadFileResp: *uploadInfo}
// driver.RateLimitReader will try to Close the underlying reader,
// but tempFile here is an *os.File, which cannot be read again once closed,
// so wrap it with io.NopCloser
rateLimitedRd := driver.NewLimitedUploadStream(ctx, io.NopCloser(tempFile))
for status.GetSize() < file.GetSize() && status.FileDataExists != 1 {
if utils.IsCanceled(ctx) {
return nil, ctx.Err()
@ -345,7 +355,7 @@ func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model
header["Edrive-UploadFileId"] = fmt.Sprint(status.UploadFileId)
}
_, err := y.put(ctx, status.FileUploadUrl, header, true, io.NopCloser(tempFile), isFamily)
_, err := y.put(ctx, status.FileUploadUrl, header, true, rateLimitedRd, isFamily)
if err, ok := err.(*RespErr); ok && err.Code != "InputStreamReadError" {
return nil, err
}
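The comment above explains the guard: the rate-limited upload stream closes the reader it is handed, and an *os.File cannot be read after Close. A stdlib-only sketch of the io.NopCloser trick, assuming only that the consumer calls Close (a strings.Reader stands in for the temp file):

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	underlying := strings.NewReader("part data") // stands in for the temp file
	rc := io.NopCloser(underlying)

	_ = rc.Close() // a consumer closing rc is now a no-op

	buf := make([]byte, 4)
	n, err := underlying.Read(buf) // the underlying reader is still usable
	fmt.Println(n, err, string(buf[:n]))
}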

View File

@ -7,6 +7,7 @@ import (
"encoding/hex"
"encoding/xml"
"fmt"
"hash"
"io"
"net/http"
"net/http/cookiejar"
@ -471,14 +472,16 @@ func (y *Cloud189PC) refreshSession() (err error) {
// Normal upload
// Files with a size of 0 cannot be uploaded
func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
size := file.GetSize()
sliceSize := partSize(size)
// File size
fileSize := file.GetSize()
// Slice size; must not equal the file size
sliceSize := partSize(fileSize)
params := Params{
"parentFolderId": dstDir.GetID(),
"fileName": url.QueryEscape(file.GetName()),
"fileSize": fmt.Sprint(file.GetSize()),
"sliceSize": fmt.Sprint(sliceSize),
"fileSize": fmt.Sprint(fileSize),
"sliceSize": fmt.Sprint(sliceSize), // 必须为特定分片大小
"lazyCheck": "1",
}
@ -500,66 +503,101 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
return nil, err
}
threadG, upCtx := errgroup.NewGroupWithContext(ctx, y.uploadThread,
ss, err := stream.NewStreamSectionReader(file, int(sliceSize), &up)
if err != nil {
return nil, err
}
threadG, upCtx := errgroup.NewOrderedGroupWithContext(ctx, y.uploadThread,
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
count := int(size / sliceSize)
lastPartSize := size % sliceSize
if lastPartSize > 0 {
count++
} else {
count := 1
if fileSize > sliceSize {
count = int((fileSize + sliceSize - 1) / sliceSize)
}
lastPartSize := fileSize % sliceSize
if lastPartSize == 0 {
lastPartSize = sliceSize
}
fileMd5 := utils.MD5.NewFunc()
silceMd5 := utils.MD5.NewFunc()
silceMd5Hexs := make([]string, 0, count)
teeReader := io.TeeReader(file, io.MultiWriter(fileMd5, silceMd5))
byteSize := sliceSize
silceMd5 := utils.MD5.NewFunc()
var writers io.Writer = silceMd5
fileMd5Hex := file.GetHash().GetHash(utils.MD5)
var fileMd5 hash.Hash
if len(fileMd5Hex) != utils.MD5.Width {
fileMd5 = utils.MD5.NewFunc()
writers = io.MultiWriter(silceMd5, fileMd5)
}
for i := 1; i <= count; i++ {
if utils.IsCanceled(upCtx) {
break
}
offset := int64((i)-1) * sliceSize
partSize := sliceSize
if i == count {
byteSize = lastPartSize
}
byteData := make([]byte, byteSize)
// Read the chunk
silceMd5.Reset()
if _, err := io.ReadFull(teeReader, byteData); err != io.EOF && err != nil {
return nil, err
partSize = lastPartSize
}
partInfo := ""
var reader *stream.SectionReader
var rateLimitedRd io.Reader
threadG.GoWithLifecycle(errgroup.Lifecycle{
Before: func(ctx context.Context) error {
if reader == nil {
var err error
reader, err = ss.GetSectionReader(offset, partSize)
if err != nil {
return err
}
silceMd5.Reset()
w, err := utils.CopyWithBuffer(writers, reader)
if w != partSize {
return fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", partSize, w, err)
}
// Compute the chunk MD5 and encode it as hex and base64
md5Bytes := silceMd5.Sum(nil)
silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Bytes)))
partInfo = fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes))
// Compute the chunk MD5 and encode it as hex and base64
md5Bytes := silceMd5.Sum(nil)
silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Bytes)))
partInfo := fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes))
rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
}
return nil
},
Do: func(ctx context.Context) error {
reader.Seek(0, io.SeekStart)
uploadUrls, err := y.GetMultiUploadUrls(ctx, isFamily, initMultiUpload.Data.UploadFileID, partInfo)
if err != nil {
return err
}
threadG.Go(func(ctx context.Context) error {
uploadUrls, err := y.GetMultiUploadUrls(ctx, isFamily, initMultiUpload.Data.UploadFileID, partInfo)
if err != nil {
return err
}
// step.4 upload the slice
uploadUrl := uploadUrls[0]
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false,
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)), isFamily)
if err != nil {
return err
}
up(float64(threadG.Success()) * 100 / float64(count))
return nil
})
// step.4 upload the slice
uploadUrl := uploadUrls[0]
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false,
driver.NewLimitedUploadStream(ctx, rateLimitedRd), isFamily)
if err != nil {
return err
}
up(float64(threadG.Success()) * 100 / float64(count))
return nil
},
After: func(err error) {
ss.FreeSectionReader(reader)
},
},
)
}
if err = threadG.Wait(); err != nil {
return nil, err
}
fileMd5Hex := strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
if fileMd5 != nil {
fileMd5Hex = strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
}
sliceMd5Hex := fileMd5Hex
if file.GetSize() > sliceSize {
if fileSize > sliceSize {
sliceMd5Hex = strings.ToUpper(utils.GetMD5EncodeStr(strings.Join(silceMd5Hexs, "\n")))
}
@ -620,11 +658,12 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
cache = tmpF
}
sliceSize := partSize(size)
count := int(size / sliceSize)
count := 1
if size > sliceSize {
count = int((size + sliceSize - 1) / sliceSize)
}
lastSliceSize := size % sliceSize
if lastSliceSize > 0 {
count++
} else {
if lastSliceSize == 0 {
lastSliceSize = sliceSize
}
@ -738,7 +777,8 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
}
// step.4 上传切片
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, io.NewSectionReader(cache, offset, byteSize), isFamily)
rateLimitedRd := driver.NewLimitedUploadStream(ctx, io.NewSectionReader(cache, offset, byteSize))
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, rateLimitedRd, isFamily)
if err != nil {
return err
}
@ -820,9 +860,7 @@ func (y *Cloud189PC) GetMultiUploadUrls(ctx context.Context, isFamily bool, uplo
// Legacy upload; the family cloud does not support overwriting
func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
tempFile, fileMd5, err := stream.CacheFullInTempFileAndHash(file, cacheFileProgress, utils.MD5)
tempFile, fileMd5, err := stream.CacheFullAndHash(file, &up, utils.MD5)
if err != nil {
return nil, err
}
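The reworked StreamUpload computes each slice's MD5 once in the Before hook, then rewinds the same section reader before every upload attempt in Do. A stdlib-only sketch of that hash-once, seek-and-retry pattern (the real code uses OpenList's StreamSectionReader and ordered errgroup, which are assumed here, not reproduced):

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

// uploadPart stands in for the real HTTP PUT of one slice.
func uploadPart(r io.Reader) error {
	_, err := io.Copy(io.Discard, r)
	return err
}

func main() {
	file := strings.NewReader("0123456789abcdef")
	part := io.NewSectionReader(file, 0, 8) // first 8-byte slice

	h := md5.New()
	if _, err := io.Copy(h, part); err != nil { // hash the slice exactly once
		panic(err)
	}
	fmt.Println("slice md5:", hex.EncodeToString(h.Sum(nil)))

	for attempt := 1; attempt <= 3; attempt++ {
		if _, err := part.Seek(0, io.SeekStart); err != nil { // rewind before every attempt
			panic(err)
		}
		if err := uploadPart(part); err == nil {
			break
		}
	}
}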

View File

@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"io"
"net/url"
stdpath "path"
"strings"
@ -12,6 +13,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/fs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/sign"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
@ -78,10 +80,18 @@ func (d *Alias) Get(ctx context.Context, path string) (model.Obj, error) {
return nil, errs.ObjectNotFound
}
for _, dst := range dsts {
obj, err := d.get(ctx, path, dst, sub)
if err == nil {
return obj, nil
obj, err := fs.Get(ctx, stdpath.Join(dst, sub), &fs.GetArgs{NoLog: true})
if err != nil {
continue
}
return &model.Object{
Path: path,
Name: obj.GetName(),
Size: obj.GetSize(),
Modified: obj.ModTime(),
IsFolder: obj.IsDir(),
HashInfo: obj.GetHash(),
}, nil
}
return nil, errs.ObjectNotFound
}
@ -99,7 +109,27 @@ func (d *Alias) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
var objs []model.Obj
fsArgs := &fs.ListArgs{NoLog: true, Refresh: args.Refresh}
for _, dst := range dsts {
tmp, err := d.list(ctx, dst, sub, fsArgs)
tmp, err := fs.List(ctx, stdpath.Join(dst, sub), fsArgs)
if err == nil {
tmp, err = utils.SliceConvert(tmp, func(obj model.Obj) (model.Obj, error) {
thumb, ok := model.GetThumb(obj)
objRes := model.Object{
Name: obj.GetName(),
Size: obj.GetSize(),
Modified: obj.ModTime(),
IsFolder: obj.IsDir(),
}
if !ok {
return &objRes, nil
}
return &model.ObjThumb{
Object: objRes,
Thumbnail: model.Thumbnail{
Thumbnail: thumb,
},
}, nil
})
}
if err == nil {
objs = append(objs, tmp...)
}
@ -113,45 +143,45 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
if !ok {
return nil, errs.ObjectNotFound
}
// proxy || ftp,s3
if common.GetApiUrl(ctx) == "" {
args.Redirect = false
}
for _, dst := range dsts {
reqPath := stdpath.Join(dst, sub)
link, file, err := d.link(ctx, reqPath, args)
link, fi, err := d.link(ctx, reqPath, args)
if err != nil {
continue
}
var resultLink *model.Link
if link != nil {
resultLink = &model.Link{
URL: link.URL,
Header: link.Header,
RangeReader: link.RangeReader,
SyncClosers: utils.NewSyncClosers(link),
ContentLength: link.ContentLength,
}
if link.MFile != nil {
resultLink.RangeReader = &model.FileRangeReader{
RangeReaderIF: stream.GetRangeReaderFromMFile(file.GetSize(), link.MFile),
}
}
} else {
resultLink = &model.Link{
if link == nil {
// Redirect requested, but it must be served through the proxy
return &model.Link{
URL: fmt.Sprintf("%s/p%s?sign=%s",
common.GetApiUrl(ctx),
utils.EncodePath(reqPath, true),
sign.Sign(reqPath)),
}
}, nil
}
resultLink := *link
resultLink.SyncClosers = utils.NewSyncClosers(link)
if args.Redirect {
return &resultLink, nil
}
if !args.Redirect {
if d.DownloadConcurrency > 0 {
resultLink.Concurrency = d.DownloadConcurrency
}
if d.DownloadPartSize > 0 {
resultLink.PartSize = d.DownloadPartSize * utils.KB
}
if resultLink.ContentLength == 0 {
resultLink.ContentLength = fi.GetSize()
}
return resultLink, nil
if resultLink.MFile != nil {
return &resultLink, nil
}
if d.DownloadConcurrency > 0 {
resultLink.Concurrency = d.DownloadConcurrency
}
if d.DownloadPartSize > 0 {
resultLink.PartSize = d.DownloadPartSize * utils.KB
}
return &resultLink, nil
}
return nil, errs.ObjectNotFound
}
@ -278,24 +308,29 @@ func (d *Alias) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer,
reqPath, err := d.getReqPath(ctx, dstDir, true)
if err == nil {
if len(reqPath) == 1 {
return fs.PutDirectly(ctx, *reqPath[0], &stream.FileStream{
Obj: s,
Mimetype: s.GetMimetype(),
WebPutAsTask: s.NeedStore(),
Reader: s,
})
} else {
file, err := s.CacheFullInTempFile()
storage, reqActualPath, err := op.GetStorageAndActualPath(*reqPath[0])
if err != nil {
return err
}
for _, path := range reqPath {
return op.Put(ctx, storage, reqActualPath, &stream.FileStream{
Obj: s,
Mimetype: s.GetMimetype(),
Reader: s,
}, up)
} else {
file, err := s.CacheFullAndWriter(nil, nil)
if err != nil {
return err
}
count := float64(len(reqPath) + 1)
up(100 / count)
for i, path := range reqPath {
err = errors.Join(err, fs.PutDirectly(ctx, *path, &stream.FileStream{
Obj: s,
Mimetype: s.GetMimetype(),
WebPutAsTask: s.NeedStore(),
Reader: file,
Obj: s,
Mimetype: s.GetMimetype(),
Reader: file,
}))
up(float64(i+2) / float64(count) * 100)
_, e := file.Seek(0, io.SeekStart)
if e != nil {
return errors.Join(err, e)
@ -367,10 +402,24 @@ func (d *Alias) Extract(ctx context.Context, obj model.Obj, args model.ArchiveIn
return nil, errs.ObjectNotFound
}
for _, dst := range dsts {
link, err := d.extract(ctx, dst, sub, args)
if err == nil {
return link, nil
reqPath := stdpath.Join(dst, sub)
link, err := d.extract(ctx, reqPath, args)
if err != nil {
continue
}
if link == nil {
return &model.Link{
URL: fmt.Sprintf("%s/ap%s?inner=%s&pass=%s&sign=%s",
common.GetApiUrl(ctx),
utils.EncodePath(reqPath, true),
utils.EncodePath(args.InnerPath, true),
url.QueryEscape(args.Password),
sign.SignArchive(reqPath)),
}, nil
}
resultLink := *link
resultLink.SyncClosers = utils.NewSyncClosers(link)
return &resultLink, nil
}
return nil, errs.NotImplement
}
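When the inner storage yields no direct link, the alias driver now answers with a signed /p proxy URL so the file is streamed through OpenList itself. A sketch of that URL shape with a hypothetical HMAC signer (the real code uses OpenList's sign.Sign and utils.EncodePath):

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"net/url"
)

// signPath is a hypothetical stand-in for sign.Sign.
func signPath(secret, p string) string {
	mac := hmac.New(sha256.New, []byte(secret))
	mac.Write([]byte(p))
	return base64.URLEncoding.EncodeToString(mac.Sum(nil))
}

func main() {
	apiURL := "https://openlist.example.com" // hypothetical server address
	reqPath := "/movies/demo.mkv"
	escaped := (&url.URL{Path: reqPath}).EscapedPath() // keep "/" while escaping the rest
	fmt.Printf("%s/p%s?sign=%s\n", apiURL, escaped, signPath("secret", reqPath))
}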

View File

@ -2,8 +2,6 @@ package alias
import (
"context"
"fmt"
"net/url"
stdpath "path"
"strings"
@ -12,8 +10,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/fs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/sign"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/OpenList/v4/server/common"
)
@ -54,55 +50,12 @@ func (d *Alias) getRootAndPath(path string) (string, string) {
return parts[0], parts[1]
}
func (d *Alias) get(ctx context.Context, path string, dst, sub string) (model.Obj, error) {
obj, err := fs.Get(ctx, stdpath.Join(dst, sub), &fs.GetArgs{NoLog: true})
if err != nil {
return nil, err
}
return &model.Object{
Path: path,
Name: obj.GetName(),
Size: obj.GetSize(),
Modified: obj.ModTime(),
IsFolder: obj.IsDir(),
HashInfo: obj.GetHash(),
}, nil
}
func (d *Alias) list(ctx context.Context, dst, sub string, args *fs.ListArgs) ([]model.Obj, error) {
objs, err := fs.List(ctx, stdpath.Join(dst, sub), args)
// the obj must implement the model.SetPath interface
// return objs, err
if err != nil {
return nil, err
}
return utils.SliceConvert(objs, func(obj model.Obj) (model.Obj, error) {
thumb, ok := model.GetThumb(obj)
objRes := model.Object{
Name: obj.GetName(),
Size: obj.GetSize(),
Modified: obj.ModTime(),
IsFolder: obj.IsDir(),
}
if !ok {
return &objRes, nil
}
return &model.ObjThumb{
Object: objRes,
Thumbnail: model.Thumbnail{
Thumbnail: thumb,
},
}, nil
})
}
func (d *Alias) link(ctx context.Context, reqPath string, args model.LinkArgs) (*model.Link, model.Obj, error) {
storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
if err != nil {
return nil, nil, err
}
// proxy || ftp,s3
if !args.Redirect || len(common.GetApiUrl(ctx)) == 0 {
if !args.Redirect {
return op.Link(ctx, storage, reqActualPath, args)
}
obj, err := fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
@ -183,8 +136,7 @@ func (d *Alias) listArchive(ctx context.Context, dst, sub string, args model.Arc
return nil, errs.NotImplement
}
func (d *Alias) extract(ctx context.Context, dst, sub string, args model.ArchiveInnerArgs) (*model.Link, error) {
reqPath := stdpath.Join(dst, sub)
func (d *Alias) extract(ctx context.Context, reqPath string, args model.ArchiveInnerArgs) (*model.Link, error) {
storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
if err != nil {
return nil, err
@ -192,20 +144,12 @@ func (d *Alias) extract(ctx context.Context, dst, sub string, args model.Archive
if _, ok := storage.(driver.ArchiveReader); !ok {
return nil, errs.NotImplement
}
if args.Redirect && common.ShouldProxy(storage, stdpath.Base(sub)) {
_, err = fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
if err != nil {
if args.Redirect && common.ShouldProxy(storage, stdpath.Base(reqPath)) {
_, err := fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
if err == nil {
return nil, nil
}
link := &model.Link{
URL: fmt.Sprintf("%s/ap%s?inner=%s&pass=%s&sign=%s",
common.GetApiUrl(ctx),
utils.EncodePath(reqPath, true),
utils.EncodePath(args.InnerPath, true),
url.QueryEscape(args.Password),
sign.SignArchive(reqPath)),
}
return link, nil
return nil, err
}
link, _, err := op.DriverExtract(ctx, storage, reqActualPath, args)
return link, err

View File

@ -297,11 +297,10 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
if d.InternalUpload {
url = partInfo.InternalUploadUrl
}
req, err := http.NewRequest("PUT", url, io.LimitReader(rateLimited, DEFAULT))
req, err := http.NewRequestWithContext(ctx, http.MethodPut, url, io.LimitReader(rateLimited, DEFAULT))
if err != nil {
return err
}
req = req.WithContext(ctx)
res, err := base.HttpClient.Do(req)
if err != nil {
return err

View File

@ -3,7 +3,6 @@ package aliyundrive_open
import (
"context"
"errors"
"fmt"
"net/http"
"path/filepath"
"time"
@ -13,7 +12,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/rateg"
"github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
)
@ -24,9 +22,8 @@ type AliyundriveOpen struct {
DriveId string
limitList func(ctx context.Context, data base.Json) (*Files, error)
limitLink func(ctx context.Context, file model.Obj) (*model.Link, error)
ref *AliyundriveOpen
limiter *limiter
ref *AliyundriveOpen
}
func (d *AliyundriveOpen) Config() driver.Config {
@ -38,25 +35,23 @@ func (d *AliyundriveOpen) GetAddition() driver.Additional {
}
func (d *AliyundriveOpen) Init(ctx context.Context) error {
d.limiter = getLimiterForUser(globalLimiterUserID) // First create a globally shared limiter to limit the initial requests.
if d.LIVPDownloadFormat == "" {
d.LIVPDownloadFormat = "jpeg"
}
if d.DriveType == "" {
d.DriveType = "default"
}
res, err := d.request("/adrive/v1.0/user/getDriveInfo", http.MethodPost, nil)
res, err := d.request(ctx, limiterOther, "/adrive/v1.0/user/getDriveInfo", http.MethodPost, nil)
if err != nil {
d.limiter.free()
d.limiter = nil
return err
}
d.DriveId = utils.Json.Get(res, d.DriveType+"_drive_id").ToString()
d.limitList = rateg.LimitFnCtx(d.list, rateg.LimitFnOption{
Limit: 4,
Bucket: 1,
})
d.limitLink = rateg.LimitFnCtx(d.link, rateg.LimitFnOption{
Limit: 1,
Bucket: 1,
})
userid := utils.Json.Get(res, "user_id").ToString()
d.limiter.free()
d.limiter = getLimiterForUser(userid) // Allocate a corresponding limiter for each user.
return nil
}
@ -70,6 +65,8 @@ func (d *AliyundriveOpen) InitReference(storage driver.Driver) error {
}
func (d *AliyundriveOpen) Drop(ctx context.Context) error {
d.limiter.free()
d.limiter = nil
d.ref = nil
return nil
}
@ -87,9 +84,6 @@ func (d *AliyundriveOpen) GetRoot(ctx context.Context) (model.Obj, error) {
}
func (d *AliyundriveOpen) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
if d.limitList == nil {
return nil, fmt.Errorf("driver not init")
}
files, err := d.getFiles(ctx, dir.GetID())
if err != nil {
return nil, err
@ -107,8 +101,8 @@ func (d *AliyundriveOpen) List(ctx context.Context, dir model.Obj, args model.Li
return objs, err
}
func (d *AliyundriveOpen) link(ctx context.Context, file model.Obj) (*model.Link, error) {
res, err := d.request("/adrive/v1.0/openFile/getDownloadUrl", http.MethodPost, func(req *resty.Request) {
func (d *AliyundriveOpen) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
res, err := d.request(ctx, limiterLink, "/adrive/v1.0/openFile/getDownloadUrl", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"file_id": file.GetID(),
@ -132,17 +126,10 @@ func (d *AliyundriveOpen) link(ctx context.Context, file model.Obj) (*model.Link
}, nil
}
func (d *AliyundriveOpen) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if d.limitLink == nil {
return nil, fmt.Errorf("driver not init")
}
return d.limitLink(ctx, file)
}
func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
nowTime, _ := getNowTime()
newDir := File{CreatedAt: nowTime, UpdatedAt: nowTime}
_, err := d.request("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"parent_file_id": parentDir.GetID(),
@ -168,7 +155,7 @@ func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirN
func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
var resp MoveOrCopyResp
_, err := d.request("/adrive/v1.0/openFile/move", http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/move", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"file_id": srcObj.GetID(),
@ -198,7 +185,7 @@ func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (m
func (d *AliyundriveOpen) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
var newFile File
_, err := d.request("/adrive/v1.0/openFile/update", http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/update", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"file_id": srcObj.GetID(),
@ -230,7 +217,7 @@ func (d *AliyundriveOpen) Rename(ctx context.Context, srcObj model.Obj, newName
func (d *AliyundriveOpen) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
var resp MoveOrCopyResp
_, err := d.request("/adrive/v1.0/openFile/copy", http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/copy", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"file_id": srcObj.GetID(),
@ -256,7 +243,7 @@ func (d *AliyundriveOpen) Remove(ctx context.Context, obj model.Obj) error {
if d.RemoveWay == "delete" {
uri = "/adrive/v1.0/openFile/delete"
}
_, err := d.request(uri, http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterOther, uri, http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"file_id": obj.GetID(),
@ -295,7 +282,7 @@ func (d *AliyundriveOpen) Other(ctx context.Context, args model.OtherArgs) (inte
default:
return nil, errs.NotSupport
}
_, err := d.request(uri, http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterOther, uri, http.MethodPost, func(req *resty.Request) {
req.SetBody(data).SetResult(&resp)
})
if err != nil {

View File

@ -0,0 +1,96 @@
package aliyundrive_open
import (
"context"
"fmt"
"sync"
"golang.org/x/time/rate"
)
// See document https://www.yuque.com/aliyundrive/zpfszx/mqocg38hlxzc5vcd
// See issue https://github.com/OpenListTeam/OpenList/issues/724
// The limit applies per user per app, so the limiter should be global.
type limiterType int
const (
limiterList limiterType = iota
limiterLink
limiterOther
)
const (
listRateLimit = 3.9 // 4 per second in document, but we use 3.9 per second to be safe
linkRateLimit = 0.9 // 1 per second in document, but we use 0.9 per second to be safe
otherRateLimit = 14.9 // 15 per second in document, but we use 14.9 per second to be safe
globalLimiterUserID = "" // Global limiter user ID, used to limit the initial requests.
)
type limiter struct {
usedBy int
list *rate.Limiter
link *rate.Limiter
other *rate.Limiter
}
var limiters = make(map[string]*limiter)
var limitersLock = &sync.Mutex{}
func getLimiterForUser(userid string) *limiter {
limitersLock.Lock()
defer limitersLock.Unlock()
defer func() {
// Clean up limiters that are no longer used.
for id, lim := range limiters {
if lim.usedBy <= 0 && id != globalLimiterUserID { // Do not delete the global limiter.
delete(limiters, id)
}
}
}()
if lim, ok := limiters[userid]; ok {
lim.usedBy++
return lim
}
lim := &limiter{
usedBy: 1,
list: rate.NewLimiter(rate.Limit(listRateLimit), 1),
link: rate.NewLimiter(rate.Limit(linkRateLimit), 1),
other: rate.NewLimiter(rate.Limit(otherRateLimit), 1),
}
limiters[userid] = lim
return lim
}
func (l *limiter) wait(ctx context.Context, typ limiterType) error {
if l == nil {
return fmt.Errorf("driver not init")
}
switch typ {
case limiterList:
return l.list.Wait(ctx)
case limiterLink:
return l.link.Wait(ctx)
case limiterOther:
return l.other.Wait(ctx)
default:
return fmt.Errorf("unknown limiter type")
}
}
func (l *limiter) free() {
if l == nil {
return
}
limitersLock.Lock()
defer limitersLock.Unlock()
l.usedBy--
}
func (d *AliyundriveOpen) wait(ctx context.Context, typ limiterType) error {
if d == nil {
return fmt.Errorf("driver not init")
}
if d.ref != nil {
return d.ref.wait(ctx, typ) // If this is a reference driver, wait on the reference driver.
}
return d.limiter.wait(ctx, typ)
}
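The new limiter.go keeps one token-bucket set per user in a package-level map and reference-counts it, so storages sharing an account share the quota. A trimmed usage sketch of the same idea with a single bucket (golang.org/x/time/rate is the package the driver uses; the user ID is hypothetical):

package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/time/rate"
)

type limiter struct {
	usedBy int
	lim    *rate.Limiter
}

var (
	mu       sync.Mutex
	limiters = map[string]*limiter{}
)

// acquire hands back the shared limiter for a user, creating it on first use.
func acquire(userID string) *limiter {
	mu.Lock()
	defer mu.Unlock()
	if l, ok := limiters[userID]; ok {
		l.usedBy++ // a second storage on the same account shares the bucket
		return l
	}
	l := &limiter{usedBy: 1, lim: rate.NewLimiter(rate.Limit(0.9), 1)} // ~1 req/s
	limiters[userID] = l
	return l
}

func main() {
	l := acquire("user-1") // hypothetical user ID
	for i := 0; i < 3; i++ {
		if err := l.lim.Wait(context.Background()); err != nil { // blocks to honor the limit
			fmt.Println(err)
			return
		}
		fmt.Println("request", i)
	}
}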

View File

@ -50,10 +50,10 @@ func calPartSize(fileSize int64) int64 {
return partSize
}
func (d *AliyundriveOpen) getUploadUrl(count int, fileId, uploadId string) ([]PartInfo, error) {
func (d *AliyundriveOpen) getUploadUrl(ctx context.Context, count int, fileId, uploadId string) ([]PartInfo, error) {
partInfoList := makePartInfos(count)
var resp CreateResp
_, err := d.request("/adrive/v1.0/openFile/getUploadUrl", http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/getUploadUrl", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"file_id": fileId,
@ -69,7 +69,7 @@ func (d *AliyundriveOpen) uploadPart(ctx context.Context, r io.Reader, partInfo
if d.InternalUpload {
uploadUrl = strings.ReplaceAll(uploadUrl, "https://cn-beijing-data.aliyundrive.net/", "http://ccp-bj29-bj-1592982087.oss-cn-beijing-internal.aliyuncs.com/")
}
req, err := http.NewRequestWithContext(ctx, "PUT", uploadUrl, r)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, r)
if err != nil {
return err
}
@ -84,10 +84,10 @@ func (d *AliyundriveOpen) uploadPart(ctx context.Context, r io.Reader, partInfo
return nil
}
func (d *AliyundriveOpen) completeUpload(fileId, uploadId string) (model.Obj, error) {
func (d *AliyundriveOpen) completeUpload(ctx context.Context, fileId, uploadId string) (model.Obj, error) {
// 3. complete
var newFile File
_, err := d.request("/adrive/v1.0/openFile/complete", http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/complete", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"file_id": fileId,
@ -137,11 +137,8 @@ func (d *AliyundriveOpen) calProofCode(stream model.FileStreamer) (string, error
}
buf := make([]byte, length)
n, err := io.ReadFull(reader, buf)
if err == io.ErrUnexpectedEOF {
return "", fmt.Errorf("can't read data, expected=%d, got=%d", len(buf), n)
}
if err != nil {
return "", err
if n != int(length) {
return "", fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", length, n, err)
}
return base64.StdEncoding.EncodeToString(buf), nil
}
@ -183,7 +180,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
createData["pre_hash"] = hash
}
var createResp CreateResp
_, err, e := d.requestReturnErrResp("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
_, err, e := d.requestReturnErrResp(ctx, limiterOther, "/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
req.SetBody(createData).SetResult(&createResp)
})
if err != nil {
@ -194,9 +191,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
hash := stream.GetHash().GetHash(utils.SHA1)
if len(hash) != utils.SHA1.Width {
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, hash, err = streamPkg.CacheFullInTempFileAndHash(stream, cacheFileProgress, utils.SHA1)
_, hash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA1)
if err != nil {
return nil, err
}
@ -210,7 +205,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
if err != nil {
return nil, fmt.Errorf("cal proof code error: %s", err.Error())
}
_, err = d.request("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
_, err = d.request(ctx, limiterOther, "/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
req.SetBody(createData).SetResult(&createResp)
})
if err != nil {
@ -221,17 +216,20 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
if !createResp.RapidUpload {
// 2. normal upload
log.Debugf("[aliyundive_open] normal upload")
ss, err := streamPkg.NewStreamSectionReader(stream, int(partSize), &up)
if err != nil {
return nil, err
}
preTime := time.Now()
var offset, length int64 = 0, partSize
//var length
for i := 0; i < len(createResp.PartInfoList); i++ {
if utils.IsCanceled(ctx) {
return nil, ctx.Err()
}
// refresh upload url if 50 minutes passed
if time.Since(preTime) > 50*time.Minute {
createResp.PartInfoList, err = d.getUploadUrl(count, createResp.FileId, createResp.UploadId)
createResp.PartInfoList, err = d.getUploadUrl(ctx, count, createResp.FileId, createResp.UploadId)
if err != nil {
return nil, err
}
@ -240,22 +238,19 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
if remain := stream.GetSize() - offset; length > remain {
length = remain
}
rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
if rapidUpload {
srd, err := stream.RangeRead(http_range.Range{Start: offset, Length: length})
if err != nil {
return nil, err
}
rd = utils.NewMultiReadable(srd)
rd, err := ss.GetSectionReader(offset, length)
if err != nil {
return nil, err
}
rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
err = retry.Do(func() error {
_ = rd.Reset()
rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
rd.Seek(0, io.SeekStart)
return d.uploadPart(ctx, rateLimitedRd, createResp.PartInfoList[i])
},
retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second))
ss.FreeSectionReader(rd)
if err != nil {
return nil, err
}
@ -268,5 +263,5 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
log.Debugf("[aliyundrive_open] create file success, resp: %+v", createResp)
// 3. complete
return d.completeUpload(createResp.FileId, createResp.UploadId)
return d.completeUpload(ctx, createResp.FileId, createResp.UploadId)
}
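Each slice upload above is wrapped in retry.Do with three attempts and exponential backoff; github.com/avast/retry-go is the library the driver already imports. A self-contained sketch of that policy (the failing call is simulated):

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/avast/retry-go"
)

func main() {
	attempt := 0
	err := retry.Do(
		func() error {
			attempt++
			if attempt < 3 {
				return errors.New("simulated transient upload failure")
			}
			return nil
		},
		retry.Attempts(3),
		retry.Delay(time.Second),
		retry.DelayType(retry.BackOffDelay), // 1s, 2s, ... between attempts
	)
	fmt.Println("attempts:", attempt, "err:", err)
}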

View File

@ -19,7 +19,7 @@ import (
// do others that not defined in Driver interface
func (d *AliyundriveOpen) _refreshToken() (string, string, error) {
func (d *AliyundriveOpen) _refreshToken(ctx context.Context) (string, string, error) {
if d.UseOnlineAPI && d.APIAddress != "" {
u := d.APIAddress
var resp struct {
@ -27,14 +27,17 @@ func (d *AliyundriveOpen) _refreshToken() (string, string, error) {
AccessToken string `json:"access_token"`
ErrorMessage string `json:"text"`
}
// Set driver_txt according to the AlipanType option
driverTxt := "alicloud_qr"
if d.AlipanType == "alipanTV" {
driverTxt = "alicloud_tv"
}
_, err := base.RestyClient.R().
err := d.wait(ctx, limiterOther)
if err != nil {
return "", "", err
}
_, err = base.RestyClient.R().
SetHeader("User-Agent", "Mozilla/5.0 (Macintosh; Apple macOS 15_5) AppleWebKit/537.36 (KHTML, like Gecko) Safari/537.36 Chrome/138.0.0.0 Openlist/425.6.30").
SetResult(&resp).
SetQueryParams(map[string]string{
@ -54,11 +57,14 @@ func (d *AliyundriveOpen) _refreshToken() (string, string, error) {
}
return resp.RefreshToken, resp.AccessToken, nil
}
// Local refresh logic; client_id and client_secret are required
if d.ClientID == "" || d.ClientSecret == "" {
return "", "", fmt.Errorf("empty ClientID or ClientSecret")
}
err := d.wait(ctx, limiterOther)
if err != nil {
return "", "", err
}
url := API_URL + "/oauth/access_token"
//var resp base.TokenResp
var e ErrResp
@ -110,18 +116,18 @@ func getSub(token string) (string, error) {
return utils.Json.Get(bs, "sub").ToString(), nil
}
func (d *AliyundriveOpen) refreshToken() error {
func (d *AliyundriveOpen) refreshToken(ctx context.Context) error {
if d.ref != nil {
return d.ref.refreshToken()
return d.ref.refreshToken(ctx)
}
refresh, access, err := d._refreshToken()
refresh, access, err := d._refreshToken(ctx)
for i := 0; i < 3; i++ {
if err == nil {
break
} else {
log.Errorf("[ali_open] failed to refresh token: %s", err)
}
refresh, access, err = d._refreshToken()
refresh, access, err = d._refreshToken(ctx)
}
if err != nil {
return err
@ -132,12 +138,12 @@ func (d *AliyundriveOpen) refreshToken() error {
return nil
}
func (d *AliyundriveOpen) request(uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error) {
b, err, _ := d.requestReturnErrResp(uri, method, callback, retry...)
func (d *AliyundriveOpen) request(ctx context.Context, limitTy limiterType, uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error) {
b, err, _ := d.requestReturnErrResp(ctx, limitTy, uri, method, callback, retry...)
return b, err
}
func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error, *ErrResp) {
func (d *AliyundriveOpen) requestReturnErrResp(ctx context.Context, limitTy limiterType, uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error, *ErrResp) {
req := base.RestyClient.R()
// TODO check whether access_token is expired
req.SetHeader("Authorization", "Bearer "+d.getAccessToken())
@ -149,6 +155,10 @@ func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base
}
var e ErrResp
req.SetError(&e)
err := d.wait(ctx, limitTy)
if err != nil {
return nil, err, nil
}
res, err := req.Execute(method, API_URL+uri)
if err != nil {
if res != nil {
@ -159,11 +169,11 @@ func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base
isRetry := len(retry) > 0 && retry[0]
if e.Code != "" {
if !isRetry && (utils.SliceContains([]string{"AccessTokenInvalid", "AccessTokenExpired", "I400JD"}, e.Code) || d.getAccessToken() == "") {
err = d.refreshToken()
err = d.refreshToken(ctx)
if err != nil {
return nil, err, nil
}
return d.requestReturnErrResp(uri, method, callback, true)
return d.requestReturnErrResp(ctx, limitTy, uri, method, callback, true)
}
return nil, fmt.Errorf("%s:%s", e.Code, e.Message), &e
}
@ -172,7 +182,7 @@ func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base
func (d *AliyundriveOpen) list(ctx context.Context, data base.Json) (*Files, error) {
var resp Files
_, err := d.request("/adrive/v1.0/openFile/list", http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterList, "/adrive/v1.0/openFile/list", http.MethodPost, func(req *resty.Request) {
req.SetBody(data).SetResult(&resp)
})
if err != nil {
@ -201,7 +211,7 @@ func (d *AliyundriveOpen) getFiles(ctx context.Context, fileId string) ([]File,
//"video_thumbnail_width": 480,
//"image_thumbnail_width": 480,
}
resp, err := d.limitList(ctx, data)
resp, err := d.list(ctx, data)
if err != nil {
return nil, err
}

View File

@ -2,7 +2,6 @@ package aliyundrive_share
import (
"context"
"fmt"
"net/http"
"time"
@ -12,7 +11,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/cron"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/rateg"
"github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
)
@ -25,8 +23,7 @@ type AliyundriveShare struct {
DriveId string
cron *cron.Cron
limitList func(ctx context.Context, dir model.Obj) ([]model.Obj, error)
limitLink func(ctx context.Context, file model.Obj) (*model.Link, error)
limiter *limiter
}
func (d *AliyundriveShare) Config() driver.Config {
@ -38,29 +35,26 @@ func (d *AliyundriveShare) GetAddition() driver.Additional {
}
func (d *AliyundriveShare) Init(ctx context.Context) error {
err := d.refreshToken()
d.limiter = getLimiter()
err := d.refreshToken(ctx)
if err != nil {
d.limiter.free()
d.limiter = nil
return err
}
err = d.getShareToken()
err = d.getShareToken(ctx)
if err != nil {
d.limiter.free()
d.limiter = nil
return err
}
d.cron = cron.NewCron(time.Hour * 2)
d.cron.Do(func() {
err := d.refreshToken()
err := d.refreshToken(ctx)
if err != nil {
log.Errorf("%+v", err)
}
})
d.limitList = rateg.LimitFnCtx(d.list, rateg.LimitFnOption{
Limit: 4,
Bucket: 1,
})
d.limitLink = rateg.LimitFnCtx(d.link, rateg.LimitFnOption{
Limit: 1,
Bucket: 1,
})
return nil
}
@ -68,19 +62,14 @@ func (d *AliyundriveShare) Drop(ctx context.Context) error {
if d.cron != nil {
d.cron.Stop()
}
d.limiter.free()
d.limiter = nil
d.DriveId = ""
return nil
}
func (d *AliyundriveShare) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
if d.limitList == nil {
return nil, fmt.Errorf("driver not init")
}
return d.limitList(ctx, dir)
}
func (d *AliyundriveShare) list(ctx context.Context, dir model.Obj) ([]model.Obj, error) {
files, err := d.getFiles(dir.GetID())
files, err := d.getFiles(ctx, dir.GetID())
if err != nil {
return nil, err
}
@ -90,13 +79,6 @@ func (d *AliyundriveShare) list(ctx context.Context, dir model.Obj) ([]model.Obj
}
func (d *AliyundriveShare) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if d.limitLink == nil {
return nil, fmt.Errorf("driver not init")
}
return d.limitLink(ctx, file)
}
func (d *AliyundriveShare) link(ctx context.Context, file model.Obj) (*model.Link, error) {
data := base.Json{
"drive_id": d.DriveId,
"file_id": file.GetID(),
@ -105,7 +87,7 @@ func (d *AliyundriveShare) link(ctx context.Context, file model.Obj) (*model.Lin
"share_id": d.ShareId,
}
var resp ShareLinkResp
_, err := d.request("https://api.alipan.com/v2/file/get_share_link_download_url", http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterLink, "https://api.alipan.com/v2/file/get_share_link_download_url", http.MethodPost, func(req *resty.Request) {
req.SetHeader(CanaryHeaderKey, CanaryHeaderValue).SetBody(data).SetResult(&resp)
})
if err != nil {
@ -135,7 +117,7 @@ func (d *AliyundriveShare) Other(ctx context.Context, args model.OtherArgs) (int
default:
return nil, errs.NotSupport
}
_, err := d.request(url, http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterOther, url, http.MethodPost, func(req *resty.Request) {
req.SetBody(data).SetResult(&resp)
})
if err != nil {

View File

@ -0,0 +1,67 @@
package aliyundrive_share
import (
"context"
"fmt"
"golang.org/x/time/rate"
)
// See issue https://github.com/OpenListTeam/OpenList/issues/724
// There seems to be no per-user limit.
type limiterType int
const (
limiterList limiterType = iota
limiterLink
limiterOther
)
const (
listRateLimit = 3.9 // 4 per second in document, but we use 3.9 per second to be safe
linkRateLimit = 0.9 // 1 per second in document, but we use 0.9 per second to be safe
otherRateLimit = 14.9 // 15 per second in document, but we use 14.9 per second to be safe
)
type limiter struct {
list *rate.Limiter
link *rate.Limiter
other *rate.Limiter
}
func getLimiter() *limiter {
return &limiter{
list: rate.NewLimiter(rate.Limit(listRateLimit), 1),
link: rate.NewLimiter(rate.Limit(linkRateLimit), 1),
other: rate.NewLimiter(rate.Limit(otherRateLimit), 1),
}
}
func (l *limiter) wait(ctx context.Context, typ limiterType) error {
if l == nil {
return fmt.Errorf("driver not init")
}
switch typ {
case limiterList:
return l.list.Wait(ctx)
case limiterLink:
return l.link.Wait(ctx)
case limiterOther:
return l.other.Wait(ctx)
default:
return fmt.Errorf("unknown limiter type")
}
}
func (l *limiter) free() {
}
func (d *AliyundriveShare) wait(ctx context.Context, typ limiterType) error {
if d == nil {
return fmt.Errorf("driver not init")
}
//if d.ref != nil {
// return d.ref.wait(ctx, typ) // If this is a reference driver, wait on the reference driver.
//}
return d.limiter.wait(ctx, typ)
}

View File

@ -1,6 +1,7 @@
package aliyundrive_share
import (
"context"
"errors"
"fmt"
@ -15,11 +16,15 @@ const (
CanaryHeaderValue = "client=web,app=share,version=v2.3.1"
)
func (d *AliyundriveShare) refreshToken() error {
func (d *AliyundriveShare) refreshToken(ctx context.Context) error {
err := d.wait(ctx, limiterOther)
if err != nil {
return err
}
url := "https://auth.alipan.com/v2/account/token"
var resp base.TokenResp
var e ErrorResp
_, err := base.RestyClient.R().
_, err = base.RestyClient.R().
SetBody(base.Json{"refresh_token": d.RefreshToken, "grant_type": "refresh_token"}).
SetResult(&resp).
SetError(&e).
@ -36,7 +41,11 @@ func (d *AliyundriveShare) refreshToken() error {
}
// do others that not defined in Driver interface
func (d *AliyundriveShare) getShareToken() error {
func (d *AliyundriveShare) getShareToken(ctx context.Context) error {
err := d.wait(ctx, limiterOther)
if err != nil {
return err
}
data := base.Json{
"share_id": d.ShareId,
}
@ -45,7 +54,7 @@ func (d *AliyundriveShare) getShareToken() error {
}
var e ErrorResp
var resp ShareTokenResp
_, err := base.RestyClient.R().
_, err = base.RestyClient.R().
SetResult(&resp).SetError(&e).SetBody(data).
Post("https://api.alipan.com/v2/share_link/get_share_token")
if err != nil {
@ -58,7 +67,7 @@ func (d *AliyundriveShare) getShareToken() error {
return nil
}
func (d *AliyundriveShare) request(url, method string, callback base.ReqCallback) ([]byte, error) {
func (d *AliyundriveShare) request(ctx context.Context, limitTy limiterType, url, method string, callback base.ReqCallback) ([]byte, error) {
var e ErrorResp
req := base.RestyClient.R().
SetError(&e).
@ -71,6 +80,10 @@ func (d *AliyundriveShare) request(url, method string, callback base.ReqCallback
} else {
req.SetBody("{}")
}
err := d.wait(ctx, limitTy)
if err != nil {
return nil, err
}
resp, err := req.Execute(method, url)
if err != nil {
return nil, err
@ -78,14 +91,14 @@ func (d *AliyundriveShare) request(url, method string, callback base.ReqCallback
if e.Code != "" {
if e.Code == "AccessTokenInvalid" || e.Code == "ShareLinkTokenInvalid" {
if e.Code == "AccessTokenInvalid" {
err = d.refreshToken()
err = d.refreshToken(ctx)
} else {
err = d.getShareToken()
err = d.getShareToken(ctx)
}
if err != nil {
return nil, err
}
return d.request(url, method, callback)
return d.request(ctx, limitTy, url, method, callback)
} else {
return nil, errors.New(e.Code + ": " + e.Message)
}
@ -93,7 +106,7 @@ func (d *AliyundriveShare) request(url, method string, callback base.ReqCallback
return resp.Body(), nil
}
func (d *AliyundriveShare) getFiles(fileId string) ([]File, error) {
func (d *AliyundriveShare) getFiles(ctx context.Context, fileId string) ([]File, error) {
files := make([]File, 0)
data := base.Json{
"image_thumbnail_process": "image/resize,w_160/format,jpeg",
@ -110,6 +123,10 @@ func (d *AliyundriveShare) getFiles(fileId string) ([]File, error) {
if data["marker"] == "first" {
data["marker"] = ""
}
err := d.wait(ctx, limiterList)
if err != nil {
return nil, err
}
var e ErrorResp
var resp ListResp
res, err := base.RestyClient.R().
@ -123,11 +140,11 @@ func (d *AliyundriveShare) getFiles(fileId string) ([]File, error) {
log.Debugf("aliyundrive share get files: %s", res.String())
if e.Code != "" {
if e.Code == "AccessTokenInvalid" || e.Code == "ShareLinkTokenInvalid" {
err = d.getShareToken()
err = d.getShareToken(ctx)
if err != nil {
return nil, err
}
return d.getFiles(fileId)
return d.getFiles(ctx, fileId)
}
return nil, errors.New(e.Message)
}

View File

@ -23,6 +23,7 @@ import (
_ "github.com/OpenListTeam/OpenList/v4/drivers/cloudreve"
_ "github.com/OpenListTeam/OpenList/v4/drivers/cloudreve_v4"
_ "github.com/OpenListTeam/OpenList/v4/drivers/crypt"
_ "github.com/OpenListTeam/OpenList/v4/drivers/degoo"
_ "github.com/OpenListTeam/OpenList/v4/drivers/doubao"
_ "github.com/OpenListTeam/OpenList/v4/drivers/doubao_share"
_ "github.com/OpenListTeam/OpenList/v4/drivers/dropbox"
@ -48,6 +49,7 @@ import (
_ "github.com/OpenListTeam/OpenList/v4/drivers/onedrive_app"
_ "github.com/OpenListTeam/OpenList/v4/drivers/onedrive_sharelink"
_ "github.com/OpenListTeam/OpenList/v4/drivers/openlist"
_ "github.com/OpenListTeam/OpenList/v4/drivers/openlist_share"
_ "github.com/OpenListTeam/OpenList/v4/drivers/pikpak"
_ "github.com/OpenListTeam/OpenList/v4/drivers/pikpak_share"
_ "github.com/OpenListTeam/OpenList/v4/drivers/quark_open"
@ -59,6 +61,7 @@ import (
_ "github.com/OpenListTeam/OpenList/v4/drivers/smb"
_ "github.com/OpenListTeam/OpenList/v4/drivers/strm"
_ "github.com/OpenListTeam/OpenList/v4/drivers/teambition"
_ "github.com/OpenListTeam/OpenList/v4/drivers/teldrive"
_ "github.com/OpenListTeam/OpenList/v4/drivers/terabox"
_ "github.com/OpenListTeam/OpenList/v4/drivers/thunder"
_ "github.com/OpenListTeam/OpenList/v4/drivers/thunder_browser"

View File

@ -203,11 +203,12 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
streamSize := stream.GetSize()
sliceSize := d.getSliceSize(streamSize)
count := int(streamSize / sliceSize)
count := 1
if streamSize > sliceSize {
count = int((streamSize + sliceSize - 1) / sliceSize)
}
lastBlockSize := streamSize % sliceSize
if lastBlockSize > 0 {
count++
} else {
if lastBlockSize == 0 {
lastBlockSize = sliceSize
}

View File

@ -262,11 +262,12 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
// Compute the required values
streamSize := stream.GetSize()
count := int(streamSize / DEFAULT)
count := 1
if streamSize > DEFAULT {
count = int((streamSize + DEFAULT - 1) / DEFAULT)
}
lastBlockSize := streamSize % DEFAULT
if lastBlockSize > 0 {
count++
} else {
if lastBlockSize == 0 {
lastBlockSize = DEFAULT
}

View File

@ -255,7 +255,7 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStr
},
UpdateProgress: up,
})
req, err := http.NewRequestWithContext(ctx, "POST", "https://pan-yz.chaoxing.com/upload", r)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, "https://pan-yz.chaoxing.com/upload", r)
if err != nil {
return err
}

View File

@ -167,7 +167,7 @@ func (d *ChaoXing) Login() (string, error) {
return "", err
}
// Create the request
req, err := http.NewRequest("POST", "https://passport2.chaoxing.com/fanyalogin", body)
req, err := http.NewRequest(http.MethodPost, "https://passport2.chaoxing.com/fanyalogin", body)
if err != nil {
return "", err
}

View File

@ -20,6 +20,7 @@ type Addition struct {
var config = driver.Config{
Name: "Cloudreve",
DefaultRoot: "/",
LocalSort: true,
}
func init() {

View File

@ -18,6 +18,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/setting"
streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/cookie"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
@ -236,28 +237,32 @@ func (d *Cloudreve) upLocal(ctx context.Context, stream model.FileStreamer, u Up
}
func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
DEFAULT := int64(u.ChunkSize)
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
if err != nil {
return err
}
uploadUrl := u.UploadURLs[0]
credential := u.Credential
var finish int64 = 0
var chunk int = 0
DEFAULT := int64(u.ChunkSize)
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := stream.GetSize() - finish
byteSize := min(left, DEFAULT)
err := retry.Do(
utils.Log.Debugf("[Cloudreve-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
rd, err := ss.GetSectionReader(finish, byteSize)
if err != nil {
return err
}
err = retry.Do(
func() error {
utils.Log.Debugf("[Cloudreve-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadUrl+"?chunk="+strconv.Itoa(chunk),
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
driver.NewLimitedUploadStream(ctx, rd))
if err != nil {
return err
}
@ -290,6 +295,7 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.FreeSectionReader(rd)
if err != nil {
return err
}
@ -301,26 +307,29 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
}
func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
DEFAULT := int64(u.ChunkSize)
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
if err != nil {
return err
}
uploadUrl := u.UploadURLs[0]
var finish int64 = 0
DEFAULT := int64(u.ChunkSize)
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := stream.GetSize() - finish
byteSize := min(left, DEFAULT)
err := retry.Do(
utils.Log.Debugf("[Cloudreve-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
rd, err := ss.GetSectionReader(finish, byteSize)
if err != nil {
return err
}
err = retry.Do(
func() error {
utils.Log.Debugf("[Cloudreve-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl,
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, rd))
if err != nil {
return err
}
@ -346,6 +355,7 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.FreeSectionReader(rd)
if err != nil {
return err
}
@ -359,27 +369,31 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
}
func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
DEFAULT := int64(u.ChunkSize)
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
if err != nil {
return err
}
var finish int64 = 0
var chunk int = 0
var etags []string
DEFAULT := int64(u.ChunkSize)
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := stream.GetSize() - finish
byteSize := min(left, DEFAULT)
err := retry.Do(
utils.Log.Debugf("[Cloudreve-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
rd, err := ss.GetSectionReader(finish, byteSize)
if err != nil {
return err
}
err = retry.Do(
func() error {
utils.Log.Debugf("[Cloudreve-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, u.UploadURLs[chunk],
driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
driver.NewLimitedUploadStream(ctx, rd))
if err != nil {
return err
}
@ -404,6 +418,7 @@ func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u Uploa
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.FreeSectionReader(rd)
if err != nil {
return err
}
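All three Cloudreve upload paths share the same loop shape: fixed-size chunks advanced by finish, with a short final chunk. A minimal sketch with hypothetical sizes (Go 1.21+ for the built-in min):

package main

import "fmt"

func main() {
	total, chunkSize := int64(2500), int64(1024) // hypothetical sizes
	for finish := int64(0); finish < total; {
		byteSize := min(total-finish, chunkSize) // last chunk may be short
		fmt.Printf("upload range: %d-%d/%d\n", finish, finish+byteSize-1, total)
		finish += byteSize
	}
}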

View File

@ -20,7 +20,9 @@ import (
type CloudreveV4 struct {
model.Storage
Addition
ref *CloudreveV4
ref *CloudreveV4
AccessExpires string
RefreshExpires string
}
func (d *CloudreveV4) Config() driver.Config {
@ -44,13 +46,17 @@ func (d *CloudreveV4) Init(ctx context.Context) error {
if d.ref != nil {
return nil
}
if d.AccessToken == "" && d.RefreshToken != "" {
return d.refreshToken()
}
if d.Username != "" {
if d.canLogin() {
return d.login()
}
return nil
if d.RefreshToken != "" {
return d.refreshToken()
}
if d.AccessToken == "" {
return errors.New("no way to authenticate. At least AccessToken is required")
}
// ensure AccessToken is valid
return d.parseJWT(d.AccessToken, &AccessJWT{})
}
func (d *CloudreveV4) InitReference(storage driver.Driver) error {

View File

@ -66,11 +66,27 @@ type CaptchaResp struct {
Ticket string `json:"ticket"`
}
type AccessJWT struct {
TokenType string `json:"token_type"`
Sub string `json:"sub"`
Exp int64 `json:"exp"`
Nbf int64 `json:"nbf"`
}
type RefreshJWT struct {
TokenType string `json:"token_type"`
Sub string `json:"sub"`
Exp int `json:"exp"`
Nbf int `json:"nbf"`
StateHash string `json:"state_hash"`
RootTokenID string `json:"root_token_id"`
}
type Token struct {
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
AccessExpires time.Time `json:"access_expires"`
RefreshExpires time.Time `json:"refresh_expires"`
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
AccessExpires string `json:"access_expires"`
RefreshExpires string `json:"refresh_expires"`
}
type TokenResponse struct {
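The Token expiry fields above changed from time.Time to string. Assuming the server sends RFC 3339 timestamps (as the former time.Time fields implied; this is an assumption, not confirmed by the diff), a minimal expiry check looks like:

package main

import (
	"fmt"
	"time"
)

func main() {
	accessExpires := "2025-09-04T19:41:41+08:00" // hypothetical value
	t, err := time.Parse(time.RFC3339, accessExpires)
	if err != nil {
		fmt.Println("invalid expiry:", err)
		return
	}
	fmt.Println("expired:", time.Now().After(t))
}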

View File

@ -19,6 +19,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/setting"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
@ -27,6 +28,15 @@ import (
// do others that not defined in Driver interface
const (
CodeLoginRequired = http.StatusUnauthorized
CodeCredentialInvalid = 40020 // Failed to issue token
)
var (
ErrorIssueToken = errors.New("failed to issue token")
)
func (d *CloudreveV4) getUA() string {
if d.CustomUA != "" {
return d.CustomUA
@ -38,6 +48,23 @@ func (d *CloudreveV4) request(method string, path string, callback base.ReqCallb
if d.ref != nil {
return d.ref.request(method, path, callback, out)
}
// ensure token
if d.isTokenExpired() {
err := d.refreshToken()
if err != nil {
return err
}
}
return d._request(method, path, callback, out)
}
func (d *CloudreveV4) _request(method string, path string, callback base.ReqCallback, out any) error {
if d.ref != nil {
return d.ref._request(method, path, callback, out)
}
u := d.Address + "/api/v4" + path
req := base.RestyClient.R()
req.SetHeaders(map[string]string{
@ -64,15 +91,17 @@ func (d *CloudreveV4) request(method string, path string, callback base.ReqCallb
}
if r.Code != 0 {
if r.Code == 401 && d.RefreshToken != "" && path != "/session/token/refresh" {
// try to refresh token
err = d.refreshToken()
if r.Code == CodeLoginRequired && d.canLogin() && path != "/session/token/refresh" {
err = d.login()
if err != nil {
return err
}
return d.request(method, path, callback, out)
}
return errors.New(r.Msg)
if r.Code == CodeCredentialInvalid {
return ErrorIssueToken
}
return fmt.Errorf("%d: %s", r.Code, r.Msg)
}
if out != nil && r.Data != nil {
@ -90,14 +119,18 @@ func (d *CloudreveV4) request(method string, path string, callback base.ReqCallb
return nil
}
func (d *CloudreveV4) canLogin() bool {
return d.Username != "" && d.Password != ""
}
func (d *CloudreveV4) login() error {
var siteConfig SiteLoginConfigResp
err := d.request(http.MethodGet, "/site/config/login", nil, &siteConfig)
err := d._request(http.MethodGet, "/site/config/login", nil, &siteConfig)
if err != nil {
return err
}
var prepareLogin PrepareLoginResp
err = d.request(http.MethodGet, "/session/prepare?email="+d.Addition.Username, nil, &prepareLogin)
err = d._request(http.MethodGet, "/session/prepare?email="+d.Addition.Username, nil, &prepareLogin)
if err != nil {
return err
}
@ -127,7 +160,7 @@ func (d *CloudreveV4) doLogin(needCaptcha bool) error {
}
if needCaptcha {
var config BasicConfigResp
err = d.request(http.MethodGet, "/site/config/basic", nil, &config)
err = d._request(http.MethodGet, "/site/config/basic", nil, &config)
if err != nil {
return err
}
@ -135,7 +168,7 @@ func (d *CloudreveV4) doLogin(needCaptcha bool) error {
return fmt.Errorf("captcha type %s not supported", config.CaptchaType)
}
var captcha CaptchaResp
err = d.request(http.MethodGet, "/site/captcha", nil, &captcha)
err = d._request(http.MethodGet, "/site/captcha", nil, &captcha)
if err != nil {
return err
}
@ -161,20 +194,22 @@ func (d *CloudreveV4) doLogin(needCaptcha bool) error {
loginBody["captcha"] = captchaCode
}
var token TokenResponse
err = d.request(http.MethodPost, "/session/token", func(req *resty.Request) {
err = d._request(http.MethodPost, "/session/token", func(req *resty.Request) {
req.SetBody(loginBody)
}, &token)
if err != nil {
return err
}
d.AccessToken, d.RefreshToken = token.Token.AccessToken, token.Token.RefreshToken
d.AccessExpires, d.RefreshExpires = token.Token.AccessExpires, token.Token.RefreshExpires
op.MustSaveDriverStorage(d)
return nil
}
func (d *CloudreveV4) refreshToken() error {
// if no refresh token, try to login if possible
if d.RefreshToken == "" {
if d.Username != "" {
if d.canLogin() {
err := d.login()
if err != nil {
return fmt.Errorf("cannot login to get refresh token, error: %s", err)
@ -182,20 +217,127 @@ func (d *CloudreveV4) refreshToken() error {
}
return nil
}
// parse jwt to check if refresh token is valid
var jwt RefreshJWT
err := d.parseJWT(d.RefreshToken, &jwt)
if err != nil {
// if refresh token is invalid, try to login if possible
if d.canLogin() {
return d.login()
}
d.GetStorage().SetStatus(fmt.Sprintf("Invalid RefreshToken: %s", err.Error()))
op.MustSaveDriverStorage(d)
return fmt.Errorf("invalid refresh token: %w", err)
}
// do refresh token
var token Token
err := d.request(http.MethodPost, "/session/token/refresh", func(req *resty.Request) {
err = d._request(http.MethodPost, "/session/token/refresh", func(req *resty.Request) {
req.SetBody(base.Json{
"refresh_token": d.RefreshToken,
})
}, &token)
if err != nil {
if errors.Is(err, ErrorIssueToken) {
if d.canLogin() {
// try to login again
return d.login()
}
d.GetStorage().SetStatus("This session is no longer valid")
op.MustSaveDriverStorage(d)
return ErrorIssueToken
}
return err
}
d.AccessToken, d.RefreshToken = token.AccessToken, token.RefreshToken
d.AccessExpires, d.RefreshExpires = token.AccessExpires, token.RefreshExpires
op.MustSaveDriverStorage(d)
return nil
}
func (d *CloudreveV4) parseJWT(token string, jwt any) error {
split := strings.Split(token, ".")
if len(split) != 3 {
return fmt.Errorf("invalid token: expected 3 segments, got %d, ensure the token is a valid JWT", len(split))
}
data, err := base64.RawURLEncoding.DecodeString(split[1])
if err != nil {
return fmt.Errorf("invalid token encoding: %w, ensure the token is a valid JWT", err)
}
err = json.Unmarshal(data, &jwt)
if err != nil {
return fmt.Errorf("invalid token content: %w, ensure the token is a valid JWT", err)
}
return nil
}
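parseJWT above inspects only the payload segment and deliberately skips signature verification, since the driver just needs the expiry claims. A self-contained sketch of the same decode (token fabricated for illustration):

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	// Fabricated, unsigned token of the form header.payload.signature.
	payload := base64.RawURLEncoding.EncodeToString([]byte(`{"token_type":"access","exp":1756339387}`))
	token := "eyJhbGciOiJub25lIn0." + payload + ".sig"

	parts := strings.Split(token, ".")
	if len(parts) != 3 {
		panic("not a JWT")
	}
	data, err := base64.RawURLEncoding.DecodeString(parts[1])
	if err != nil {
		panic(err)
	}
	var claims struct {
		TokenType string `json:"token_type"`
		Exp       int64  `json:"exp"`
	}
	if err := json.Unmarshal(data, &claims); err != nil {
		panic(err)
	}
	fmt.Println(claims.TokenType, claims.Exp) // access 1756339387
}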
// check if token is expired
// https://github.com/cloudreve/frontend/blob/ddfacc1c31c49be03beb71de4cc114c8811038d6/src/session/index.ts#L177-L200
func (d *CloudreveV4) isTokenExpired() bool {
if d.RefreshToken == "" {
// login again if username and password are set
if d.canLogin() {
return true
}
// no refresh token, cannot refresh
return false
}
if d.AccessToken == "" {
return true
}
var (
err error
expires time.Time
)
// check if token is expired
if d.AccessExpires != "" {
// prefer the expires field when available to avoid timezone issues
// it is only populated after a login or token refresh
// e.g. 2025-08-28T02:43:07.645109985+08:00
expires, err = time.Parse(time.RFC3339Nano, d.AccessExpires)
if err != nil {
return false
}
} else {
// fall back to parsing the JWT
// if that fails, disable the storage
var jwt AccessJWT
err = d.parseJWT(d.AccessToken, &jwt)
if err != nil {
d.GetStorage().SetStatus(fmt.Sprintf("Invalid AccessToken: %s", err.Error()))
op.MustSaveDriverStorage(d)
return false
}
// may have timezone issues
expires = time.Unix(jwt.Exp, 0)
}
// add a 10 minutes safe margin
ddl := time.Now().Add(10 * time.Minute)
if expires.Before(ddl) {
// current access token expired, check if refresh token is expired
// warning: cannot parse refresh token from jwt, because the exp field is not standard
if d.RefreshExpires != "" {
refreshExpires, err := time.Parse(time.RFC3339Nano, d.RefreshExpires)
if err != nil {
return false
}
if refreshExpires.Before(time.Now()) {
// This session is no longer valid
if d.canLogin() {
// try to login again
return true
}
d.GetStorage().SetStatus("This session is no longer valid")
op.MustSaveDriverStorage(d)
return false
}
}
return true
}
return false
}
func (d *CloudreveV4) upLocal(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
var finish int64 = 0
var chunk int = 0
@ -251,28 +393,32 @@ func (d *CloudreveV4) upLocal(ctx context.Context, file model.FileStreamer, u Fi
}
func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
DEFAULT := int64(u.ChunkSize)
ss, err := stream.NewStreamSectionReader(file, int(DEFAULT), &up)
if err != nil {
return err
}
uploadUrl := u.UploadUrls[0]
credential := u.Credential
var finish int64 = 0
var chunk int = 0
DEFAULT := int64(u.ChunkSize)
for finish < file.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := file.GetSize() - finish
byteSize := min(left, DEFAULT)
err := retry.Do(
utils.Log.Debugf("[CloudreveV4-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
rd, err := ss.GetSectionReader(finish, byteSize)
if err != nil {
return err
}
err = retry.Do(
func() error {
utils.Log.Debugf("[CloudreveV4-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(file, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadUrl+"?chunk="+strconv.Itoa(chunk),
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
driver.NewLimitedUploadStream(ctx, rd))
if err != nil {
return err
}
@ -305,6 +451,7 @@ func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u F
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.FreeSectionReader(rd)
if err != nil {
return err
}
@ -316,26 +463,29 @@ func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u F
}
func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
DEFAULT := int64(u.ChunkSize)
ss, err := stream.NewStreamSectionReader(file, int(DEFAULT), &up)
if err != nil {
return err
}
uploadUrl := u.UploadUrls[0]
var finish int64 = 0
DEFAULT := int64(u.ChunkSize)
for finish < file.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := file.GetSize() - finish
byteSize := min(left, DEFAULT)
err := retry.Do(
utils.Log.Debugf("[CloudreveV4-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
rd, err := ss.GetSectionReader(finish, byteSize)
if err != nil {
return err
}
err = retry.Do(
func() error {
utils.Log.Debugf("[CloudreveV4-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(file, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl,
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, rd))
if err != nil {
return err
}
@ -362,6 +512,7 @@ func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.FreeSectionReader(rd)
if err != nil {
return err
}
@ -375,27 +526,31 @@ func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u
}
func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
DEFAULT := int64(u.ChunkSize)
ss, err := stream.NewStreamSectionReader(file, int(DEFAULT), &up)
if err != nil {
return err
}
var finish int64 = 0
var chunk int = 0
var etags []string
DEFAULT := int64(u.ChunkSize)
for finish < file.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := file.GetSize() - finish
byteSize := min(left, DEFAULT)
err := retry.Do(
utils.Log.Debugf("[CloudreveV4-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
rd, err := ss.GetSectionReader(finish, byteSize)
if err != nil {
return err
}
err = retry.Do(
func() error {
utils.Log.Debugf("[CloudreveV4-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(file, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, u.UploadUrls[chunk],
driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
driver.NewLimitedUploadStream(ctx, rd))
if err != nil {
return err
}
@ -421,6 +576,7 @@ func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileU
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.FreeSectionReader(rd)
if err != nil {
return err
}
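All three upload paths above now follow the same pattern: take a pooled section reader for the part, rewind it to the start on every retry attempt so a failed attempt re-sends identical bytes, and free the reader once the part completes. A condensed, dependency-free sketch of that loop (the stream helpers are replaced with stdlib equivalents; requires Go 1.21+ for the builtin min):

package main

import (
	"bytes"
	"fmt"
	"io"
)

// uploadChunks mirrors the loop shape in upRemote/upOneDrive/upS3: each part
// is wrapped in a SectionReader, rewound before every attempt, and only
// advanced past once an attempt succeeds.
func uploadChunks(data []byte, chunkSize int64, attempt func(io.Reader, int64) error) error {
	size := int64(len(data))
	src := bytes.NewReader(data)
	for finish := int64(0); finish < size; {
		byteSize := min(size-finish, chunkSize)
		rd := io.NewSectionReader(src, finish, byteSize)
		var err error
		for try := 0; try < 3; try++ { // stand-in for retry.Do
			rd.Seek(0, io.SeekStart) // rewind so a retry re-sends the same bytes
			if err = attempt(rd, byteSize); err == nil {
				break
			}
		}
		if err != nil {
			return err
		}
		finish += byteSize
	}
	return nil
}

func main() {
	err := uploadChunks([]byte("0123456789"), 4, func(r io.Reader, n int64) error {
		b, _ := io.ReadAll(r)
		fmt.Printf("part %q (%d bytes)\n", b, n)
		return nil
	})
	fmt.Println("done:", err)
}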


@ -292,10 +292,10 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
if offset == 0 && limit > 0 {
fileHeader = make([]byte, fileHeaderSize)
n, _ := io.ReadFull(remoteReader, fileHeader)
n, err := io.ReadFull(remoteReader, fileHeader)
if n != fileHeaderSize {
fileHeader = nil
return nil, fmt.Errorf("can't read data, expected=%d, got=%d", fileHeaderSize, n)
return nil, fmt.Errorf("failed to read all data (expected=%d, actual=%d): %w", fileHeaderSize, n, err)
}
if limit <= fileHeaderSize {
remoteReader.Close()
@ -317,8 +317,7 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
}
return readSeeker, nil
}),
SyncClosers: utils.NewSyncClosers(remoteLink),
ContentLength: remoteSize,
SyncClosers: utils.NewSyncClosers(remoteLink),
}, nil
}
@ -402,7 +401,6 @@ func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, streamer model.FileSt
},
Reader: wrappedIn,
Mimetype: "application/octet-stream",
WebPutAsTask: streamer.NeedStore(),
ForceStreamUpload: true,
Exist: streamer.GetExist(),
}

drivers/degoo/driver.go Normal file

@ -0,0 +1,203 @@
package degoo
import (
"context"
"fmt"
"net/http"
"strconv"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
)
type Degoo struct {
model.Storage
Addition
client *http.Client
}
func (d *Degoo) Config() driver.Config {
return config
}
func (d *Degoo) GetAddition() driver.Additional {
return &d.Addition
}
func (d *Degoo) Init(ctx context.Context) error {
d.client = base.HttpClient
// Ensure we have a valid token (will login if needed or refresh if expired)
if err := d.ensureValidToken(ctx); err != nil {
return fmt.Errorf("failed to initialize token: %w", err)
}
return d.getDevices(ctx)
}
func (d *Degoo) Drop(ctx context.Context) error {
return nil
}
func (d *Degoo) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
items, err := d.getAllFileChildren5(ctx, dir.GetID())
if err != nil {
return nil, err
}
return utils.MustSliceConvert(items, func(s DegooFileItem) model.Obj {
isFolder := s.Category == 2 || s.Category == 1 || s.Category == 10
createTime, modTime, _ := humanReadableTimes(s.CreationTime, s.LastModificationTime, s.LastUploadTime)
size, err := strconv.ParseInt(s.Size, 10, 64)
if err != nil {
size = 0 // Default to 0 if size parsing fails
}
return &model.Object{
ID: s.ID,
Path: s.FilePath,
Name: s.Name,
Size: size,
Modified: modTime,
Ctime: createTime,
IsFolder: isFolder,
}
}), nil
}
func (d *Degoo) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
item, err := d.getOverlay4(ctx, file.GetID())
if err != nil {
return nil, err
}
return &model.Link{URL: item.URL}, nil
}
func (d *Degoo) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
// MakeDir creates a folder by calling the setUploadFile3 API with a special folder checksum and zero size.
const query = `mutation SetUploadFile3($Token: String!, $FileInfos: [FileInfoUpload3]!) { setUploadFile3(Token: $Token, FileInfos: $FileInfos) }`
variables := map[string]interface{}{
"Token": d.AccessToken,
"FileInfos": []map[string]interface{}{
{
"Checksum": folderChecksum,
"Name": dirName,
"CreationTime": time.Now().UnixMilli(),
"ParentID": parentDir.GetID(),
"Size": 0,
},
},
}
_, err := d.apiCall(ctx, "SetUploadFile3", query, variables)
if err != nil {
return err
}
return nil
}
func (d *Degoo) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
const query = `mutation SetMoveFile($Token: String!, $Copy: Boolean, $NewParentID: String!, $FileIDs: [String]!) { setMoveFile(Token: $Token, Copy: $Copy, NewParentID: $NewParentID, FileIDs: $FileIDs) }`
variables := map[string]interface{}{
"Token": d.AccessToken,
"Copy": false,
"NewParentID": dstDir.GetID(),
"FileIDs": []string{srcObj.GetID()},
}
_, err := d.apiCall(ctx, "SetMoveFile", query, variables)
if err != nil {
return nil, err
}
return srcObj, nil
}
func (d *Degoo) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
const query = `mutation SetRenameFile($Token: String!, $FileRenames: [FileRenameInfo]!) { setRenameFile(Token: $Token, FileRenames: $FileRenames) }`
variables := map[string]interface{}{
"Token": d.AccessToken,
"FileRenames": []DegooFileRenameInfo{
{
ID: srcObj.GetID(),
NewName: newName,
},
},
}
_, err := d.apiCall(ctx, "SetRenameFile", query, variables)
if err != nil {
return err
}
return nil
}
func (d *Degoo) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
// Copy is not implemented; the Degoo API does not support direct copy.
return nil, errs.NotImplement
}
func (d *Degoo) Remove(ctx context.Context, obj model.Obj) error {
// Remove deletes a file or folder (moves to trash).
const query = `mutation SetDeleteFile5($Token: String!, $IsInRecycleBin: Boolean!, $IDs: [IDType]!) { setDeleteFile5(Token: $Token, IsInRecycleBin: $IsInRecycleBin, IDs: $IDs) }`
variables := map[string]interface{}{
"Token": d.AccessToken,
"IsInRecycleBin": false,
"IDs": []map[string]string{{"FileID": obj.GetID()}},
}
_, err := d.apiCall(ctx, "SetDeleteFile5", query, variables)
return err
}
func (d *Degoo) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
tmpF, err := file.CacheFullAndWriter(&up, nil)
if err != nil {
return err
}
parentID := dstDir.GetID()
// Calculate the checksum for the file.
checksum, err := d.checkSum(tmpF)
if err != nil {
return err
}
// 1. Get upload authorization via getBucketWriteAuth4.
auths, err := d.getBucketWriteAuth4(ctx, file, parentID, checksum)
if err != nil {
return err
}
// 2. Upload file.
// rapid upload: skip the transfer when the server reports the file already exists
if auths.GetBucketWriteAuth4[0].Error != "Already exist!" {
err = d.uploadS3(ctx, auths, tmpF, file, checksum)
if err != nil {
return err
}
}
// 3. Register metadata with setUploadFile3.
data, err := d.SetUploadFile3(ctx, file, parentID, checksum)
if err != nil {
return err
}
if !data.SetUploadFile3 {
return fmt.Errorf("setUploadFile3 failed: %v", data)
}
return nil
}

drivers/degoo/meta.go Normal file

@ -0,0 +1,27 @@
package degoo
import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/op"
)
type Addition struct {
driver.RootID
Username string `json:"username" help:"Your Degoo account email"`
Password string `json:"password" help:"Your Degoo account password"`
RefreshToken string `json:"refresh_token" help:"Refresh token for automatic token renewal, obtained automatically"`
AccessToken string `json:"access_token" help:"Access token for Degoo API, obtained automatically"`
}
var config = driver.Config{
Name: "Degoo",
LocalSort: true,
DefaultRoot: "0",
NoOverwriteUpload: true,
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &Degoo{}
})
}

drivers/degoo/types.go Normal file

@ -0,0 +1,110 @@
package degoo
import (
"encoding/json"
)
// DegooLoginRequest represents the login request body.
type DegooLoginRequest struct {
GenerateToken bool `json:"GenerateToken"`
Username string `json:"Username"`
Password string `json:"Password"`
}
// DegooLoginResponse represents a successful login response.
type DegooLoginResponse struct {
Token string `json:"Token"`
RefreshToken string `json:"RefreshToken"`
}
// DegooAccessTokenRequest represents the token refresh request body.
type DegooAccessTokenRequest struct {
RefreshToken string `json:"RefreshToken"`
}
// DegooAccessTokenResponse represents the token refresh response.
type DegooAccessTokenResponse struct {
AccessToken string `json:"AccessToken"`
}
// DegooFileItem represents a Degoo file or folder.
type DegooFileItem struct {
ID string `json:"ID"`
ParentID string `json:"ParentID"`
Name string `json:"Name"`
Category int `json:"Category"`
Size string `json:"Size"`
URL string `json:"URL"`
CreationTime string `json:"CreationTime"`
LastModificationTime string `json:"LastModificationTime"`
LastUploadTime string `json:"LastUploadTime"`
MetadataID string `json:"MetadataID"`
DeviceID int64 `json:"DeviceID"`
FilePath string `json:"FilePath"`
IsInRecycleBin bool `json:"IsInRecycleBin"`
}
type DegooErrors struct {
Path []string `json:"path"`
Data interface{} `json:"data"`
ErrorType string `json:"errorType"`
ErrorInfo interface{} `json:"errorInfo"`
Message string `json:"message"`
}
// DegooGraphqlResponse is the common structure for GraphQL API responses.
type DegooGraphqlResponse struct {
Data json.RawMessage `json:"data"`
Errors []DegooErrors `json:"errors,omitempty"`
}
// DegooGetChildren5Data is the data field for getFileChildren5.
type DegooGetChildren5Data struct {
GetFileChildren5 struct {
Items []DegooFileItem `json:"Items"`
NextToken string `json:"NextToken"`
} `json:"getFileChildren5"`
}
// DegooGetOverlay4Data is the data field for getOverlay4.
type DegooGetOverlay4Data struct {
GetOverlay4 DegooFileItem `json:"getOverlay4"`
}
// DegooFileRenameInfo represents a file rename operation.
type DegooFileRenameInfo struct {
ID string `json:"ID"`
NewName string `json:"NewName"`
}
// DegooFileIDs represents a list of file IDs for move operations.
type DegooFileIDs struct {
FileIDs []string `json:"FileIDs"`
}
// DegooGetBucketWriteAuth4Data is the data field for GetBucketWriteAuth4.
type DegooGetBucketWriteAuth4Data struct {
GetBucketWriteAuth4 []struct {
AuthData struct {
PolicyBase64 string `json:"PolicyBase64"`
Signature string `json:"Signature"`
BaseURL string `json:"BaseURL"`
KeyPrefix string `json:"KeyPrefix"`
AccessKey struct {
Key string `json:"Key"`
Value string `json:"Value"`
} `json:"AccessKey"`
ACL string `json:"ACL"`
AdditionalBody []struct {
Key string `json:"Key"`
Value string `json:"Value"`
} `json:"AdditionalBody"`
} `json:"AuthData"`
Error interface{} `json:"Error"`
} `json:"getBucketWriteAuth4"`
}
// DegooSetUploadFile3Data is the data field for SetUploadFile3.
type DegooSetUploadFile3Data struct {
SetUploadFile3 bool `json:"setUploadFile3"`
}

drivers/degoo/upload.go Normal file

@ -0,0 +1,198 @@
package degoo
import (
"bytes"
"context"
"crypto/sha1"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"mime/multipart"
"net/http"
"strconv"
"strings"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
)
func (d *Degoo) getBucketWriteAuth4(ctx context.Context, file model.FileStreamer, parentID string, checksum string) (*DegooGetBucketWriteAuth4Data, error) {
const query = `query GetBucketWriteAuth4(
$Token: String!
$ParentID: String!
$StorageUploadInfos: [StorageUploadInfo2]
) {
getBucketWriteAuth4(
Token: $Token
ParentID: $ParentID
StorageUploadInfos: $StorageUploadInfos
) {
AuthData {
PolicyBase64
Signature
BaseURL
KeyPrefix
AccessKey {
Key
Value
}
ACL
AdditionalBody {
Key
Value
}
}
Error
}
}`
variables := map[string]interface{}{
"Token": d.AccessToken,
"ParentID": parentID,
"StorageUploadInfos": []map[string]string{{
"FileName": file.GetName(),
"Checksum": checksum,
"Size": strconv.FormatInt(file.GetSize(), 10),
}}}
data, err := d.apiCall(ctx, "GetBucketWriteAuth4", query, variables)
if err != nil {
return nil, err
}
var resp DegooGetBucketWriteAuth4Data
err = json.Unmarshal(data, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}
// checkSum calculates the SHA1-based checksum for Degoo upload API.
func (d *Degoo) checkSum(file io.Reader) (string, error) {
seed := []byte{13, 7, 2, 2, 15, 40, 75, 117, 13, 10, 19, 16, 29, 23, 3, 36}
hasher := sha1.New()
hasher.Write(seed)
if _, err := utils.CopyWithBuffer(hasher, file); err != nil {
return "", err
}
cs := hasher.Sum(nil)
csBytes := []byte{10, byte(len(cs))}
csBytes = append(csBytes, cs...)
csBytes = append(csBytes, 16, 0)
return strings.ReplaceAll(base64.StdEncoding.EncodeToString(csBytes), "/", "_"), nil
}
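A self-contained sketch of the framing checkSum performs, using stdlib only (input bytes illustrative; the seed and framing constants are the ones shown above):

package main

import (
	"crypto/sha1"
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	// Degoo's checksum framing: SHA1(seed || data), the digest wrapped as
	// 0x0A, len, digest, 0x10, 0x00, then base64 with '/' replaced by '_'.
	seed := []byte{13, 7, 2, 2, 15, 40, 75, 117, 13, 10, 19, 16, 29, 23, 3, 36}
	h := sha1.New()
	h.Write(seed)
	h.Write([]byte("hello")) // illustrative file content
	cs := h.Sum(nil)
	framed := append([]byte{10, byte(len(cs))}, cs...)
	framed = append(framed, 16, 0)
	fmt.Println(strings.ReplaceAll(base64.StdEncoding.EncodeToString(framed), "/", "_"))
}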
func (d *Degoo) uploadS3(ctx context.Context, auths *DegooGetBucketWriteAuth4Data, tmpF model.File, file model.FileStreamer, checksum string) error {
a := auths.GetBucketWriteAuth4[0].AuthData
_, err := tmpF.Seek(0, io.SeekStart)
if err != nil {
return err
}
ext := utils.Ext(file.GetName())
key := fmt.Sprintf("%s%s/%s.%s", a.KeyPrefix, ext, checksum, ext)
var b bytes.Buffer
w := multipart.NewWriter(&b)
err = w.WriteField("key", key)
if err != nil {
return err
}
err = w.WriteField("acl", a.ACL)
if err != nil {
return err
}
err = w.WriteField("policy", a.PolicyBase64)
if err != nil {
return err
}
err = w.WriteField("signature", a.Signature)
if err != nil {
return err
}
err = w.WriteField(a.AccessKey.Key, a.AccessKey.Value)
if err != nil {
return err
}
for _, additional := range a.AdditionalBody {
err = w.WriteField(additional.Key, additional.Value)
if err != nil {
return err
}
}
err = w.WriteField("Content-Type", "")
if err != nil {
return err
}
_, err = w.CreateFormFile("file", key)
if err != nil {
return err
}
headSize := b.Len()
err = w.Close()
if err != nil {
return err
}
head := bytes.NewReader(b.Bytes()[:headSize])
tail := bytes.NewReader(b.Bytes()[headSize:])
rateLimitedRd := driver.NewLimitedUploadStream(ctx, io.MultiReader(head, tmpF, tail))
req, err := http.NewRequestWithContext(ctx, http.MethodPost, a.BaseURL, rateLimitedRd)
if err != nil {
return err
}
req.Header.Add("ngsw-bypass", "1")
req.Header.Add("Content-Type", w.FormDataContentType())
res, err := d.client.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != http.StatusNoContent {
return fmt.Errorf("upload failed with status code %d", res.StatusCode)
}
return nil
}
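Worth noting in uploadS3 above: the multipart form is assembled once into a buffer and split at headSize, so the file body can be streamed between the form's head and tail without buffering the whole file. A dependency-free sketch of the same trick (field names illustrative):

package main

import (
	"bytes"
	"fmt"
	"io"
	"mime/multipart"
	"strings"
)

func main() {
	var b bytes.Buffer
	w := multipart.NewWriter(&b)
	w.WriteField("key", "uploads/example.bin") // illustrative form field
	w.CreateFormFile("file", "example.bin")    // writes the file part header only
	headSize := b.Len()                        // everything before the file bytes
	w.Close()                                  // appends the closing boundary

	head := bytes.NewReader(b.Bytes()[:headSize])
	tail := bytes.NewReader(b.Bytes()[headSize:])
	file := strings.NewReader("file contents streamed, not buffered")

	body, _ := io.ReadAll(io.MultiReader(head, file, tail))
	fmt.Println(len(body), "bytes; content type:", w.FormDataContentType())
}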
var _ driver.Driver = (*Degoo)(nil)
func (d *Degoo) SetUploadFile3(ctx context.Context, file model.FileStreamer, parentID string, checksum string) (*DegooSetUploadFile3Data, error) {
const query = `mutation SetUploadFile3($Token: String!, $FileInfos: [FileInfoUpload3]!) {
setUploadFile3(Token: $Token, FileInfos: $FileInfos)
}`
variables := map[string]interface{}{
"Token": d.AccessToken,
"FileInfos": []map[string]string{{
"Checksum": checksum,
"CreationTime": strconv.FormatInt(file.CreateTime().UnixMilli(), 10),
"Name": file.GetName(),
"ParentID": parentID,
"Size": strconv.FormatInt(file.GetSize(), 10),
}}}
data, err := d.apiCall(ctx, "SetUploadFile3", query, variables)
if err != nil {
return nil, err
}
var resp DegooSetUploadFile3Data
err = json.Unmarshal(data, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}

drivers/degoo/util.go Normal file

@ -0,0 +1,462 @@
package degoo
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"fmt"
"net/http"
"strconv"
"strings"
"sync"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/op"
)
// Thanks to https://github.com/bernd-wechner/Degoo for API research.
const (
// API endpoints
loginURL = "https://rest-api.degoo.com/login"
accessTokenURL = "https://rest-api.degoo.com/access-token/v2"
apiURL = "https://production-appsync.degoo.com/graphql"
// API configuration
apiKey = "da2-vs6twz5vnjdavpqndtbzg3prra"
folderChecksum = "CgAQAg"
// Token management
tokenRefreshThreshold = 5 * time.Minute
// Rate limiting
minRequestInterval = 1 * time.Second
// Error messages
errRateLimited = "rate limited (429), please try again later"
errUnauthorized = "unauthorized access"
)
var (
// Global rate limiting - protects against concurrent API calls
lastRequestTime time.Time
requestMutex sync.Mutex
)
// JWT payload structure for token expiration checking
type JWTPayload struct {
UserID string `json:"userID"`
Exp int64 `json:"exp"`
Iat int64 `json:"iat"`
}
// Rate limiting helper functions
// applyRateLimit ensures minimum interval between API requests
func applyRateLimit() {
requestMutex.Lock()
defer requestMutex.Unlock()
if !lastRequestTime.IsZero() {
if elapsed := time.Since(lastRequestTime); elapsed < minRequestInterval {
time.Sleep(minRequestInterval - elapsed)
}
}
lastRequestTime = time.Now()
}
// HTTP request helper functions
// createJSONRequest creates a new HTTP request with JSON body
func createJSONRequest(ctx context.Context, method, url string, body interface{}) (*http.Request, error) {
jsonBody, err := json.Marshal(body)
if err != nil {
return nil, fmt.Errorf("failed to marshal request body: %w", err)
}
req, err := http.NewRequestWithContext(ctx, method, url, bytes.NewBuffer(jsonBody))
if err != nil {
return nil, fmt.Errorf("failed to create request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("User-Agent", base.UserAgent)
return req, nil
}
// checkHTTPResponse checks for common HTTP error conditions
func checkHTTPResponse(resp *http.Response, operation string) error {
if resp.StatusCode == http.StatusTooManyRequests {
return fmt.Errorf("%s %s", operation, errRateLimited)
}
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("%s failed: %s", operation, resp.Status)
}
return nil
}
// isTokenExpired checks if the JWT token is expired or will expire soon
func (d *Degoo) isTokenExpired() bool {
if d.AccessToken == "" {
return true
}
payload, err := extractJWTPayload(d.AccessToken)
if err != nil {
return true // Invalid token format
}
// Check if token expires within the threshold
expireTime := time.Unix(payload.Exp, 0)
return time.Now().Add(tokenRefreshThreshold).After(expireTime)
}
// extractJWTPayload extracts and parses JWT payload
func extractJWTPayload(token string) (*JWTPayload, error) {
parts := strings.Split(token, ".")
if len(parts) != 3 {
return nil, fmt.Errorf("invalid JWT format")
}
// Decode the payload (second part)
payload, err := base64.RawURLEncoding.DecodeString(parts[1])
if err != nil {
return nil, fmt.Errorf("failed to decode JWT payload: %w", err)
}
var jwtPayload JWTPayload
if err := json.Unmarshal(payload, &jwtPayload); err != nil {
return nil, fmt.Errorf("failed to parse JWT payload: %w", err)
}
return &jwtPayload, nil
}
// refreshToken attempts to refresh the access token using the refresh token
func (d *Degoo) refreshToken(ctx context.Context) error {
if d.RefreshToken == "" {
return fmt.Errorf("no refresh token available")
}
// Create request
tokenReq := DegooAccessTokenRequest{RefreshToken: d.RefreshToken}
req, err := createJSONRequest(ctx, "POST", accessTokenURL, tokenReq)
if err != nil {
return fmt.Errorf("failed to create refresh token request: %w", err)
}
// Execute request
resp, err := d.client.Do(req)
if err != nil {
return fmt.Errorf("refresh token request failed: %w", err)
}
defer resp.Body.Close()
// Check response
if err := checkHTTPResponse(resp, "refresh token"); err != nil {
return err
}
var accessTokenResp DegooAccessTokenResponse
if err := json.NewDecoder(resp.Body).Decode(&accessTokenResp); err != nil {
return fmt.Errorf("failed to parse access token response: %w", err)
}
if accessTokenResp.AccessToken == "" {
return fmt.Errorf("empty access token received")
}
d.AccessToken = accessTokenResp.AccessToken
// Save the updated token to storage
op.MustSaveDriverStorage(d)
return nil
}
// ensureValidToken ensures we have a valid, non-expired token
func (d *Degoo) ensureValidToken(ctx context.Context) error {
// Check if token is expired or will expire soon
if d.isTokenExpired() {
// Try to refresh token first if we have a refresh token
if d.RefreshToken != "" {
if refreshErr := d.refreshToken(ctx); refreshErr == nil {
return nil // Successfully refreshed
} else {
// If refresh failed, fall back to full login
fmt.Printf("Token refresh failed, falling back to full login: %v\n", refreshErr)
}
}
// Perform full login
if d.Username != "" && d.Password != "" {
return d.login(ctx)
}
}
return nil
}
// login performs the login process and retrieves the access token.
func (d *Degoo) login(ctx context.Context) error {
if d.Username == "" || d.Password == "" {
return fmt.Errorf("username or password not provided")
}
creds := DegooLoginRequest{
GenerateToken: true,
Username: d.Username,
Password: d.Password,
}
jsonCreds, err := json.Marshal(creds)
if err != nil {
return fmt.Errorf("failed to serialize login credentials: %w", err)
}
req, err := http.NewRequestWithContext(ctx, "POST", loginURL, bytes.NewBuffer(jsonCreds))
if err != nil {
return fmt.Errorf("failed to create login request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("User-Agent", base.UserAgent)
req.Header.Set("Origin", "https://app.degoo.com")
resp, err := d.client.Do(req)
if err != nil {
return fmt.Errorf("login request failed: %w", err)
}
defer resp.Body.Close()
// Handle rate limiting (429 Too Many Requests)
if resp.StatusCode == http.StatusTooManyRequests {
return fmt.Errorf("login rate limited (429), please try again later")
}
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("login failed: %s", resp.Status)
}
var loginResp DegooLoginResponse
if err := json.NewDecoder(resp.Body).Decode(&loginResp); err != nil {
return fmt.Errorf("failed to parse login response: %w", err)
}
if loginResp.RefreshToken != "" {
tokenReq := DegooAccessTokenRequest{RefreshToken: loginResp.RefreshToken}
jsonTokenReq, err := json.Marshal(tokenReq)
if err != nil {
return fmt.Errorf("failed to serialize access token request: %w", err)
}
tokenReqHTTP, err := http.NewRequestWithContext(ctx, "POST", accessTokenURL, bytes.NewBuffer(jsonTokenReq))
if err != nil {
return fmt.Errorf("failed to create access token request: %w", err)
}
tokenReqHTTP.Header.Set("User-Agent", base.UserAgent)
tokenResp, err := d.client.Do(tokenReqHTTP)
if err != nil {
return fmt.Errorf("failed to get access token: %w", err)
}
defer tokenResp.Body.Close()
var accessTokenResp DegooAccessTokenResponse
if err := json.NewDecoder(tokenResp.Body).Decode(&accessTokenResp); err != nil {
return fmt.Errorf("failed to parse access token response: %w", err)
}
d.AccessToken = accessTokenResp.AccessToken
d.RefreshToken = loginResp.RefreshToken // Save refresh token
} else if loginResp.Token != "" {
d.AccessToken = loginResp.Token
d.RefreshToken = "" // Direct token, no refresh token available
} else {
return fmt.Errorf("login failed, no valid token returned")
}
// Save the updated tokens to storage
op.MustSaveDriverStorage(d)
return nil
}
// apiCall performs a Degoo GraphQL API request.
func (d *Degoo) apiCall(ctx context.Context, operationName, query string, variables map[string]interface{}) (json.RawMessage, error) {
// Apply rate limiting
applyRateLimit()
// Ensure we have a valid token before making the API call
if err := d.ensureValidToken(ctx); err != nil {
return nil, fmt.Errorf("failed to ensure valid token: %w", err)
}
// Update the Token in variables if it exists (after potential refresh)
d.updateTokenInVariables(variables)
return d.executeGraphQLRequest(ctx, operationName, query, variables)
}
// updateTokenInVariables updates the Token field in GraphQL variables
func (d *Degoo) updateTokenInVariables(variables map[string]interface{}) {
if variables != nil {
if _, hasToken := variables["Token"]; hasToken {
variables["Token"] = d.AccessToken
}
}
}
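For reference, executeGraphQLRequest below posts a conventional GraphQL body of operationName, query and variables; a sketch of what the payload looks like for getOverlay4 (token and ID illustrative):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	body := map[string]interface{}{
		"operationName": "GetOverlay4",
		"query":         `query GetOverlay4($Token: String!, $ID: IDType!) { getOverlay4(Token: $Token, ID: $ID) { ID Name Size URL } }`,
		"variables": map[string]interface{}{
			"Token": "<access-token>",                    // injected by updateTokenInVariables
			"ID":    map[string]string{"FileID": "12345"}, // illustrative ID
		},
	}
	out, _ := json.MarshalIndent(body, "", "  ")
	fmt.Println(string(out))
}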
// executeGraphQLRequest executes a GraphQL request with retry logic
func (d *Degoo) executeGraphQLRequest(ctx context.Context, operationName, query string, variables map[string]interface{}) (json.RawMessage, error) {
reqBody := map[string]interface{}{
"operationName": operationName,
"query": query,
"variables": variables,
}
// Create and configure request
req, err := createJSONRequest(ctx, "POST", apiURL, reqBody)
if err != nil {
return nil, err
}
// Set Degoo-specific headers
req.Header.Set("x-api-key", apiKey)
if d.AccessToken != "" {
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", d.AccessToken))
}
// Execute request
resp, err := d.client.Do(req)
if err != nil {
return nil, fmt.Errorf("GraphQL API request failed: %w", err)
}
defer resp.Body.Close()
// Check for HTTP errors
if err := checkHTTPResponse(resp, "GraphQL API"); err != nil {
return nil, err
}
// Parse GraphQL response
var degooResp DegooGraphqlResponse
if err := json.NewDecoder(resp.Body).Decode(&degooResp); err != nil {
return nil, fmt.Errorf("failed to decode GraphQL response: %w", err)
}
// Handle GraphQL errors
if len(degooResp.Errors) > 0 {
return d.handleGraphQLError(ctx, degooResp.Errors[0], operationName, query, variables)
}
return degooResp.Data, nil
}
// handleGraphQLError handles GraphQL-level errors with retry logic
func (d *Degoo) handleGraphQLError(ctx context.Context, gqlError DegooErrors, operationName, query string, variables map[string]interface{}) (json.RawMessage, error) {
if gqlError.ErrorType == "Unauthorized" {
// Re-login and retry
if err := d.login(ctx); err != nil {
return nil, fmt.Errorf("%s, login failed: %w", errUnauthorized, err)
}
// Update token in variables and retry
d.updateTokenInVariables(variables)
return d.apiCall(ctx, operationName, query, variables)
}
return nil, fmt.Errorf("GraphQL API error: %s", gqlError.Message)
}
// humanReadableTimes converts Degoo timestamps to Go time.Time.
func humanReadableTimes(creation, modification, upload string) (cTime, mTime, uTime time.Time) {
cTime, _ = time.Parse(time.RFC3339, creation)
if modification != "" {
modMillis, _ := strconv.ParseInt(modification, 10, 64)
mTime = time.Unix(0, modMillis*int64(time.Millisecond))
}
if upload != "" {
upMillis, _ := strconv.ParseInt(upload, 10, 64)
uTime = time.Unix(0, upMillis*int64(time.Millisecond))
}
return cTime, mTime, uTime
}
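A quick usage sketch of the conversion above (timestamps illustrative): Degoo reports creation time as RFC3339 and modification/upload times as millisecond-epoch strings:

package main

import (
	"fmt"
	"strconv"
	"time"
)

func main() {
	// Creation arrives as RFC3339, modification/upload as epoch milliseconds.
	cTime, _ := time.Parse(time.RFC3339, "2025-08-28T02:43:07+08:00")
	modMillis, _ := strconv.ParseInt("1756320187000", 10, 64)
	mTime := time.Unix(0, modMillis*int64(time.Millisecond))
	fmt.Println(cTime.UTC(), mTime.UTC())
}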
// getDevices fetches and caches top-level devices and folders.
func (d *Degoo) getDevices(ctx context.Context) error {
const query = `query GetFileChildren5($Token: String! $ParentID: String $AllParentIDs: [String] $Limit: Int! $Order: Int! $NextToken: String ) { getFileChildren5(Token: $Token ParentID: $ParentID AllParentIDs: $AllParentIDs Limit: $Limit Order: $Order NextToken: $NextToken) { Items { ParentID } NextToken } }`
variables := map[string]interface{}{
"Token": d.AccessToken,
"ParentID": "0",
"Limit": 10,
"Order": 3,
}
data, err := d.apiCall(ctx, "GetFileChildren5", query, variables)
if err != nil {
return err
}
var resp DegooGetChildren5Data
if err := json.Unmarshal(data, &resp); err != nil {
return fmt.Errorf("failed to parse device list: %w", err)
}
if d.RootFolderID == "0" {
if len(resp.GetFileChildren5.Items) > 0 {
d.RootFolderID = resp.GetFileChildren5.Items[0].ParentID
}
op.MustSaveDriverStorage(d)
}
return nil
}
// getAllFileChildren5 fetches all children of a directory with pagination.
func (d *Degoo) getAllFileChildren5(ctx context.Context, parentID string) ([]DegooFileItem, error) {
const query = `query GetFileChildren5($Token: String! $ParentID: String $AllParentIDs: [String] $Limit: Int! $Order: Int! $NextToken: String ) { getFileChildren5(Token: $Token ParentID: $ParentID AllParentIDs: $AllParentIDs Limit: $Limit Order: $Order NextToken: $NextToken) { Items { ID ParentID Name Category Size CreationTime LastModificationTime LastUploadTime FilePath IsInRecycleBin DeviceID MetadataID } NextToken } }`
var allItems []DegooFileItem
nextToken := ""
for {
variables := map[string]interface{}{
"Token": d.AccessToken,
"ParentID": parentID,
"Limit": 1000,
"Order": 3,
}
if nextToken != "" {
variables["NextToken"] = nextToken
}
data, err := d.apiCall(ctx, "GetFileChildren5", query, variables)
if err != nil {
return nil, err
}
var resp DegooGetChildren5Data
if err := json.Unmarshal(data, &resp); err != nil {
return nil, err
}
allItems = append(allItems, resp.GetFileChildren5.Items...)
if resp.GetFileChildren5.NextToken == "" {
break
}
nextToken = resp.GetFileChildren5.NextToken
}
return allItems, nil
}
// getOverlay4 fetches metadata for a single item by ID.
func (d *Degoo) getOverlay4(ctx context.Context, id string) (DegooFileItem, error) {
const query = `query GetOverlay4($Token: String!, $ID: IDType!) { getOverlay4(Token: $Token, ID: $ID) { ID ParentID Name Category Size CreationTime LastModificationTime LastUploadTime URL FilePath IsInRecycleBin DeviceID MetadataID } }`
variables := map[string]interface{}{
"Token": d.AccessToken,
"ID": map[string]string{
"FileID": id,
},
}
data, err := d.apiCall(ctx, "GetOverlay4", query, variables)
if err != nil {
return DegooFileItem{}, err
}
var resp DegooGetOverlay4Data
if err := json.Unmarshal(data, &resp); err != nil {
return DegooFileItem{}, fmt.Errorf("failed to parse item metadata: %w", err)
}
return resp.GetOverlay4, nil
}


@ -236,7 +236,7 @@ func (d *Doubao) Put(ctx context.Context, dstDir model.Obj, file model.FileStrea
// choose the upload method based on file size
if file.GetSize() <= 1*utils.MB { // files up to 1MB use the simple upload path
return d.Upload(&uploadConfig, dstDir, file, up, dataType)
return d.Upload(ctx, &uploadConfig, dstDir, file, up, dataType)
}
// larger files use multipart upload
return d.UploadByMultipart(ctx, &uploadConfig, file.GetSize(), dstDir, file, up, dataType)


@ -129,7 +129,7 @@ type BuiAuditInfo struct {
AuditInfo AuditInfo `json:"audit_info"`
IsAuditing bool `json:"is_auditing"`
AuditStatus int `json:"audit_status"`
LastUpdateTime int `json:"last_update_time"`
LastUpdateTime int64 `json:"last_update_time"`
UnpassReason string `json:"unpass_reason"`
Details Details `json:"details"`
}
@ -184,7 +184,7 @@ type UserInfo struct {
SecUserID string `json:"sec_user_id"`
SessionKey string `json:"session_key"`
UseHmRegion bool `json:"use_hm_region"`
UserCreateTime int `json:"user_create_time"`
UserCreateTime int64 `json:"user_create_time"`
UserID int64 `json:"user_id"`
UserIDStr string `json:"user_id_str"`
UserVerified bool `json:"user_verified"`


@ -24,6 +24,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
@ -447,41 +448,67 @@ func (d *Doubao) uploadNode(uploadConfig *UploadConfig, dir model.Obj, file mode
}
// Upload implements the simple (single-request) upload
func (d *Doubao) Upload(config *UploadConfig, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, dataType string) (model.Obj, error) {
data, err := io.ReadAll(file)
func (d *Doubao) Upload(ctx context.Context, config *UploadConfig, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, dataType string) (model.Obj, error) {
ss, err := stream.NewStreamSectionReader(file, int(file.GetSize()), &up)
if err != nil {
return nil, err
}
reader, err := ss.GetSectionReader(0, file.GetSize())
if err != nil {
return nil, err
}
// compute the CRC32 checksum
crc32Hash := crc32.NewIEEE()
crc32Hash.Write(data)
w, err := utils.CopyWithBuffer(crc32Hash, reader)
if w != file.GetSize() {
return nil, fmt.Errorf("failed to read all data (expected=%d, actual=%d): %w", file.GetSize(), w, err)
}
crc32Value := hex.EncodeToString(crc32Hash.Sum(nil))
// build the request URL
uploadNode := config.InnerUploadAddress.UploadNodes[0]
storeInfo := uploadNode.StoreInfos[0]
uploadUrl := fmt.Sprintf("https://%s/upload/v1/%s", uploadNode.UploadHost, storeInfo.StoreURI)
uploadResp := UploadResp{}
if _, err = d.uploadRequest(uploadUrl, http.MethodPost, storeInfo, func(req *resty.Request) {
req.SetHeaders(map[string]string{
"Content-Type": "application/octet-stream",
"Content-Crc32": crc32Value,
"Content-Length": fmt.Sprintf("%d", len(data)),
"Content-Disposition": fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI)),
})
req.SetBody(data)
}, &uploadResp); err != nil {
rateLimitedRd := driver.NewLimitedUploadStream(ctx, reader)
err = d._retryOperation("Upload", func() error {
reader.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadUrl, rateLimitedRd)
if err != nil {
return err
}
req.Header = map[string][]string{
"Referer": {BaseURL + "/"},
"Origin": {BaseURL},
"User-Agent": {UserAgent},
"X-Storage-U": {d.UserId},
"Authorization": {storeInfo.Auth},
"Content-Type": {"application/octet-stream"},
"Content-Crc32": {crc32Value},
"Content-Length": {fmt.Sprintf("%d", file.GetSize())},
"Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))},
}
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
bytes, _ := io.ReadAll(res.Body)
resp := UploadResp{}
utils.Json.Unmarshal(bytes, &resp)
if resp.Code != 2000 {
return fmt.Errorf("upload part failed: %s", resp.Message)
} else if resp.Data.Crc32 != crc32Value {
return fmt.Errorf("upload part failed: crc32 mismatch, expected %s, got %s", crc32Value, resp.Data.Crc32)
}
return nil
})
ss.FreeSectionReader(reader)
if err != nil {
return nil, err
}
if uploadResp.Code != 2000 {
return nil, fmt.Errorf("upload failed: %s", uploadResp.Message)
}
uploadNodeResp, err := d.uploadNode(config, dstDir, file, dataType)
if err != nil {
return nil, err
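Both Doubao upload paths send a Content-Crc32 header computed over the exact bytes of the request body, which the server echoes back for verification. A tiny stdlib-only sketch of that computation:

package main

import (
	"encoding/hex"
	"fmt"
	"hash/crc32"
	"io"
	"strings"
)

func main() {
	// Stream the part through the IEEE CRC32 hash, then hex-encode the
	// digest for the Content-Crc32 header, as the upload code above does.
	h := crc32.NewIEEE()
	if _, err := io.Copy(h, strings.NewReader("hello")); err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(h.Sum(nil))) // 3610a686
}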
@ -516,68 +543,107 @@ func (d *Doubao) UploadByMultipart(ctx context.Context, config *UploadConfig, fi
if config.InnerUploadAddress.AdvanceOption.SliceSize > 0 {
chunkSize = int64(config.InnerUploadAddress.AdvanceOption.SliceSize)
}
ss, err := stream.NewStreamSectionReader(file, int(chunkSize), &up)
if err != nil {
return nil, err
}
totalParts := (fileSize + chunkSize - 1) / chunkSize
// create the slice of part records
parts := make([]UploadPart, totalParts)
// cache the file
tempFile, err := file.CacheFullInTempFile()
if err != nil {
return nil, fmt.Errorf("failed to cache file: %w", err)
}
up(10.0) // update progress
// set up parallel uploads
threadG, uploadCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
retry.Attempts(1),
thread := min(int(totalParts), d.uploadThread)
threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
retry.Attempts(MaxRetryAttempts),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
retry.DelayType(retry.BackOffDelay),
retry.MaxJitter(200*time.Millisecond),
)
var partsMutex sync.Mutex
// upload all parts in parallel
for partIndex := int64(0); partIndex < totalParts; partIndex++ {
hash := crc32.NewIEEE()
for partIndex := range totalParts {
if utils.IsCanceled(uploadCtx) {
break
}
partIndex := partIndex
partNumber := partIndex + 1 // part numbers start at 1
threadG.Go(func(ctx context.Context) error {
// compute this part's size and offset
offset := partIndex * chunkSize
size := chunkSize
if partIndex == totalParts-1 {
size = fileSize - offset
}
limitedReader := driver.NewLimitedUploadStream(ctx, io.NewSectionReader(tempFile, offset, size))
// read the part into memory
data, err := io.ReadAll(limitedReader)
if err != nil {
return fmt.Errorf("failed to read part %d: %w", partNumber, err)
}
// compute the CRC32 checksum
crc32Value := calculateCRC32(data)
// upload the part via _retryOperation
var uploadPart UploadPart
if err = d._retryOperation(fmt.Sprintf("Upload part %d", partNumber), func() error {
var err error
uploadPart, err = d.uploadPart(config, uploadUrl, uploadID, partNumber, data, crc32Value)
return err
}); err != nil {
return fmt.Errorf("part %d upload failed: %w", partNumber, err)
}
// record the successfully uploaded part
partsMutex.Lock()
parts[partIndex] = UploadPart{
PartNumber: strconv.FormatInt(partNumber, 10),
Etag: uploadPart.Etag,
Crc32: crc32Value,
}
partsMutex.Unlock()
// update progress
progress := 10.0 + 90.0*float64(threadG.Success()+1)/float64(totalParts)
up(math.Min(progress, 95.0))
return nil
// compute this part's size and offset
offset := partIndex * chunkSize
size := chunkSize
if partIndex == totalParts-1 {
size = fileSize - offset
}
var reader *stream.SectionReader
var rateLimitedRd io.Reader
crc32Value := ""
threadG.GoWithLifecycle(errgroup.Lifecycle{
Before: func(ctx context.Context) error {
if reader == nil {
var err error
reader, err = ss.GetSectionReader(offset, size)
if err != nil {
return err
}
hash.Reset()
w, err := utils.CopyWithBuffer(hash, reader)
if w != size {
return fmt.Errorf("failed to read all data (expected=%d, actual=%d): %w", size, w, err)
}
crc32Value = hex.EncodeToString(hash.Sum(nil))
rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
}
return nil
},
Do: func(ctx context.Context) error {
reader.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("%s?uploadid=%s&part_number=%d&phase=transfer", uploadUrl, uploadID, partNumber), rateLimitedRd)
if err != nil {
return err
}
req.Header = map[string][]string{
"Referer": {BaseURL + "/"},
"Origin": {BaseURL},
"User-Agent": {UserAgent},
"X-Storage-U": {d.UserId},
"Authorization": {storeInfo.Auth},
"Content-Type": {"application/octet-stream"},
"Content-Crc32": {crc32Value},
"Content-Length": {fmt.Sprintf("%d", size)},
"Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))},
}
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
bytes, _ := io.ReadAll(res.Body)
uploadResp := UploadResp{}
utils.Json.Unmarshal(bytes, &uploadResp)
if uploadResp.Code != 2000 {
return fmt.Errorf("upload part failed: %s", uploadResp.Message)
} else if uploadResp.Data.Crc32 != crc32Value {
return fmt.Errorf("upload part failed: crc32 mismatch, expected %s, got %s", crc32Value, uploadResp.Data.Crc32)
}
// record the successfully uploaded part
partsMutex.Lock()
parts[partIndex] = UploadPart{
PartNumber: strconv.FormatInt(partNumber, 10),
Etag: uploadResp.Data.Etag,
Crc32: crc32Value,
}
partsMutex.Unlock()
// update progress
progress := 10.0 + 90.0*float64(threadG.Success()+1)/float64(totalParts)
up(math.Min(progress, 95.0))
return nil
},
After: func(err error) {
ss.FreeSectionReader(reader)
},
})
}
@ -680,42 +746,6 @@ func (d *Doubao) initMultipartUpload(config *UploadConfig, uploadUrl string, sto
return uploadResp.Data.UploadId, nil
}
// uploadPart uploads a single part
func (d *Doubao) uploadPart(config *UploadConfig, uploadUrl, uploadID string, partNumber int64, data []byte, crc32Value string) (resp UploadPart, err error) {
uploadResp := UploadResp{}
storeInfo := config.InnerUploadAddress.UploadNodes[0].StoreInfos[0]
_, err = d.uploadRequest(uploadUrl, http.MethodPost, storeInfo, func(req *resty.Request) {
req.SetHeaders(map[string]string{
"Content-Type": "application/octet-stream",
"Content-Crc32": crc32Value,
"Content-Length": fmt.Sprintf("%d", len(data)),
"Content-Disposition": fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI)),
})
req.SetQueryParams(map[string]string{
"uploadid": uploadID,
"part_number": strconv.FormatInt(partNumber, 10),
"phase": "transfer",
})
req.SetBody(data)
req.SetContentLength(true)
}, &uploadResp)
if err != nil {
return resp, err
}
if uploadResp.Code != 2000 {
return resp, fmt.Errorf("upload part failed: %s", uploadResp.Message)
} else if uploadResp.Data.Crc32 != crc32Value {
return resp, fmt.Errorf("upload part failed: crc32 mismatch, expected %s, got %s", crc32Value, uploadResp.Data.Crc32)
}
return uploadResp.Data, nil
}
// completeMultipartUpload finishes the multipart upload
func (d *Doubao) completeMultipartUpload(config *UploadConfig, uploadUrl, uploadID string, parts []UploadPart) error {
uploadResp := UploadResp{}
@ -784,13 +814,6 @@ func (d *Doubao) commitMultipartUpload(uploadConfig *UploadConfig) error {
return nil
}
// calculateCRC32 computes the CRC32 checksum of data
func calculateCRC32(data []byte) string {
hash := crc32.NewIEEE()
hash.Write(data)
return hex.EncodeToString(hash.Sum(nil))
}
// _retryOperation retries the given operation
func (d *Doubao) _retryOperation(operation string, fn func() error) error {
return retry.Do(


@ -79,11 +79,11 @@ type ShareInfo struct {
RiskReviewStatus int `json:"risk_review_status"`
ConversationID string `json:"conversation_id"`
ParentID string `json:"parent_id"`
CreateTime int `json:"create_time"`
UpdateTime int `json:"update_time"`
CreateTime int64 `json:"create_time"`
UpdateTime int64 `json:"update_time"`
} `json:"first_node"`
NodeCount int `json:"node_count"`
CreateTime int `json:"create_time"`
CreateTime int64 `json:"create_time"`
Channel string `json:"channel"`
InfluencerType int `json:"influencer_type"`
}
@ -111,8 +111,8 @@ type FilePath []struct {
RiskReviewStatus int `json:"risk_review_status"`
ConversationID string `json:"conversation_id"`
ParentID string `json:"parent_id"`
CreateTime int `json:"create_time"`
UpdateTime int `json:"update_time"`
CreateTime int64 `json:"create_time"`
UpdateTime int64 `json:"update_time"`
}
type GetFileUrlResp struct {


@ -192,12 +192,11 @@ func (d *Dropbox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
url := d.contentBase + "/2/files/upload_session/append_v2"
reader := driver.NewLimitedUploadStream(ctx, io.LimitReader(stream, PartSize))
req, err := http.NewRequest(http.MethodPost, url, reader)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, reader)
if err != nil {
log.Errorf("failed to update file when append to upload session, err: %+v", err)
return err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Authorization", "Bearer "+d.AccessToken)


@ -13,7 +13,7 @@ type Addition struct {
ClientSecret string `json:"client_secret" required:"false" help:"Keep it empty if you don't have one"`
AccessToken string
RefreshToken string `json:"refresh_token" required:"true"`
RootNamespaceId string
RootNamespaceId string `json:"RootNamespaceId" required:"false"`
}
var config = driver.Config{


@ -169,13 +169,19 @@ func (d *Dropbox) getFiles(ctx context.Context, path string) ([]File, error) {
func (d *Dropbox) finishUploadSession(ctx context.Context, toPath string, offset int64, sessionId string) error {
url := d.contentBase + "/2/files/upload_session/finish"
req, err := http.NewRequest(http.MethodPost, url, nil)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil)
if err != nil {
return err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Authorization", "Bearer "+d.AccessToken)
if d.RootNamespaceId != "" {
apiPathRootJson, err := d.buildPathRootHeader()
if err != nil {
return err
}
req.Header.Set("Dropbox-API-Path-Root", apiPathRootJson)
}
uploadFinishArgs := UploadFinishArgs{
Commit: struct {
@ -214,13 +220,19 @@ func (d *Dropbox) finishUploadSession(ctx context.Context, toPath string, offset
func (d *Dropbox) startUploadSession(ctx context.Context) (string, error) {
url := d.contentBase + "/2/files/upload_session/start"
req, err := http.NewRequest(http.MethodPost, url, nil)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil)
if err != nil {
return "", err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Authorization", "Bearer "+d.AccessToken)
if d.RootNamespaceId != "" {
apiPathRootJson, err := d.buildPathRootHeader()
if err != nil {
return "", err
}
req.Header.Set("Dropbox-API-Path-Root", apiPathRootJson)
}
req.Header.Set("Dropbox-API-Arg", "{\"close\":false}")
res, err := base.HttpClient.Do(req)
@ -235,3 +247,11 @@ func (d *Dropbox) startUploadSession(ctx context.Context) (string, error) {
_ = res.Body.Close()
return sessionId, nil
}
func (d *Dropbox) buildPathRootHeader() (string, error) {
return utils.Json.MarshalToString(map[string]interface{}{
".tag": "root",
"root": d.RootNamespaceId,
})
}
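The resulting Dropbox-API-Path-Root header value is a small JSON object; an equivalent stdlib-only sketch (namespace ID illustrative):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	header, _ := json.Marshal(map[string]interface{}{
		".tag": "root",
		"root": "1234567890", // illustrative namespace ID
	})
	fmt.Println(string(header)) // {".tag":"root","root":"1234567890"}
}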


@ -31,13 +31,13 @@ func (c *customTokenSource) Token() (*oauth2.Token, error) {
v.Set("client_id", c.config.ClientID)
v.Set("client_secret", c.config.ClientSecret)
req, err := http.NewRequest("POST", c.config.TokenURL, strings.NewReader(v.Encode()))
req, err := http.NewRequestWithContext(c.ctx, http.MethodPost, c.config.TokenURL, strings.NewReader(v.Encode()))
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
resp, err := http.DefaultClient.Do(req.WithContext(c.ctx))
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}


@ -2,12 +2,15 @@ package ftp
import (
"context"
"errors"
"io"
stdpath "path"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/jlaffaye/ftp"
)
@ -16,6 +19,9 @@ type FTP struct {
model.Storage
Addition
conn *ftp.ServerConn
ctx context.Context
cancel context.CancelFunc
}
func (d *FTP) Config() driver.Config {
@ -27,12 +33,16 @@ func (d *FTP) GetAddition() driver.Additional {
}
func (d *FTP) Init(ctx context.Context) error {
return d._login()
d.ctx, d.cancel = context.WithCancel(context.Background())
var err error
d.conn, err = d._login(ctx)
return err
}
func (d *FTP) Drop(ctx context.Context) error {
if d.conn != nil {
_ = d.conn.Logout()
_ = d.conn.Quit()
d.cancel()
}
return nil
}
@ -62,25 +72,51 @@ func (d *FTP) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]m
}
func (d *FTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if err := d.login(); err != nil {
conn, err := d._login(ctx)
if err != nil {
return nil, err
}
remoteFile := NewFileReader(d.conn, encode(file.GetPath(), d.Encoding), file.GetSize())
if remoteFile != nil && !d.Config().OnlyLinkMFile {
return &model.Link{
RangeReader: &model.FileRangeReader{
RangeReaderIF: stream.RateLimitRangeReaderFunc(stream.GetRangeReaderFromMFile(file.GetSize(), remoteFile)),
},
SyncClosers: utils.NewSyncClosers(remoteFile),
path := encode(file.GetPath(), d.Encoding)
size := file.GetSize()
resultRangeReader := func(context context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
length := httpRange.Length
if length < 0 || httpRange.Start+length > size {
length = size - httpRange.Start
}
var c *ftp.ServerConn
if ctx == context {
c = conn
} else {
var err error
c, err = d._login(context)
if err != nil {
return nil, err
}
}
resp, err := c.RetrFrom(path, uint64(httpRange.Start))
if err != nil {
return nil, err
}
var close utils.CloseFunc
if context == ctx {
close = resp.Close
} else {
close = func() error {
return errors.Join(resp.Close(), c.Quit())
}
}
return utils.ReadCloser{
Reader: io.LimitReader(resp, length),
Closer: close,
}, nil
}
return &model.Link{
MFile: &stream.RateLimitFile{
File: remoteFile,
Limiter: stream.ServerDownloadLimit,
Ctx: ctx,
RangeReader: &model.FileRangeReader{
RangeReaderIF: stream.RateLimitRangeReaderFunc(resultRangeReader),
},
SyncClosers: utils.NewSyncClosers(utils.CloseFunc(conn.Quit)),
}, nil
}


@ -33,7 +33,7 @@ type Addition struct {
var config = driver.Config{
Name: "FTP",
LocalSort: true,
OnlyLinkMFile: true,
OnlyLinkMFile: false,
DefaultRoot: "/",
NoLinkURL: true,
}


@ -1,11 +1,8 @@
package ftp
import (
"context"
"fmt"
"io"
"os"
"sync"
"sync/atomic"
"time"
"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
@ -15,112 +12,32 @@ import (
// do others that are not defined in the Driver interface
func (d *FTP) login() error {
-err, _, _ := singleflight.ErrorGroup.Do(fmt.Sprintf("FTP.login:%p", d), func() (error, error) {
-return d._login(), nil
+_, err, _ := singleflight.AnyGroup.Do(fmt.Sprintf("FTP.login:%p", d), func() (any, error) {
+var err error
+if d.conn != nil {
+err = d.conn.NoOp()
+if err != nil {
+d.conn.Quit()
+d.conn = nil
+}
+}
+if d.conn == nil {
+d.conn, err = d._login(d.ctx)
+}
+return nil, err
})
return err
}
-func (d *FTP) _login() error {
-if d.conn != nil {
-_, err := d.conn.CurrentDir()
-if err == nil {
-return nil
-}
-}
-conn, err := ftp.Dial(d.Address, ftp.DialWithShutTimeout(10*time.Second))
+func (d *FTP) _login(ctx context.Context) (*ftp.ServerConn, error) {
+conn, err := ftp.Dial(d.Address, ftp.DialWithShutTimeout(10*time.Second), ftp.DialWithContext(ctx))
if err != nil {
-return err
+return nil, err
}
err = conn.Login(d.Username, d.Password)
if err != nil {
-return err
+conn.Quit()
+return nil, err
}
-d.conn = conn
-return nil
-}
-
-// FileReader An FTP file reader that implements io.MFile for seeking.
-type FileReader struct {
-conn *ftp.ServerConn
-resp *ftp.Response
-offset atomic.Int64
-readAtOffset int64
-mu sync.Mutex
-path string
-size int64
-}
-func NewFileReader(conn *ftp.ServerConn, path string, size int64) *FileReader {
-return &FileReader{
-conn: conn,
-path: path,
-size: size,
-}
-}
-func (r *FileReader) Read(buf []byte) (n int, err error) {
-n, err = r.ReadAt(buf, r.offset.Load())
-r.offset.Add(int64(n))
-return
-}
-func (r *FileReader) ReadAt(buf []byte, off int64) (n int, err error) {
-if off < 0 {
-return -1, os.ErrInvalid
-}
-r.mu.Lock()
-defer r.mu.Unlock()
-if off != r.readAtOffset {
-//have to restart the connection, to correct offset
-_ = r.resp.Close()
-r.resp = nil
-}
-if r.resp == nil {
-r.resp, err = r.conn.RetrFrom(r.path, uint64(off))
-r.readAtOffset = off
-if err != nil {
-return 0, err
-}
-}
-n, err = r.resp.Read(buf)
-r.readAtOffset += int64(n)
-return
-}
-func (r *FileReader) Seek(offset int64, whence int) (int64, error) {
-oldOffset := r.offset.Load()
-var newOffset int64
-switch whence {
-case io.SeekStart:
-newOffset = offset
-case io.SeekCurrent:
-newOffset = oldOffset + offset
-case io.SeekEnd:
-return r.size, nil
-default:
-return -1, os.ErrInvalid
-}
-if newOffset < 0 {
-// offset out of range
-return oldOffset, os.ErrInvalid
-}
-if newOffset == oldOffset {
-// offset not changed, so return directly
-return oldOffset, nil
-}
-r.offset.Store(newOffset)
-return newOffset, nil
-}
-func (r *FileReader) Close() error {
-if r.resp != nil {
-return r.resp.Close()
-}
-return nil
+return conn, nil
}
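
The refactored login above probes the cached control connection with NoOp and re-dials only when it is dead, while singleflight collapses concurrent re-login attempts into a single dial. A minimal sketch of that keep-alive pattern, using golang.org/x/sync/singleflight in place of the project's own pkg/singleflight (type and method names below are illustrative, not OpenList APIs):

```go
package sketch

import (
	"context"
	"time"

	"github.com/jlaffaye/ftp"
	"golang.org/x/sync/singleflight"
)

type ftpClient struct {
	addr, user, pass string
	conn             *ftp.ServerConn
	sf               singleflight.Group
}

// getConn returns a healthy control connection, re-dialing at most once
// even when many goroutines notice a dead connection simultaneously.
func (c *ftpClient) getConn(ctx context.Context) (*ftp.ServerConn, error) {
	_, err, _ := c.sf.Do("login", func() (any, error) {
		if c.conn != nil && c.conn.NoOp() == nil {
			return nil, nil // connection still alive, reuse it
		}
		if c.conn != nil {
			_ = c.conn.Quit() // drop the dead connection
			c.conn = nil
		}
		conn, err := ftp.Dial(c.addr,
			ftp.DialWithContext(ctx),
			ftp.DialWithShutTimeout(10*time.Second))
		if err != nil {
			return nil, err
		}
		if err := conn.Login(c.user, c.pass); err != nil {
			_ = conn.Quit()
			return nil, err
		}
		c.conn = conn
		return nil, nil
	})
	if err != nil {
		return nil, err
	}
	return c.conn, nil
}
```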

@ -162,7 +162,7 @@ func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
SetBody(driver.NewLimitedUploadStream(ctx, stream))
}, nil)
} else {
-err = d.chunkUpload(ctx, stream, putUrl)
+err = d.chunkUpload(ctx, stream, putUrl, up)
}
return err
}

@ -5,17 +5,20 @@ import (
"crypto/x509"
"encoding/pem"
"fmt"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"io"
"net/http"
"os"
"regexp"
"strconv"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/avast/retry-go"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/go-resty/resty/v2"
"github.com/golang-jwt/jwt/v4"
@ -251,28 +254,60 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {
return res, nil
}
-func (d *GoogleDrive) chunkUpload(ctx context.Context, stream model.FileStreamer, url string) error {
+func (d *GoogleDrive) chunkUpload(ctx context.Context, file model.FileStreamer, url string, up driver.UpdateProgress) error {
var defaultChunkSize = d.ChunkSize * 1024 * 1024
+ss, err := stream.NewStreamSectionReader(file, int(defaultChunkSize), &up)
+if err != nil {
+return err
+}
var offset int64 = 0
-for offset < stream.GetSize() {
+url += "?includeItemsFromAllDrives=true&supportsAllDrives=true"
+for offset < file.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
-chunkSize := stream.GetSize() - offset
-if chunkSize > defaultChunkSize {
-chunkSize = defaultChunkSize
-}
-reader, err := stream.RangeRead(http_range.Range{Start: offset, Length: chunkSize})
+chunkSize := min(file.GetSize()-offset, defaultChunkSize)
+reader, err := ss.GetSectionReader(offset, chunkSize)
if err != nil {
return err
}
-reader = driver.NewLimitedUploadStream(ctx, reader)
-_, err = d.request(url, http.MethodPut, func(req *resty.Request) {
-req.SetHeaders(map[string]string{
-"Content-Length": strconv.FormatInt(chunkSize, 10),
-"Content-Range": fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, stream.GetSize()),
-}).SetBody(reader).SetContext(ctx)
-}, nil)
+limitedReader := driver.NewLimitedUploadStream(ctx, reader)
+err = retry.Do(func() error {
+reader.Seek(0, io.SeekStart)
+req, err := http.NewRequestWithContext(ctx, http.MethodPut, url, limitedReader)
+if err != nil {
+return err
+}
+req.Header = map[string][]string{
+"Authorization": {"Bearer " + d.AccessToken},
+"Content-Length": {strconv.FormatInt(chunkSize, 10)},
+"Content-Range": {fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, file.GetSize())},
+}
+res, err := base.HttpClient.Do(req)
+if err != nil {
+return err
+}
+defer res.Body.Close()
+bytes, _ := io.ReadAll(res.Body)
+var e Error
+utils.Json.Unmarshal(bytes, &e)
+if e.Error.Code != 0 {
+if e.Error.Code == 401 {
+err = d.refreshToken()
+if err != nil {
+return err
+}
+}
+return fmt.Errorf("%s: %v", e.Error.Message, e.Error.Errors)
+}
+up(float64(offset+chunkSize) / float64(file.GetSize()) * 100)
+return nil
+},
+retry.Attempts(3),
+retry.DelayType(retry.BackOffDelay),
+retry.Delay(time.Second))
+ss.FreeSectionReader(reader)
if err != nil {
return err
}
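
The retry.Do wrapper above only works because each chunk now comes from a rewindable section reader: a failed attempt may already have consumed bytes, so every retry must Seek back to the start before resending. A reduced sketch of that loop-body contract, with io.SectionReader standing in for the internal StreamSectionReader (the URL and sizes are placeholders):

```go
package sketch

import (
	"fmt"
	"io"
	"net/http"
	"os"
	"time"

	"github.com/avast/retry-go"
)

// uploadChunk PUTs one chunk of f with a Content-Range header,
// retrying with exponential backoff and rewinding before each attempt.
func uploadChunk(url string, f *os.File, offset, size, total int64) error {
	chunk := io.NewSectionReader(f, offset, size)
	return retry.Do(func() error {
		// A failed attempt may have consumed part of the reader;
		// rewind so the retry sends the whole chunk again.
		if _, err := chunk.Seek(0, io.SeekStart); err != nil {
			return err
		}
		req, err := http.NewRequest(http.MethodPut, url, chunk)
		if err != nil {
			return err
		}
		req.ContentLength = size
		req.Header.Set("Content-Range",
			fmt.Sprintf("bytes %d-%d/%d", offset, offset+size-1, total))
		res, err := http.DefaultClient.Do(req)
		if err != nil {
			return err
		}
		defer res.Body.Close()
		if res.StatusCode >= 400 {
			return fmt.Errorf("chunk upload failed: %s", res.Status)
		}
		return nil
	},
		retry.Attempts(3),
		retry.DelayType(retry.BackOffDelay),
		retry.Delay(time.Second))
}
```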

@ -276,9 +276,7 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreame
etag := s.GetHash().GetHash(utils.MD5)
var err error
if len(etag) != utils.MD5.Width {
-cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
-up = model.UpdateProgressWithRange(up, 50, 100)
-_, etag, err = stream.CacheFullInTempFileAndHash(s, cacheFileProgress, utils.MD5)
+_, etag, err = stream.CacheFullAndHash(s, &up, utils.MD5)
if err != nil {
return nil, err
}
@ -298,6 +296,23 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreame
return nil, err
}
upToken := utils.Json.Get(res, "upToken").ToString()
if upToken == "-1" {
// rapid upload: the server already has this file, no bytes need to be sent
var resp UploadTokenRapidResp
err := utils.Json.Unmarshal(res, &resp)
if err != nil {
return nil, err
}
return &model.Object{
ID: strconv.FormatInt(resp.Map.FileID, 10),
Name: resp.Map.FileName,
Size: s.GetSize(),
Modified: s.ModTime(),
Ctime: s.CreateTime(),
IsFolder: false,
HashInfo: utils.NewHashInfo(utils.MD5, etag),
}, nil
}
now := time.Now()
key := fmt.Sprintf("disk/%d/%d/%d/%s/%016d", now.Year(), now.Month(), now.Day(), d.account, now.UnixMilli())
reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{

@ -29,8 +29,10 @@ func init() {
op.RegisterDriver(func() driver.Driver {
return &ILanZou{
config: driver.Config{
Name: "ILanZou",
DefaultRoot: "0",
+LocalSort: true,
+NoOverwriteUpload: true,
},
conf: Conf{
base: "https://api.ilanzou.com",
@ -46,8 +48,10 @@ func init() {
op.RegisterDriver(func() driver.Driver {
return &ILanZou{
config: driver.Config{
Name: "FeijiPan",
DefaultRoot: "0",
+LocalSort: true,
+NoOverwriteUpload: true,
},
conf: Conf{
base: "https://api.feijipan.com",

@ -43,6 +43,18 @@ type Part struct {
ETag string `json:"etag"`
}
type UploadTokenRapidResp struct {
Msg string `json:"msg"`
Code int `json:"code"`
UpToken string `json:"upToken"`
Map struct {
FileIconID int `json:"fileIconId"`
FileName string `json:"fileName"`
FileIcon string `json:"fileIcon"`
FileID int64 `json:"fileId"`
} `json:"map"`
}
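
UploadTokenRapidResp above carries the rapid-upload result: when the server answers upToken == "-1", it already knows the file (matched by its MD5 etag), and the driver returns an object without transferring any bytes. A self-contained sketch of decoding that response with the standard library (the sample JSON is made up for illustration; the driver itself uses utils.Json):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type uploadTokenRapidResp struct {
	Msg     string `json:"msg"`
	Code    int    `json:"code"`
	UpToken string `json:"upToken"`
	Map     struct {
		FileName string `json:"fileName"`
		FileID   int64  `json:"fileId"`
	} `json:"map"`
}

func main() {
	// Illustrative payload only; field names follow the struct above.
	raw := []byte(`{"msg":"ok","code":200,"upToken":"-1","map":{"fileName":"a.bin","fileId":42}}`)
	var resp uploadTokenRapidResp
	if err := json.Unmarshal(raw, &resp); err != nil {
		panic(err)
	}
	if resp.UpToken == "-1" {
		fmt.Printf("rapid upload hit: %s (id %d), nothing to send\n",
			resp.Map.FileName, resp.Map.FileID)
	}
}
```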
type UploadResultResp struct {
Msg string `json:"msg"`
Code int `json:"code"`

@ -3,6 +3,7 @@ package LenovoNasShare
import (
"context"
"net/http"
"net/url"
"strings"
"time"
@ -71,7 +72,23 @@ func (d *LenovoNasShare) List(ctx context.Context, dir model.Obj, args model.Lis
files = append(files, resp.Data.List...)
return utils.SliceConvert(files, func(src File) (model.Obj, error) {
-return src, nil
+if src.IsDir() {
+return src, nil
+}
+return &model.ObjThumb{
+Object: model.Object{
+Name: src.GetName(),
+Size: src.GetSize(),
+Modified: src.ModTime(),
+IsFolder: src.IsDir(),
+},
+Thumbnail: model.Thumbnail{
+Thumbnail: func() string {
+thumbUrl := d.Host + "/oneproxy/api/share/v1/file/thumb?code=" + d.ShareId + "&stoken=" + d.stoken + "&path=" + url.QueryEscape(src.GetPath())
+return thumbUrl
+}(),
+},
+}, nil
})
}
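
Note that the thumbnail URL above query-escapes only path; code and stoken are concatenated raw. A small sketch of building the same URL with url.Values, which escapes every parameter (endpoint and parameter names are taken from the diff, not from any documented API):

```go
package sketch

import "net/url"

// thumbURL builds the share thumbnail URL with all query parameters
// escaped, not just the path component.
func thumbURL(host, shareID, stoken, path string) string {
	q := url.Values{}
	q.Set("code", shareID)
	q.Set("stoken", stoken)
	q.Set("path", path)
	return host + "/oneproxy/api/share/v1/file/thumb?" + q.Encode()
}
```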

@ -0,0 +1,92 @@
package local
// TestDirCalculateSize tests the directory size calculation
// It should be run with the local driver enabled and directory size calculation set to true
import (
"os"
"path/filepath"
"strconv"
"testing"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
)
func generatedTestDir(dir string, dep, filecount int) {
if dep == 0 {
return
}
for i := 0; i < dep; i++ {
subDir := dir + "/dir" + strconv.Itoa(i)
os.Mkdir(subDir, 0755)
generatedTestDir(subDir, dep-1, filecount)
generatedFiles(subDir, filecount)
}
}
func generatedFiles(path string, count int) error {
for i := 0; i < count; i++ {
filePath := filepath.Join(path, "file"+strconv.Itoa(i)+".txt")
file, err := os.Create(filePath)
if err != nil {
return err
}
// fill the file with ASCII characters
content := make([]byte, 1024) // 1KB file
for j := range content {
content[j] = byte('a' + j%26) // Fill with 'a' to 'z'
}
_, err = file.Write(content)
if err != nil {
return err
}
file.Close()
}
return nil
}
// performance tests for directory size calculation
func BenchmarkCalculateDirSize(t *testing.B) {
// log the start of the benchmark
t.Logf("Starting performance test for directory size calculation")
// skip the performance test in short mode
if testing.Short() {
t.Skip("Skipping performance test in short mode")
}
// create a temp directory for testing
testTempDir := t.TempDir()
err := os.MkdirAll(testTempDir, 0755)
if err != nil {
t.Fatalf("Failed to create test directory: %v", err)
}
defer os.RemoveAll(testTempDir) // Clean up after test
// build a directory tree of depth 5, with 10 files and 10 subdirectories per level
generatedTestDir(testTempDir, 5, 10)
// Initialize the local driver with directory size calculation enabled
d := &Local{
directoryMap: DirectoryMap{
root: testTempDir,
},
Addition: Addition{
DirectorySize: true,
RootPath: driver.RootPath{
RootFolderPath: testTempDir,
},
},
}
//record the start time
t.StartTimer()
// Calculate the directory size
err = d.directoryMap.RecalculateDirSize()
if err != nil {
t.Fatalf("Failed to calculate directory size: %v", err)
}
//record the end time
t.StopTimer()
// Print the size and duration
node, ok := d.directoryMap.Get(d.directoryMap.root)
if !ok {
t.Fatalf("Failed to get root node from directory map")
}
t.Logf("Directory size: %d bytes", node.fileSum+node.directorySum)
t.Logf("Performance test completed successfully")
}
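
The benchmark above measures a single RecalculateDirSize call between StartTimer/StopTimer rather than looping b.N times, so the reported ns/op is not calibrated by the framework. For comparison, a conventional shape for the same measurement, assuming the same package and the generatedTestDir helper and DirectoryMap type defined in this diff, might be:

```go
// Sketch only: lives in the same package as the benchmark above and
// reuses its helpers; run with `go test -bench BenchmarkRecalculateDirSize`.
func BenchmarkRecalculateDirSize(b *testing.B) {
	dir := b.TempDir()
	generatedTestDir(dir, 5, 10)
	m := DirectoryMap{root: dir}
	b.ResetTimer() // exclude the tree setup from the measurement
	for i := 0; i < b.N; i++ {
		if err := m.RecalculateDirSize(); err != nil {
			b.Fatal(err)
		}
	}
}
```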

@ -33,6 +33,9 @@ type Local struct {
Addition
mkdirPerm int32
// directory size data
directoryMap DirectoryMap
// zero means no limit
thumbConcurrency int
thumbTokenBucket TokenBucket
@ -66,6 +69,15 @@ func (d *Local) Init(ctx context.Context) error {
}
d.Addition.RootFolderPath = abs
}
if d.DirectorySize {
d.directoryMap.root = d.GetRootPath()
_, err := d.directoryMap.CalculateDirSize(d.GetRootPath())
if err != nil {
return err
}
} else {
d.directoryMap.Clear()
}
if d.ThumbCacheFolder != "" && !utils.Exists(d.ThumbCacheFolder) {
err := os.MkdirAll(d.ThumbCacheFolder, os.FileMode(d.mkdirPerm))
if err != nil {
@ -124,6 +136,9 @@ func (d *Local) GetAddition() driver.Additional {
func (d *Local) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
fullPath := dir.GetPath()
rawFiles, err := readDir(fullPath)
if d.DirectorySize && args.Refresh {
d.directoryMap.RecalculateDirSize()
}
if err != nil {
return nil, err
}
@ -147,7 +162,12 @@ func (d *Local) FileInfoToObj(ctx context.Context, f fs.FileInfo, reqPath string
}
isFolder := f.IsDir() || isSymlinkDir(f, fullPath)
var size int64
-if !isFolder {
+if isFolder {
+node, ok := d.directoryMap.Get(filepath.Join(fullPath, f.Name()))
+if ok {
+size = node.fileSum + node.directorySum
+}
+} else {
size = f.Size()
}
var ctime time.Time
@ -186,7 +206,12 @@ func (d *Local) Get(ctx context.Context, path string) (model.Obj, error) {
isFolder := f.IsDir() || isSymlinkDir(f, path)
size := f.Size()
if isFolder {
-size = 0
+node, ok := d.directoryMap.Get(path)
+if ok {
+size = node.fileSum + node.directorySum
+}
+} else {
+size = f.Size()
}
var ctime time.Time
t, err := times.Stat(path)
@ -245,13 +270,12 @@ func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
if err != nil {
return nil, err
}
+link.ContentLength = file.GetSize()
link.MFile = open
}
-if link.MFile != nil && !d.Config().OnlyLinkMFile {
-link.AddIfCloser(link.MFile)
-link.RangeReader = &model.FileRangeReader{
-RangeReaderIF: stream.GetRangeReaderFromMFile(file.GetSize(), link.MFile),
-}
+link.AddIfCloser(link.MFile)
+if !d.Config().OnlyLinkMFile {
+link.RangeReader = stream.GetRangeReaderFromMFile(link.ContentLength, link.MFile)
link.MFile = nil
}
return link, nil
@ -272,22 +296,31 @@ func (d *Local) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
if utils.IsSubPath(srcPath, dstPath) {
return fmt.Errorf("the destination folder is a subfolder of the source folder")
}
-if err := os.Rename(srcPath, dstPath); err != nil && strings.Contains(err.Error(), "invalid cross-device link") {
-// Handle cross-device file move in local driver
-if err = d.Copy(ctx, srcObj, dstDir); err != nil {
-return err
-} else {
-// Directly remove file without check recycle bin if successfully copied
-if srcObj.IsDir() {
-err = os.RemoveAll(srcObj.GetPath())
-} else {
-err = os.Remove(srcObj.GetPath())
-}
-}
-} else {
-return err
-}
+err := os.Rename(srcPath, dstPath)
+if err != nil && strings.Contains(err.Error(), "invalid cross-device link") {
+// Cross-device move: copy first, then delete the source
+if err := d.Copy(ctx, srcObj, dstDir); err != nil {
+return err
+}
+// After a successful copy, remove the source file/folder directly
+if srcObj.IsDir() {
+return os.RemoveAll(srcObj.GetPath())
+}
+return os.Remove(srcObj.GetPath())
+}
+if err == nil {
+srcParent := filepath.Dir(srcPath)
+dstParent := filepath.Dir(dstPath)
+if d.directoryMap.Has(srcParent) {
+d.directoryMap.UpdateDirSize(srcParent)
+d.directoryMap.UpdateDirParents(srcParent)
+}
+if d.directoryMap.Has(dstParent) {
+d.directoryMap.UpdateDirSize(dstParent)
+d.directoryMap.UpdateDirParents(dstParent)
+}
+}
return err
}
func (d *Local) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
@ -297,6 +330,14 @@ func (d *Local) Rename(ctx context.Context, srcObj model.Obj, newName string) er
if err != nil {
return err
}
if srcObj.IsDir() {
if d.directoryMap.Has(srcPath) {
d.directoryMap.DeleteDirNode(srcPath)
d.directoryMap.CalculateDirSize(dstPath)
}
}
return nil
}
@ -307,11 +348,21 @@ func (d *Local) Copy(_ context.Context, srcObj, dstDir model.Obj) error {
return fmt.Errorf("the destination folder is a subfolder of the source folder")
}
// Copy using otiai10/copy to perform more secure & efficient copy
-return cp.Copy(srcPath, dstPath, cp.Options{
+err := cp.Copy(srcPath, dstPath, cp.Options{
Sync: true, // Sync file to disk after copy, may have performance penalty in filesystem such as ZFS
PreserveTimes: true,
PreserveOwner: true,
})
if err != nil {
return err
}
if d.directoryMap.Has(filepath.Dir(dstPath)) {
d.directoryMap.UpdateDirSize(filepath.Dir(dstPath))
d.directoryMap.UpdateDirParents(filepath.Dir(dstPath))
}
return nil
}
func (d *Local) Remove(ctx context.Context, obj model.Obj) error {
@ -332,6 +383,19 @@ func (d *Local) Remove(ctx context.Context, obj model.Obj) error {
if err != nil {
return err
}
if obj.IsDir() {
if d.directoryMap.Has(obj.GetPath()) {
d.directoryMap.DeleteDirNode(obj.GetPath())
d.directoryMap.UpdateDirSize(filepath.Dir(obj.GetPath()))
d.directoryMap.UpdateDirParents(filepath.Dir(obj.GetPath()))
}
} else {
if d.directoryMap.Has(filepath.Dir(obj.GetPath())) {
d.directoryMap.UpdateDirSize(filepath.Dir(obj.GetPath()))
d.directoryMap.UpdateDirParents(filepath.Dir(obj.GetPath()))
}
}
return nil
}
@ -355,6 +419,11 @@ func (d *Local) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
if err != nil {
log.Errorf("[local] failed to change time of %s: %s", fullPath, err)
}
if d.directoryMap.Has(dstDir.GetPath()) {
d.directoryMap.UpdateDirSize(dstDir.GetPath())
d.directoryMap.UpdateDirParents(dstDir.GetPath())
}
return nil
}

@ -7,6 +7,7 @@ import (
type Addition struct {
driver.RootPath
DirectorySize bool `json:"directory_size" default:"false" help:"This might impact host performance"`
Thumbnail bool `json:"thumbnail" required:"true" help:"enable thumbnail"`
ThumbCacheFolder string `json:"thumb_cache_folder"`
ThumbConcurrency string `json:"thumb_concurrency" default:"16" required:"false" help:"Number of concurrent thumbnail generation goroutines. This controls how many thumbnails can be generated in parallel."`
@ -27,6 +28,8 @@ var config = driver.Config{
func init() {
op.RegisterDriver(func() driver.Driver {
-return &Local{}
+return &Local{
+directoryMap: DirectoryMap{},
+}
})
}

@ -7,9 +7,12 @@ import (
"io/fs"
"os"
"path/filepath"
"runtime"
"slices"
"sort"
"strconv"
"strings"
"sync"
"github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/model"
@ -19,7 +22,8 @@ import (
)
func isSymlinkDir(f fs.FileInfo, path string) bool {
-if f.Mode()&os.ModeSymlink == os.ModeSymlink {
+if f.Mode()&os.ModeSymlink == os.ModeSymlink ||
+(runtime.GOOS == "windows" && f.Mode()&os.ModeIrregular == os.ModeIrregular) { // os.ModeIrregular is the Junction bit on Windows
dst, err := os.Readlink(filepath.Join(path, f.Name()))
if err != nil {
return false
@ -151,3 +155,253 @@ func (d *Local) getThumb(file model.Obj) (*bytes.Buffer, *string, error) {
}
return &buf, nil, nil
}
type DirectoryMap struct {
root string
data sync.Map
}
type DirectoryNode struct {
fileSum int64
directorySum int64
children []string
}
type DirectoryTask struct {
path string
cache *DirectoryTaskCache
}
type DirectoryTaskCache struct {
fileSum int64
children []string
}
func (m *DirectoryMap) Has(path string) bool {
_, ok := m.data.Load(path)
return ok
}
func (m *DirectoryMap) Get(path string) (*DirectoryNode, bool) {
value, ok := m.data.Load(path)
if !ok {
return &DirectoryNode{}, false
}
node, ok := value.(*DirectoryNode)
if !ok {
return &DirectoryNode{}, false
}
return node, true
}
func (m *DirectoryMap) Set(path string, node *DirectoryNode) {
m.data.Store(path, node)
}
func (m *DirectoryMap) Delete(path string) {
m.data.Delete(path)
}
func (m *DirectoryMap) Clear() {
m.data.Clear()
}
func (m *DirectoryMap) RecalculateDirSize() error {
m.Clear()
if m.root == "" {
return fmt.Errorf("root path is not set")
}
size, err := m.CalculateDirSize(m.root)
if err != nil {
return err
}
if node, ok := m.Get(m.root); ok {
node.fileSum = size
node.directorySum = size
}
return nil
}
func (m *DirectoryMap) CalculateDirSize(dirname string) (int64, error) {
stack := []DirectoryTask{
{path: dirname},
}
for len(stack) > 0 {
task := stack[len(stack)-1]
stack = stack[:len(stack)-1]
if task.cache != nil {
directorySum := int64(0)
for _, filename := range task.cache.children {
child, ok := m.Get(filepath.Join(task.path, filename))
if !ok {
return 0, fmt.Errorf("child node not found")
}
directorySum += child.fileSum + child.directorySum
}
m.Set(task.path, &DirectoryNode{
fileSum: task.cache.fileSum,
directorySum: directorySum,
children: task.cache.children,
})
continue
}
files, err := readDir(task.path)
if err != nil {
return 0, err
}
fileSum := int64(0)
directorySum := int64(0)
children := []string{}
queue := []DirectoryTask{}
for _, f := range files {
fullpath := filepath.Join(task.path, f.Name())
isFolder := f.IsDir() || isSymlinkDir(f, fullpath)
if isFolder {
if node, ok := m.Get(fullpath); ok {
directorySum += node.fileSum + node.directorySum
} else {
queue = append(queue, DirectoryTask{
path: fullpath,
})
}
children = append(children, f.Name())
} else {
fileSum += f.Size()
}
}
if len(queue) > 0 {
stack = append(stack, DirectoryTask{
path: task.path,
cache: &DirectoryTaskCache{
fileSum: fileSum,
children: children,
},
})
stack = append(stack, queue...)
continue
}
m.Set(task.path, &DirectoryNode{
fileSum: fileSum,
directorySum: directorySum,
children: children,
})
}
if node, ok := m.Get(dirname); ok {
return node.fileSum + node.directorySum, nil
}
return 0, nil
}
func (m *DirectoryMap) UpdateDirSize(dirname string) (int64, error) {
node, ok := m.Get(dirname)
if !ok {
return 0, fmt.Errorf("directory node not found")
}
files, err := readDir(dirname)
if err != nil {
return 0, err
}
fileSum := int64(0)
directorySum := int64(0)
children := []string{}
for _, f := range files {
fullpath := filepath.Join(dirname, f.Name())
isFolder := f.IsDir() || isSymlinkDir(f, fullpath)
if isFolder {
if node, ok := m.Get(fullpath); ok {
directorySum += node.fileSum + node.directorySum
} else {
value, err := m.CalculateDirSize(fullpath)
if err != nil {
return 0, err
}
directorySum += value
}
children = append(children, f.Name())
} else {
fileSum += f.Size()
}
}
for _, c := range node.children {
if !slices.Contains(children, c) {
m.DeleteDirNode(filepath.Join(dirname, c))
}
}
node.fileSum = fileSum
node.directorySum = directorySum
node.children = children
return fileSum + directorySum, nil
}
func (m *DirectoryMap) UpdateDirParents(dirname string) error {
parentPath := filepath.Dir(dirname)
for parentPath != m.root && !strings.HasPrefix(m.root, parentPath) {
if node, ok := m.Get(parentPath); ok {
directorySum := int64(0)
for _, c := range node.children {
child, ok := m.Get(filepath.Join(parentPath, c))
if !ok {
return fmt.Errorf("child node not found")
}
directorySum += child.fileSum + child.directorySum
}
node.directorySum = directorySum
}
parentPath = filepath.Dir(parentPath)
}
return nil
}
func (m *DirectoryMap) DeleteDirNode(dirname string) error {
stack := []string{dirname}
for len(stack) > 0 {
current := stack[len(stack)-1]
stack = stack[:len(stack)-1]
if node, ok := m.Get(current); ok {
for _, filename := range node.children {
stack = append(stack, filepath.Join(current, filename))
}
m.Delete(current)
}
}
return nil
}
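
CalculateDirSize above is an iterative post-order traversal: a directory whose subdirectories are not yet known is pushed back onto the stack with a DirectoryTaskCache holding its own file bytes, and it is finalized only once every child has a node. The recursive equivalent below shows the quantity being computed, per directory, fileSum (its own files) plus directorySum (its subdirectories' totals); it is a sketch that ignores the symlink handling and node caching of the real code:

```go
package sketch

import (
	"os"
	"path/filepath"
)

// dirSize returns the total size of dir: bytes in its own files plus
// the recursive totals of its subdirectories.
func dirSize(dir string) (int64, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return 0, err
	}
	var fileSum, directorySum int64
	for _, e := range entries {
		if e.IsDir() {
			sub, err := dirSize(filepath.Join(dir, e.Name()))
			if err != nil {
				return 0, err
			}
			directorySum += sub
		} else {
			info, err := e.Info()
			if err != nil {
				return 0, err
			}
			fileSum += info.Size()
		}
	}
	return fileSum + directorySum, nil
}
```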

@ -180,7 +180,7 @@ func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, file model.FileS
if err != nil {
return err
}
-tempFile, err := file.CacheFullInTempFile()
+tempFile, err := file.CacheFullAndWriter(&up, nil)
if err != nil {
return err
}

@ -4,6 +4,7 @@ import (
"context"
"errors"
"io"
"net/http"
"time"
"github.com/go-resty/resty/v2"
@ -72,7 +73,7 @@ func (d *Misskey) getFiles(dir model.Obj) ([]model.Obj, error) {
} else {
body = map[string]string{}
}
err := d.request("/files", "POST", setBody(body), &files)
err := d.request("/files", http.MethodPost, setBody(body), &files)
if err != nil {
return []model.Obj{}, err
}
@ -89,7 +90,7 @@ func (d *Misskey) getFolders(dir model.Obj) ([]model.Obj, error) {
} else {
body = map[string]string{}
}
err := d.request("/folders", "POST", setBody(body), &folders)
err := d.request("/folders", http.MethodPost, setBody(body), &folders)
if err != nil {
return []model.Obj{}, err
}
@ -106,7 +107,7 @@ func (d *Misskey) list(dir model.Obj) ([]model.Obj, error) {
func (d *Misskey) link(file model.Obj) (*model.Link, error) {
var mFile MFile
err := d.request("/files/show", "POST", setBody(map[string]string{"fileId": file.GetID()}), &mFile)
err := d.request("/files/show", http.MethodPost, setBody(map[string]string{"fileId": file.GetID()}), &mFile)
if err != nil {
return nil, err
}
@ -117,7 +118,7 @@ func (d *Misskey) link(file model.Obj) (*model.Link, error) {
func (d *Misskey) makeDir(parentDir model.Obj, dirName string) (model.Obj, error) {
var folder MFolder
err := d.request("/folders/create", "POST", setBody(map[string]interface{}{"parentId": handleFolderId(parentDir), "name": dirName}), &folder)
err := d.request("/folders/create", http.MethodPost, setBody(map[string]interface{}{"parentId": handleFolderId(parentDir), "name": dirName}), &folder)
if err != nil {
return nil, err
}
@ -127,11 +128,11 @@ func (d *Misskey) makeDir(parentDir model.Obj, dirName string) (model.Obj, error
func (d *Misskey) move(srcObj, dstDir model.Obj) (model.Obj, error) {
if srcObj.IsDir() {
var folder MFolder
err := d.request("/folders/update", "POST", setBody(map[string]interface{}{"folderId": srcObj.GetID(), "parentId": handleFolderId(dstDir)}), &folder)
err := d.request("/folders/update", http.MethodPost, setBody(map[string]interface{}{"folderId": srcObj.GetID(), "parentId": handleFolderId(dstDir)}), &folder)
return mFolder2Object(folder), err
} else {
var file MFile
err := d.request("/files/update", "POST", setBody(map[string]interface{}{"fileId": srcObj.GetID(), "folderId": handleFolderId(dstDir)}), &file)
err := d.request("/files/update", http.MethodPost, setBody(map[string]interface{}{"fileId": srcObj.GetID(), "folderId": handleFolderId(dstDir)}), &file)
return mFile2Object(file), err
}
}
@ -139,11 +140,11 @@ func (d *Misskey) move(srcObj, dstDir model.Obj) (model.Obj, error) {
func (d *Misskey) rename(srcObj model.Obj, newName string) (model.Obj, error) {
if srcObj.IsDir() {
var folder MFolder
err := d.request("/folders/update", "POST", setBody(map[string]string{"folderId": srcObj.GetID(), "name": newName}), &folder)
err := d.request("/folders/update", http.MethodPost, setBody(map[string]string{"folderId": srcObj.GetID(), "name": newName}), &folder)
return mFolder2Object(folder), err
} else {
var file MFile
err := d.request("/files/update", "POST", setBody(map[string]string{"fileId": srcObj.GetID(), "name": newName}), &file)
err := d.request("/files/update", http.MethodPost, setBody(map[string]string{"fileId": srcObj.GetID(), "name": newName}), &file)
return mFile2Object(file), err
}
}
@ -171,7 +172,7 @@ func (d *Misskey) copy(srcObj, dstDir model.Obj) (model.Obj, error) {
if err != nil {
return nil, err
}
err = d.request("/files/upload-from-url", "POST", setBody(map[string]interface{}{"url": url.URL, "folderId": handleFolderId(dstDir)}), &file)
err = d.request("/files/upload-from-url", http.MethodPost, setBody(map[string]interface{}{"url": url.URL, "folderId": handleFolderId(dstDir)}), &file)
if err != nil {
return nil, err
}
@ -181,10 +182,10 @@ func (d *Misskey) copy(srcObj, dstDir model.Obj) (model.Obj, error) {
func (d *Misskey) remove(obj model.Obj) error {
if obj.IsDir() {
err := d.request("/folders/delete", "POST", setBody(map[string]string{"folderId": obj.GetID()}), nil)
err := d.request("/folders/delete", http.MethodPost, setBody(map[string]string{"folderId": obj.GetID()}), nil)
return err
} else {
err := d.request("/files/delete", "POST", setBody(map[string]string{"fileId": obj.GetID()}), nil)
err := d.request("/files/delete", http.MethodPost, setBody(map[string]string{"fileId": obj.GetID()}), nil)
return err
}
}

@ -263,7 +263,7 @@ func (d *MoPan) Remove(ctx context.Context, obj model.Obj) error {
}
func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
-file, err := stream.CacheFullInTempFile()
+file, err := stream.CacheFullAndWriter(&up, nil)
if err != nil {
return nil, err
}

@ -55,9 +55,7 @@ func (lrc *LyricObj) getProxyLink(ctx context.Context) *model.Link {
func (lrc *LyricObj) getLyricLink() *model.Link {
return &model.Link{
-RangeReader: &model.FileRangeReader{
-RangeReaderIF: stream.GetRangeReaderFromMFile(int64(len(lrc.lyric)), strings.NewReader(lrc.lyric)),
-},
+RangeReader: stream.GetRangeReaderFromMFile(int64(len(lrc.lyric)), strings.NewReader(lrc.lyric)),
}
}

@ -223,7 +223,7 @@ func (d *NeteaseMusic) removeSongObj(file model.Obj) error {
}
func (d *NeteaseMusic) putSongStream(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress) error {
-tmp, err := stream.CacheFullInTempFile()
+tmp, err := stream.CacheFullAndWriter(&up, nil)
if err != nil {
return err
}

@ -1,7 +1,6 @@
package onedrive
import (
"bytes"
"context"
"errors"
"fmt"
@ -15,6 +14,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
+streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
@ -238,26 +238,29 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil
if err != nil {
return err
}
+DEFAULT := d.ChunkSize * 1024 * 1024
+ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
+if err != nil {
+return err
+}
uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
var finish int64 = 0
-DEFAULT := d.ChunkSize * 1024 * 1024
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := stream.GetSize() - finish
byteSize := min(left, DEFAULT)
+utils.Log.Debugf("[Onedrive] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
+rd, err := ss.GetSectionReader(finish, byteSize)
+if err != nil {
+return err
+}
err = retry.Do(
func() error {
-utils.Log.Debugf("[Onedrive] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
-byteData := make([]byte, byteSize)
-n, err := io.ReadFull(stream, byteData)
-utils.Log.Debug(err, n)
-if err != nil {
-return err
-}
-req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl,
-driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
+rd.Seek(0, io.SeekStart)
+req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, rd))
if err != nil {
return err
}
@ -283,6 +286,7 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.FreeSectionReader(rd)
if err != nil {
return err
}
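
Both OneDrive drivers now take each chunk from a StreamSectionReader and hand it back with FreeSectionReader, instead of allocating make([]byte, byteSize) on every iteration. That type is internal to OpenList and not shown in this diff; the sketch below is an assumption-laden reduction of the idea, a sync.Pool of fixed-size buffers handing out seekable, retry-friendly sections, not the real implementation:

```go
package sketch

import (
	"bytes"
	"io"
	"sync"
)

type sectionPool struct {
	pool sync.Pool
}

func newSectionPool(size int) *sectionPool {
	return &sectionPool{pool: sync.Pool{New: func() any {
		return make([]byte, size)
	}}}
}

// get copies the next n bytes of src into a pooled buffer and returns a
// seekable reader over it, so retries can rewind without re-reading src.
func (p *sectionPool) get(src io.Reader, n int64) (*bytes.Reader, []byte, error) {
	buf := p.pool.Get().([]byte)[:n]
	if _, err := io.ReadFull(src, buf); err != nil {
		p.pool.Put(buf[:cap(buf)])
		return nil, nil, err
	}
	return bytes.NewReader(buf), buf, nil
}

// free returns the buffer to the pool once the chunk upload succeeded.
func (p *sectionPool) free(buf []byte) {
	p.pool.Put(buf[:cap(buf)])
}
```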

@ -1,7 +1,6 @@
package onedrive_app
import (
"bytes"
"context"
"errors"
"fmt"
@ -15,6 +14,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
+streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
@ -152,26 +152,29 @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model.
if err != nil {
return err
}
+DEFAULT := d.ChunkSize * 1024 * 1024
+ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
+if err != nil {
+return err
+}
uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
var finish int64 = 0
-DEFAULT := d.ChunkSize * 1024 * 1024
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := stream.GetSize() - finish
byteSize := min(left, DEFAULT)
+utils.Log.Debugf("[OnedriveAPP] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
+rd, err := ss.GetSectionReader(finish, byteSize)
+if err != nil {
+return err
+}
err = retry.Do(
func() error {
-utils.Log.Debugf("[OnedriveAPP] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
-byteData := make([]byte, byteSize)
-n, err := io.ReadFull(stream, byteData)
-utils.Log.Debug(err, n)
-if err != nil {
-return err
-}
-req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl,
-driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
+rd.Seek(0, io.SeekStart)
+req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, rd))
if err != nil {
return err
}
@ -197,6 +200,7 @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model.
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.FreeSectionReader(rd)
if err != nil {
return err
}

@ -38,14 +38,14 @@ func (d *OnedriveSharelink) Init(ctx context.Context) error {
d.cron = cron.NewCron(time.Hour * 1)
d.cron.Do(func() {
var err error
-d.Headers, err = d.getHeaders()
+d.Headers, err = d.getHeaders(ctx)
if err != nil {
log.Errorf("%+v", err)
}
})
// Get initial headers
-d.Headers, err = d.getHeaders()
+d.Headers, err = d.getHeaders(ctx)
if err != nil {
return err
}
@ -59,7 +59,7 @@ func (d *OnedriveSharelink) Drop(ctx context.Context) error {
func (d *OnedriveSharelink) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
path := dir.GetPath()
-files, err := d.getFiles(path)
+files, err := d.getFiles(ctx, path)
if err != nil {
return nil, err
}
@ -82,7 +82,7 @@ func (d *OnedriveSharelink) Link(ctx context.Context, file model.Obj, args model
if d.HeaderTime < time.Now().Unix()-1800 {
var err error
log.Debug("headers are older than 30 minutes, get new headers")
-header, err = d.getHeaders()
+header, err = d.getHeaders(ctx)
if err != nil {
return nil, err
}

@ -1,6 +1,7 @@
package onedrive_sharelink
import (
"context"
"crypto/tls"
"encoding/json"
"fmt"
@ -131,7 +132,7 @@ func getAttrValue(n *html.Node, key string) string {
}
// getHeaders constructs and returns the necessary HTTP headers for accessing the OneDrive share link
-func (d *OnedriveSharelink) getHeaders() (http.Header, error) {
+func (d *OnedriveSharelink) getHeaders(ctx context.Context) (http.Header, error) {
header := http.Header{}
header.Set("User-Agent", base.UserAgent)
header.Set("accept-language", "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6")
@ -142,7 +143,7 @@ func (d *OnedriveSharelink) getHeaders() (http.Header, error) {
if d.ShareLinkPassword == "" {
// Create a no-redirect client
clientNoDirect := NewNoRedirectCLient()
req, err := http.NewRequest("GET", d.ShareLinkURL, nil)
req, err := http.NewRequestWithContext(ctx, http.MethodGet, d.ShareLinkURL, nil)
if err != nil {
return nil, err
}
@ -180,9 +181,9 @@ func (d *OnedriveSharelink) getHeaders() (http.Header, error) {
}
// getFiles retrieves the files from the OneDrive share link at the specified path
-func (d *OnedriveSharelink) getFiles(path string) ([]Item, error) {
+func (d *OnedriveSharelink) getFiles(ctx context.Context, path string) ([]Item, error) {
clientNoDirect := NewNoRedirectCLient()
req, err := http.NewRequest("GET", d.ShareLinkURL, nil)
req, err := http.NewRequestWithContext(ctx, http.MethodGet, d.ShareLinkURL, nil)
if err != nil {
return nil, err
}
@ -221,11 +222,11 @@ func (d *OnedriveSharelink) getFiles(path string) ([]Item, error) {
// Get redirectUrl
answer, err := clientNoDirect.Do(req)
if err != nil {
-d.Headers, err = d.getHeaders()
+d.Headers, err = d.getHeaders(ctx)
if err != nil {
return nil, err
}
-return d.getFiles(path)
+return d.getFiles(ctx, path)
}
defer answer.Body.Close()
re := regexp.MustCompile(`templateUrl":"(.*?)"`)
@ -290,7 +291,7 @@ func (d *OnedriveSharelink) getFiles(path string) ([]Item, error) {
client := &http.Client{}
postUrl := strings.Join(redirectSplitURL[:len(redirectSplitURL)-3], "/") + "/_api/v2.1/graphql"
req, err = http.NewRequest("POST", postUrl, strings.NewReader(graphqlVar))
req, err = http.NewRequest(http.MethodPost, postUrl, strings.NewReader(graphqlVar))
if err != nil {
return nil, err
}
@ -298,11 +299,11 @@ func (d *OnedriveSharelink) getFiles(path string) ([]Item, error) {
resp, err := client.Do(req)
if err != nil {
-d.Headers, err = d.getHeaders()
+d.Headers, err = d.getHeaders(ctx)
if err != nil {
return nil, err
}
-return d.getFiles(path)
+return d.getFiles(ctx, path)
}
defer resp.Body.Close()
var graphqlReq GraphQLRequest
@ -323,31 +324,31 @@ func (d *OnedriveSharelink) getFiles(path string) ([]Item, error) {
graphqlReqNEW := GraphQLNEWRequest{}
postUrl = strings.Join(redirectSplitURL[:len(redirectSplitURL)-3], "/") + "/_api/web/GetListUsingPath(DecodedUrl=@a1)/RenderListDataAsStream" + nextHref
req, _ = http.NewRequest("POST", postUrl, strings.NewReader(renderListDataAsStreamVar))
req, _ = http.NewRequest(http.MethodPost, postUrl, strings.NewReader(renderListDataAsStreamVar))
req.Header = tempHeader
resp, err := client.Do(req)
if err != nil {
-d.Headers, err = d.getHeaders()
+d.Headers, err = d.getHeaders(ctx)
if err != nil {
return nil, err
}
-return d.getFiles(path)
+return d.getFiles(ctx, path)
}
defer resp.Body.Close()
json.NewDecoder(resp.Body).Decode(&graphqlReqNEW)
for graphqlReqNEW.ListData.NextHref != "" {
graphqlReqNEW = GraphQLNEWRequest{}
postUrl = strings.Join(redirectSplitURL[:len(redirectSplitURL)-3], "/") + "/_api/web/GetListUsingPath(DecodedUrl=@a1)/RenderListDataAsStream" + nextHref
req, _ = http.NewRequest("POST", postUrl, strings.NewReader(renderListDataAsStreamVar))
req, _ = http.NewRequest(http.MethodPost, postUrl, strings.NewReader(renderListDataAsStreamVar))
req.Header = tempHeader
resp, err := client.Do(req)
if err != nil {
-d.Headers, err = d.getHeaders()
+d.Headers, err = d.getHeaders(ctx)
if err != nil {
return nil, err
}
-return d.getFiles(path)
+return d.getFiles(ctx, path)
}
defer resp.Body.Close()
json.NewDecoder(resp.Body).Decode(&graphqlReqNEW)

@ -0,0 +1,181 @@
package openlist_share
import (
"context"
"fmt"
"net/http"
"net/url"
stdpath "path"
"strings"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/OpenList/v4/server/common"
"github.com/go-resty/resty/v2"
)
type OpenListShare struct {
model.Storage
Addition
serverArchivePreview bool
}
func (d *OpenListShare) Config() driver.Config {
return config
}
func (d *OpenListShare) GetAddition() driver.Additional {
return &d.Addition
}
func (d *OpenListShare) Init(ctx context.Context) error {
d.Addition.Address = strings.TrimSuffix(d.Addition.Address, "/")
var settings common.Resp[map[string]string]
_, _, err := d.request("/public/settings", http.MethodGet, func(req *resty.Request) {
req.SetResult(&settings)
})
if err != nil {
return err
}
d.serverArchivePreview = settings.Data["share_archive_preview"] == "true"
return nil
}
func (d *OpenListShare) Drop(ctx context.Context) error {
return nil
}
func (d *OpenListShare) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
var resp common.Resp[FsListResp]
_, _, err := d.request("/fs/list", http.MethodPost, func(req *resty.Request) {
req.SetResult(&resp).SetBody(ListReq{
PageReq: model.PageReq{
Page: 1,
PerPage: 0,
},
Path: stdpath.Join(fmt.Sprintf("/@s/%s", d.ShareId), dir.GetPath()),
Password: d.Pwd,
Refresh: false,
})
})
if err != nil {
return nil, err
}
var files []model.Obj
for _, f := range resp.Data.Content {
file := model.ObjThumb{
Object: model.Object{
Name: f.Name,
Modified: f.Modified,
Ctime: f.Created,
Size: f.Size,
IsFolder: f.IsDir,
HashInfo: utils.FromString(f.HashInfo),
},
Thumbnail: model.Thumbnail{Thumbnail: f.Thumb},
}
files = append(files, &file)
}
return files, nil
}
func (d *OpenListShare) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
path := utils.FixAndCleanPath(stdpath.Join(d.ShareId, file.GetPath()))
u := fmt.Sprintf("%s/sd%s?pwd=%s", d.Address, path, d.Pwd)
return &model.Link{URL: u}, nil
}
func (d *OpenListShare) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
if !d.serverArchivePreview || !d.ForwardArchiveReq {
return nil, errs.NotImplement
}
var resp common.Resp[ArchiveMetaResp]
_, code, err := d.request("/fs/archive/meta", http.MethodPost, func(req *resty.Request) {
req.SetResult(&resp).SetBody(ArchiveMetaReq{
ArchivePass: args.Password,
Path: stdpath.Join(fmt.Sprintf("/@s/%s", d.ShareId), obj.GetPath()),
Password: d.Pwd,
Refresh: false,
})
})
if code == 202 {
return nil, errs.WrongArchivePassword
}
if err != nil {
return nil, err
}
var tree []model.ObjTree
if resp.Data.Content != nil {
tree = make([]model.ObjTree, 0, len(resp.Data.Content))
for _, content := range resp.Data.Content {
tree = append(tree, &content)
}
}
return &model.ArchiveMetaInfo{
Comment: resp.Data.Comment,
Encrypted: resp.Data.Encrypted,
Tree: tree,
}, nil
}
func (d *OpenListShare) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
if !d.serverArchivePreview || !d.ForwardArchiveReq {
return nil, errs.NotImplement
}
var resp common.Resp[ArchiveListResp]
_, code, err := d.request("/fs/archive/list", http.MethodPost, func(req *resty.Request) {
req.SetResult(&resp).SetBody(ArchiveListReq{
ArchiveMetaReq: ArchiveMetaReq{
ArchivePass: args.Password,
Path: stdpath.Join(fmt.Sprintf("/@s/%s", d.ShareId), obj.GetPath()),
Password: d.Pwd,
Refresh: false,
},
PageReq: model.PageReq{
Page: 1,
PerPage: 0,
},
InnerPath: args.InnerPath,
})
})
if code == 202 {
return nil, errs.WrongArchivePassword
}
if err != nil {
return nil, err
}
var files []model.Obj
for _, f := range resp.Data.Content {
file := model.ObjThumb{
Object: model.Object{
Name: f.Name,
Modified: f.Modified,
Ctime: f.Created,
Size: f.Size,
IsFolder: f.IsDir,
HashInfo: utils.FromString(f.HashInfo),
},
Thumbnail: model.Thumbnail{Thumbnail: f.Thumb},
}
files = append(files, &file)
}
return files, nil
}
func (d *OpenListShare) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
if !d.serverArchivePreview || !d.ForwardArchiveReq {
return nil, errs.NotSupport
}
path := utils.FixAndCleanPath(stdpath.Join(d.ShareId, obj.GetPath()))
u := fmt.Sprintf("%s/sad%s?pwd=%s&inner=%s&pass=%s",
d.Address,
path,
d.Pwd,
utils.EncodePath(args.InnerPath, true),
url.QueryEscape(args.Password))
return &model.Link{URL: u}, nil
}
var _ driver.Driver = (*OpenListShare)(nil)

@ -0,0 +1,27 @@
package openlist_share
import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/op"
)
type Addition struct {
driver.RootPath
Address string `json:"url" required:"true"`
ShareId string `json:"sid" required:"true"`
Pwd string `json:"pwd"`
ForwardArchiveReq bool `json:"forward_archive_requests" default:"true"`
}
var config = driver.Config{
Name: "OpenListShare",
LocalSort: true,
NoUpload: true,
DefaultRoot: "/",
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &OpenListShare{}
})
}

@ -0,0 +1,111 @@
package openlist_share
import (
"time"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
)
type ListReq struct {
model.PageReq
Path string `json:"path" form:"path"`
Password string `json:"password" form:"password"`
Refresh bool `json:"refresh"`
}
type ObjResp struct {
Name string `json:"name"`
Size int64 `json:"size"`
IsDir bool `json:"is_dir"`
Modified time.Time `json:"modified"`
Created time.Time `json:"created"`
Sign string `json:"sign"`
Thumb string `json:"thumb"`
Type int `json:"type"`
HashInfo string `json:"hashinfo"`
}
type FsListResp struct {
Content []ObjResp `json:"content"`
Total int64 `json:"total"`
Readme string `json:"readme"`
Write bool `json:"write"`
Provider string `json:"provider"`
}
type ArchiveMetaReq struct {
ArchivePass string `json:"archive_pass"`
Password string `json:"password"`
Path string `json:"path"`
Refresh bool `json:"refresh"`
}
type TreeResp struct {
ObjResp
Children []TreeResp `json:"children"`
hashCache *utils.HashInfo
}
func (t *TreeResp) GetSize() int64 {
return t.Size
}
func (t *TreeResp) GetName() string {
return t.Name
}
func (t *TreeResp) ModTime() time.Time {
return t.Modified
}
func (t *TreeResp) CreateTime() time.Time {
return t.Created
}
func (t *TreeResp) IsDir() bool {
return t.ObjResp.IsDir
}
func (t *TreeResp) GetHash() utils.HashInfo {
return utils.FromString(t.HashInfo)
}
func (t *TreeResp) GetID() string {
return ""
}
func (t *TreeResp) GetPath() string {
return ""
}
func (t *TreeResp) GetChildren() []model.ObjTree {
ret := make([]model.ObjTree, 0, len(t.Children))
for _, child := range t.Children {
ret = append(ret, &child)
}
return ret
}
func (t *TreeResp) Thumb() string {
return t.ObjResp.Thumb
}
type ArchiveMetaResp struct {
Comment string `json:"comment"`
Encrypted bool `json:"encrypted"`
Content []TreeResp `json:"content"`
RawURL string `json:"raw_url"`
Sign string `json:"sign"`
}
type ArchiveListReq struct {
model.PageReq
ArchiveMetaReq
InnerPath string `json:"inner_path"`
}
type ArchiveListResp struct {
Content []ObjResp `json:"content"`
Total int64 `json:"total"`
}

@ -0,0 +1,32 @@
package openlist_share
import (
"fmt"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
)
func (d *OpenListShare) request(api, method string, callback base.ReqCallback) ([]byte, int, error) {
url := d.Address + "/api" + api
req := base.RestyClient.R()
if callback != nil {
callback(req)
}
res, err := req.Execute(method, url)
if err != nil {
code := 0
if res != nil {
code = res.StatusCode()
}
return nil, code, err
}
if res.StatusCode() >= 400 {
return nil, res.StatusCode(), fmt.Errorf("request failed, status: %s", res.Status())
}
code := utils.Json.Get(res.Body(), "code").ToInt()
if code != 200 {
return nil, code, fmt.Errorf("request failed, code: %d, message: %s", code, utils.Json.Get(res.Body(), "message").ToString())
}
return res.Body(), 200, nil
}
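
The request helper above reports failure through three channels: a transport error, an HTTP status >= 400, and a JSON body whose code field is not 200. A standalone sketch of that contract using resty and jsoniter directly (the real helper additionally threads a base.ReqCallback through for per-call configuration):

```go
package sketch

import (
	"fmt"

	"github.com/go-resty/resty/v2"
	jsoniter "github.com/json-iterator/go"
)

// request GETs address+"/api"+api and applies the same three-level
// error handling as the driver's helper above.
func request(address, api string) ([]byte, int, error) {
	res, err := resty.New().R().Get(address + "/api" + api)
	if err != nil {
		code := 0
		if res != nil {
			code = res.StatusCode()
		}
		return nil, code, err
	}
	if res.StatusCode() >= 400 {
		return nil, res.StatusCode(), fmt.Errorf("request failed, status: %s", res.Status())
	}
	code := jsoniter.Get(res.Body(), "code").ToInt()
	if code != 200 {
		return nil, code, fmt.Errorf("request failed, code: %d, message: %s",
			code, jsoniter.Get(res.Body(), "message").ToString())
	}
	return res.Body(), 200, nil
}
```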

@ -12,6 +12,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
hash_extend "github.com/OpenListTeam/OpenList/v4/pkg/utils/hash"
"github.com/go-resty/resty/v2"
@ -140,7 +141,8 @@ func (d *PikPak) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
}
_, err := d.request(fmt.Sprintf("https://api-drive.mypikpak.net/drive/v1/files/%s", file.GetID()),
http.MethodGet, func(req *resty.Request) {
-req.SetQueryParams(queryParams)
+req.SetContext(ctx).
+SetQueryParams(queryParams)
}, &resp)
if err != nil {
return nil, err
@ -159,7 +161,7 @@ func (d *PikPak) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
func (d *PikPak) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
_, err := d.request("https://api-drive.mypikpak.net/drive/v1/files", http.MethodPost, func(req *resty.Request) {
-req.SetBody(base.Json{
+req.SetContext(ctx).SetBody(base.Json{
"kind": "drive#folder",
"parent_id": parentDir.GetID(),
"name": dirName,
@ -170,7 +172,7 @@ func (d *PikPak) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
func (d *PikPak) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
_, err := d.request("https://api-drive.mypikpak.net/drive/v1/files:batchMove", http.MethodPost, func(req *resty.Request) {
-req.SetBody(base.Json{
+req.SetContext(ctx).SetBody(base.Json{
"ids": []string{srcObj.GetID()},
"to": base.Json{
"parent_id": dstDir.GetID(),
@ -182,7 +184,7 @@ func (d *PikPak) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
func (d *PikPak) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
_, err := d.request("https://api-drive.mypikpak.net/drive/v1/files/"+srcObj.GetID(), http.MethodPatch, func(req *resty.Request) {
-req.SetBody(base.Json{
+req.SetContext(ctx).SetBody(base.Json{
"name": newName,
})
}, nil)
@ -191,7 +193,7 @@ func (d *PikPak) Rename(ctx context.Context, srcObj model.Obj, newName string) e
func (d *PikPak) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
_, err := d.request("https://api-drive.mypikpak.net/drive/v1/files:batchCopy", http.MethodPost, func(req *resty.Request) {
-req.SetBody(base.Json{
+req.SetContext(ctx).SetBody(base.Json{
"ids": []string{srcObj.GetID()},
"to": base.Json{
"parent_id": dstDir.GetID(),
@ -203,7 +205,7 @@ func (d *PikPak) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
func (d *PikPak) Remove(ctx context.Context, obj model.Obj) error {
_, err := d.request("https://api-drive.mypikpak.net/drive/v1/files:batchTrash", http.MethodPost, func(req *resty.Request) {
-req.SetBody(base.Json{
+req.SetContext(ctx).SetBody(base.Json{
"ids": []string{obj.GetID()},
})
}, nil)
@ -211,15 +213,11 @@ func (d *PikPak) Remove(ctx context.Context, obj model.Obj) error {
}
func (d *PikPak) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-hi := stream.GetHash()
-sha1Str := hi.GetHash(hash_extend.GCID)
-if len(sha1Str) < hash_extend.GCID.Width {
-tFile, err := stream.CacheFullInTempFile()
-if err != nil {
-return err
-}
-sha1Str, err = utils.HashFile(hash_extend.GCID, tFile, stream.GetSize())
+sha1Str := stream.GetHash().GetHash(hash_extend.GCID)
+if len(sha1Str) < hash_extend.GCID.Width {
+var err error
+_, sha1Str, err = streamPkg.CacheFullAndHash(stream, &up, hash_extend.GCID, stream.GetSize())
if err != nil {
return err
}
@ -277,7 +275,8 @@ func (d *PikPak) OfflineDownload(ctx context.Context, fileUrl string, parentDir
var resp OfflineDownloadResp
_, err := d.request("https://api-drive.mypikpak.net/drive/v1/files", http.MethodPost, func(req *resty.Request) {
-req.SetBody(requestBody)
+req.SetContext(ctx).
+SetBody(requestBody)
}, &resp)
if err != nil {

@ -438,20 +438,19 @@ func (d *PikPak) UploadByOSS(ctx context.Context, params *S3Params, s model.File
}
func (d *PikPak) UploadByMultipart(ctx context.Context, params *S3Params, fileSize int64, s model.FileStreamer, up driver.UpdateProgress) error {
+tmpF, err := s.CacheFullAndWriter(&up, nil)
+if err != nil {
+return err
+}
var (
chunks []oss.FileChunk
parts []oss.UploadPart
imur oss.InitiateMultipartUploadResult
ossClient *oss.Client
bucket *oss.Bucket
-err error
)
-tmpF, err := s.CacheFullInTempFile()
-if err != nil {
-return err
-}
if ossClient, err = oss.New(params.Endpoint, params.AccessKeyID, params.AccessKeySecret); err != nil {
return err
}

@ -14,7 +14,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
-streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/go-resty/resty/v2"
)
@ -158,9 +157,7 @@ func (d *QuarkOpen) Put(ctx context.Context, dstDir model.Obj, stream model.File
}
if len(writers) > 0 {
-cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
-up = model.UpdateProgressWithRange(up, 50, 100)
-_, err := streamPkg.CacheFullInTempFileAndWriter(stream, cacheFileProgress, io.MultiWriter(writers...))
+_, err := stream.CacheFullAndWriter(&up, io.MultiWriter(writers...))
if err != nil {
return err
}

@ -8,14 +8,15 @@ import (
"encoding/hex"
"errors"
"fmt"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
"github.com/google/uuid"
"io"
"net/http"
"strconv"
"strings"
"time"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
"github.com/google/uuid"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
@ -244,11 +245,8 @@ func (d *QuarkOpen) generateProofCode(file model.FileStreamer, proofSeed string,
// read the data
buf := make([]byte, length)
n, err := io.ReadFull(reader, buf)
-if errors.Is(err, io.ErrUnexpectedEOF) {
-return "", fmt.Errorf("can't read data, expected=%d, got=%d", length, n)
-}
-if err != nil {
-return "", fmt.Errorf("failed to read data: %w", err)
+if n != int(length) {
+return "", fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", length, n, err)
}
// Base64-encode the result

@ -13,7 +13,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
-streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
@ -144,9 +143,7 @@ func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.File
}
if len(writers) > 0 {
-cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
-up = model.UpdateProgressWithRange(up, 50, 100)
-_, err := streamPkg.CacheFullInTempFileAndWriter(stream, cacheFileProgress, io.MultiWriter(writers...))
+_, err := stream.CacheFullAndWriter(&up, io.MultiWriter(writers...))
if err != nil {
return err
}

Some files were not shown because too many files have changed in this diff Show More