Compare commits

...

144 Commits

Author SHA1 Message Date
7350b44036 fix(deps): update github.com/jlaffaye/ftp digest to 3f092e0 2025-08-31 07:48:41 +00:00
87cf95f50b fix(139): refactor part upload logic (#1184)
* fix(139): refactor part upload logic

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(139): handle upload errors

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(139): sort upload parts by PartNumber before uploading

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(139): improve error handling

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(139): add validation for upload part index to prevent out of bounds errors

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-31 15:47:12 +08:00
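A minimal sketch of the sort-and-validate step described in this PR, using a hypothetical PartInfo type rather than the 139 driver's real structures:

```go
package main

import (
	"fmt"
	"sort"
)

// PartInfo is a hypothetical stand-in for the driver's upload-part type.
type PartInfo struct {
	PartNumber int
	UploadURL  string
}

// orderAndValidate sorts parts by PartNumber and rejects indexes that would
// read out of bounds, mirroring the sorting and validation fixes above.
func orderAndValidate(parts []PartInfo, total int) error {
	sort.Slice(parts, func(i, j int) bool {
		return parts[i].PartNumber < parts[j].PartNumber
	})
	for _, p := range parts {
		if p.PartNumber < 1 || p.PartNumber > total {
			return fmt.Errorf("part number %d out of range [1,%d]", p.PartNumber, total)
		}
	}
	return nil
}

func main() {
	parts := []PartInfo{{PartNumber: 2}, {PartNumber: 1}}
	fmt.Println(orderAndValidate(parts, 2)) // <nil>
}
```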
8ab26cb823 fix(123open): change DirectLink type from 'boolean' to 'bool' (#1180)
Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-29 19:06:37 +08:00
5880c8e1af fix(189tv): use rate-limited upload stream in OldUpload function (#1176)
* fix(189tv): use rate-limited upload stream in OldUpload function

* fix(189tv): wrap tempFile with io.NopCloser to prevent premature closure in OldUpload function

* .
2025-08-29 16:01:50 +08:00
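A small illustration of the io.NopCloser trick mentioned above, with a stand-in upload function that closes whatever reader it receives:

```go
package main

import (
	"fmt"
	"io"
	"os"
)

// upload is a stand-in for an SDK call that closes its reader when done.
func upload(r io.ReadCloser) error {
	defer r.Close()
	_, err := io.Copy(io.Discard, r)
	return err
}

func main() {
	tempFile, err := os.CreateTemp("", "part-*")
	if err != nil {
		panic(err)
	}
	defer os.Remove(tempFile.Name())
	defer tempFile.Close() // closed by us, only after all passes are done

	// io.NopCloser hides Close from the callee, so the temp file survives
	// for a later retry or a second read pass.
	if err := upload(io.NopCloser(tempFile)); err != nil {
		panic(err)
	}
	fmt.Println("temp file still open:", tempFile.Name())
}
```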
14bf4ecb4c fix(share): support custom proxy url (#1130)
* feat(share): support custom proxy url

* fix(share): count access

* fix: maybe a path traversal vulnerability?
2025-08-28 22:11:19 +08:00
04a5e58781 fix(server): can't edit .md source files (#1159)
* fix(server): can't edit .md source files

* chore

* add ignore direct link args
2025-08-28 16:19:57 +08:00
bbd4389345 fix(wopan): use fixed timezone for parsing time (#1170)
fix(wopan): update getTime function to use fixed timezone for parsing

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-28 13:02:02 +08:00
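Parsing with a fixed timezone instead of the host's local one looks roughly like this; the offset and layout here are assumptions, not wopan's actual values:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Parse a timestamp that the remote API always emits in UTC+8,
	// instead of trusting the host's local timezone.
	cst := time.FixedZone("CST", 8*60*60)
	t, err := time.ParseInLocation("2006-01-02 15:04:05", "2025-08-28 13:02:02", cst)
	if err != nil {
		panic(err)
	}
	fmt.Println(t) // 2025-08-28 13:02:02 +0800 CST
}
```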
f350ccdf95 fix(189pc): sliceSize must not be equal to fileSize (#1169)
* fix(189pc): sliceSize not equal to fileSize

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* Update comment for sliceSize parameter

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-28 11:32:40 +08:00
4f2de9395e feat(degoo): token improvement (#1149)
* Update driver.go

Signed-off-by: Caspian <app@caspian.im>

* Update meta.go

Signed-off-by: Caspian <app@caspian.im>

* Update util.go

Signed-off-by: Caspian <app@caspian.im>

* Update util.go

Signed-off-by: Caspian <app@caspian.im>

* Update util.go

Signed-off-by: Caspian <app@caspian.im>

* Update util.go

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* make account optional

* ensure username and password

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

---------

Signed-off-by: Caspian <app@caspian.im>
Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-26 01:22:59 +08:00
b0dbbebfb0 feat(drivers): add Teldrive driver (#1116)
https://github.com/tgdrive/teldrive

https://teldrive-docs.pages.dev/docs/api

Implemented:
* copy
* move
* link (302 share and local proxy)
* chunked uploads
* rename

Not implemented:
- OpenList QR code login
- refresh token

https://github.com/OpenListTeam/OpenList-Docs/pull/155


* feat(Teldrive): Add driver Teldrive

* fix(teldrive): force webproxy and memory optimized

* chore(teldrive): go fmt

* chore(teldrive): remove TODO

* chore(teldrive): organize code

* feat(teldrive): add UseShareLink option and support 302

* fix(teldrive): standardize API path construction

* fix(teldrive): trim trailing slash from Address in Init method

* chore(teldrive): update help text for UseShareLink field in Addition struct

* fix(teldrive): set 10 MiB as default chunk size

---------

Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
Co-authored-by: ILoveScratch <ilovescratch@foxmail.com>
2025-08-25 01:34:08 +08:00
0c27b4bd47 docs(contributing): update guidelines (#983)
[skip ci]

* docs(contributing): update guidelines

* docs(contributing): clarify fork

* docs(contributing): sync translation

Co-authored-by: Yinan Qin <39023210+elysia-best@users.noreply.github.com>
Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* docs(contributing): add label and cc reminder

* docs(contributing): remove ensure new branch from checklist

* docs(contributing): replace generic GitHub URLs with user-specific ones

* docs(contributing): make branch deletion after PR merge optional

* docs(contributing): keep --recurse-submodules

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
Co-authored-by: Yinan Qin <39023210+elysia-best@users.noreply.github.com>
2025-08-24 20:13:11 +08:00
736cd9e5f2 fix(quark): fix getTranscodingLink (#1136)
The first video info may not contain url

* fix(quark): fix getTranscodingLink

* fix(quark_tv): fix getTranscodingLink
2025-08-24 19:55:10 +08:00
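Since the first video info may not contain a URL, the fix presumably scans for the first usable entry. A hedged sketch with a hypothetical videoInfo type:

```go
package main

import (
	"errors"
	"fmt"
)

// videoInfo is a hypothetical shape for Quark's transcoding entries.
type videoInfo struct {
	Resolution string
	URL        string
}

// pickTranscodingLink returns the first entry that actually carries a URL,
// since the first element in the list may not contain one.
func pickTranscodingLink(infos []videoInfo) (string, error) {
	for _, v := range infos {
		if v.URL != "" {
			return v.URL, nil
		}
	}
	return "", errors.New("no playable transcoding link found")
}

func main() {
	infos := []videoInfo{{Resolution: "4k"}, {Resolution: "1080p", URL: "https://example.com/v.m3u8"}}
	link, _ := pickTranscodingLink(infos)
	fmt.Println(link)
}
```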
c7a603c926 fix(115): fix get 115 app version (#1137) 2025-08-24 19:50:21 +08:00
a28d6d5693 fix(123_open): fix token refresh (#1121) 2025-08-23 23:01:41 +08:00
e59d2233e2 feat(drivers): add Degoo driver (#1097)
* Create driver.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Create util.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Create types.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Create meta.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Update drivers/degoo/driver.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: CaspianGUAN <app@caspian.im>

* Update drivers/degoo/driver.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: CaspianGUAN <app@caspian.im>

* Update driver.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Update meta.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Update types.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Update util.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Update driver.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Update util.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Update drivers/degoo/util.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: CaspianGUAN <app@caspian.im>

* Update util.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* refactor(degoo): add Degoo driver integration and update API handling

* fix(degoo): apply suggestions

---------

Signed-off-by: CaspianGUAN <app@caspian.im>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-23 22:47:02 +08:00
01914a06ef refactor(ci): add permissions check at docker's entrypoint (#1128)
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-22 19:35:48 +08:00
6499374d1c fix(deps): update 115driver to v1.1.1 (close SheltonZhu/115driver#57) (#1115) 2025-08-20 21:33:21 +08:00
b054919d5c feat(ilanzou): add support for rapid upload and fix duplication handling (#1065)
* feat(ilanzou): add support for rapid upload token handling

* feat(ilanzou): add NoOverwriteUpload option
2025-08-19 19:19:44 +08:00
048ee9c2e5 feat(server): adapting #1099 to #991 (#1102) 2025-08-19 15:48:59 +08:00
23394548ca feat(123_open): add DirectLink option (#1045)
* feat(123_open): add `UseDirectLink` option

* feat(123_open): update rate limit rules

* fix(123_open): update api

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* feat(123_open): enhance direct link functionality with private key and expiration

* refactor(123_open): use UUID for random generation

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-19 15:23:10 +08:00
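The mention of a private key and expiration suggests a signed-URL scheme. Below is a generic HMAC sketch; the real 123open signing format is defined by the provider and may differ:

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"time"
)

// signDirectLink appends a generic expiry plus HMAC signature to a URL.
// This only illustrates the shape of "private key + expiration" links;
// parameter names here are illustrative, not the provider's.
func signDirectLink(rawURL, privateKey string, ttl time.Duration) string {
	expires := time.Now().Add(ttl).Unix()
	mac := hmac.New(sha256.New, []byte(privateKey))
	fmt.Fprintf(mac, "%s:%d", rawURL, expires)
	sig := hex.EncodeToString(mac.Sum(nil))
	return fmt.Sprintf("%s?expires=%d&signature=%s", rawURL, expires, sig)
}

func main() {
	fmt.Println(signDirectLink("https://cdn.example.com/file.bin", "secret", 30*time.Minute))
}
```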
b04677b806 feat(server): add error page and status code (#1099) 2025-08-19 15:18:12 +08:00
e4c902dd93 feat(share): support more secure file sharing (#991)
Provides a file-sharing flow similar to most cloud drives: a share can hide the file's source path behind a forced Web proxy, supports a share code, a maximum access count, and an expiration time, and does not require enabling the guest user.

Adjustable in global settings:
- whether to force the Web proxy
- whether to allow preview
- whether to allow previewing archive files
- what the "Copy link" button copies after a file is shared

Frontend: OpenListTeam/OpenList-Frontend#156
Docs: OpenListTeam/OpenList-Docs#130

Close #183
Close #526
Close #860
Close #892
Close #1079


* feat(share): support more secure file sharing

* feat(share): add archive preview

* fix(share): fix some bugs

* feat(openlist_share): add openlist share driver

* fix(share): missing unwrap when getting virtual path

* fix: use unwrapPath instead of path for virtual file name comparison

* fix(share): change request method of /api/share/list from GET to Any

* fix(share): path traversal vulnerability in sharing path check

* fix: files shared from the alias driver could not get a URL when the proxy was not enabled

* fix(sharing): update error message for sharing root link extraction

---------

Co-authored-by: Suyunmeng <69945917+Suyunmeng@users.noreply.github.com>
Co-authored-by: j2rong4cn <j2rong@qq.com>
2025-08-19 15:10:02 +08:00
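The path-traversal fix above implies a containment check against the shared root. A minimal sketch of that idea, not the project's actual sharing code:

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// withinShare reports whether the requested path stays inside the shared
// root after normalization, a typical guard against "../" traversal.
func withinShare(shareRoot, requested string) bool {
	clean := path.Clean("/" + requested)
	root := path.Clean("/" + shareRoot)
	return clean == root || strings.HasPrefix(clean, root+"/")
}

func main() {
	fmt.Println(withinShare("/pub/docs", "/pub/docs/a.txt"))     // true
	fmt.Println(withinShare("/pub/docs", "/pub/docs/../../etc")) // false
}
```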
5d8bd258c0 refactor(docker): reduce docker image size (#1091)
* fix(docker): reduce image size

* refactor(docker): update user and group creation

* Update Dockerfile

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-19 10:27:33 +08:00
08c5283c8c feat(docker): Update docker-compose configuration (#1081)
* feat(docker): Update docker-compose configuration

* Update docker-compose.yml

Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
Signed-off-by: huancun _- <huancun@hc26.org>

---------

Signed-off-by: huancun _- <huancun@hc26.org>
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-18 14:29:59 +08:00
10a14f10cd fix(docker): improve startup process and SIGTERM handling (#1089)
* fix(ci): modify the way OpenList is started.

* fix(ci): start runsvdir
2025-08-18 11:13:05 +08:00
f86ebc52a0 refactor(123_open): improve upload (#1076)
* refactor(123_open): improve upload

* optimize buffer initialization for multipart form

* generate a new form for each retry

* .
2025-08-17 14:25:23 +08:00
016ed90efa feat(stream): fast buffer freeing for large cache (#1053)
Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-16 17:19:52 +08:00
d76407b201 fix(dropbox): incorrect path error during upload (#1052)
* Fix incorrect path error during upload on Dropbox

* Add RootNamespaceId to the config for direct modification

* Refactor Dropbox header logic: extract JSON marshaling into helper method

* Fix Dropbox: replace marshalToJSONString with utils.Json.MarshalToString
2025-08-16 14:18:02 +08:00
5de6b660f2 fix(terabox): user not exists error (#1056)
* fix user location error when uploading files
2025-08-15 21:25:57 +08:00
71ada3b656 fix(ci-sync): fix workflow for syncing Repository (#1062) 2025-08-15 18:48:55 +08:00
dc42f0e226 [skip ci]fix(ci): update sync workflow (#1061) 2025-08-15 18:36:52 +08:00
74bf9f6467 [skip ci]feat(sync): add workflow to sync GitHub repository (#1060)
feat(sync): add workflow to sync GitHub repository to Gitee
2025-08-15 18:12:29 +08:00
d0c22a1ecb feat(ci): add the default user for docker image (#1036)
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-12 09:51:40 +08:00
57fceabcf4 perf(stream): improve file stream range reading and caching mechanism (#1001)
* perf(stream): improve file stream range reading and caching mechanism

* .

* add bytes_test.go

* fix(stream): handle EOF and buffer reading more gracefully

* add comments

* refactor: update CacheFullAndWriter to accept pointer for UpdateProgress

* update tests

* Update drivers/google_drive/util.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>

* clone Link more elegantly

* fix: stream already cached but could not be read repeatedly

* rename the Bytes type to Reader

* fix stack overflow

* update tests

---------

Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-11 23:41:22 +08:00
8c244a984d refactor(assets): migrate to resource domain (#975)
* refactor(assets): migrate to resource domain

* feat(bootstrap): add migration value for logo and favicon settings
2025-08-10 09:57:33 +08:00
df479ba806 fix(aliyundrive_open): limit rate for every request (close #724) (#1011)
* fix(aliyundrive_open): limit rate for `Remove` and `MakeDir`; reduce limit for `List` and `Link` (close #724)

* Update drivers/aliyundrive_open/driver.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: 火星大王 <34576789+huoxingdawang@users.noreply.github.com>

* Update drivers/aliyundrive_open/driver.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: 火星大王 <34576789+huoxingdawang@users.noreply.github.com>

* fix(aliyundrive_open): limit rate for every request

* fix(aliyundrive_open): fix limiter not work on reference driver

* fix(aliyundrive_open): typo

* fix(aliyundrive_open): limiter not set to nil after free

* fix(aliyundrive_share): limit rate for every request

---------

Signed-off-by: 火星大王 <34576789+huoxingdawang@users.noreply.github.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-10 09:55:20 +08:00
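Limiting every request usually means funnelling all driver calls through one shared limiter. A sketch using golang.org/x/time/rate; the client wrapper here is hypothetical:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

// limitedClient routes every API call through a shared limiter, so Remove,
// MakeDir, List, Link, etc. all count against the same budget, the idea
// behind limiting every request rather than a few hot paths.
type limitedClient struct {
	limiter *rate.Limiter
}

func (c *limitedClient) do(ctx context.Context, name string) error {
	if c.limiter != nil {
		if err := c.limiter.Wait(ctx); err != nil { // blocks until a token is free
			return err
		}
	}
	fmt.Println("calling", name, "at", time.Now().Format("15:04:05.000"))
	return nil
}

func main() {
	c := &limitedClient{limiter: rate.NewLimiter(rate.Every(300*time.Millisecond), 1)}
	ctx := context.Background()
	for _, op := range []string{"List", "Link", "MakeDir", "Remove"} {
		_ = c.do(ctx, op)
	}
}
```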
5ae8e96237 feat(123_open): update Put method to return model.Obj (#1008)
* feat(123_open): update Put method to return model.Obj

* fix(123_open): declare time zones

* chore(123_open): fix typo

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(123_open): use fixed timezone

* fix(123_open): implement PutResult interface for Open123 driver

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: Suyunmeng <69945917+Suyunmeng@users.noreply.github.com>
2025-08-09 15:09:12 +08:00
aa0ced47b0 fix(webdav): Handle HEAD requests for directories with appropriate headers (#1015)
Implement handling of HEAD requests for directories by setting the correct Content-Type and Content-Length headers.
2025-08-09 13:57:09 +08:00
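A minimal sketch of the described behavior, answering HEAD for a directory with explicit Content-Type and Content-Length headers; it is not the project's actual WebDAV handler:

```go
package main

import (
	"net/http"
	"os"
)

// headHandler answers HEAD for a directory with explicit headers instead
// of streaming a body, which is what HEAD clients expect.
func headHandler(w http.ResponseWriter, r *http.Request) {
	fi, err := os.Stat("." + r.URL.Path)
	if err != nil {
		http.NotFound(w, r)
		return
	}
	if r.Method == http.MethodHead && fi.IsDir() {
		w.Header().Set("Content-Type", "text/html; charset=utf-8")
		w.Header().Set("Content-Length", "0")
		w.WriteHeader(http.StatusOK)
		return
	}
	http.ServeFile(w, r, "."+r.URL.Path)
}

func main() {
	http.ListenAndServe(":8080", http.HandlerFunc(headHandler))
}
```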
ab747d9052 feat(config): Add PWA manifest.json endpoint for web app installation (#990)
* feat(config): Add PWA manifest.json endpoint for web app installation

* fix: Update comment to English in manifest handler

* fix: fix EOL

* fix: Remove unused fmt import from manifest handler

* feat: use site settings for manifest name and icon

* fix(manifest): Move manifest.json route to static handler for proper CDN handling

* feat: move manifest.json handler to static package and improve path handling

* feat: Add custom static file handler to prevent manifest.json conflicts

* fix: Integrate manifest.json handling into static file serving routes

* fix: Simplify PWA manifest scope handling and static file serving

- Remove CDN-specific logic for PWA manifest scope and start_url
- Always use base path for PWA scope regardless of CDN configuration
- Replace manual file serving logic with http.FileServer for static assets

* fix: Ensure consistent base path handling in site configuration and manifest path construction

* fix: Refactor trailing slash handling in site configuration

* feat(static): update manifest path handling and add route for manifest.json
2025-08-08 20:07:51 +08:00
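A sketch of a manifest.json handler that always derives scope and start_url from the configured base path, as the final fix settles on. Names and field values are illustrative:

```go
package main

import (
	"encoding/json"
	"net/http"
	"strings"
)

// manifest builds a PWA manifest whose scope/start_url derive from the
// base path regardless of CDN configuration; fields are placeholders,
// not the project's real site settings.
func manifest(basePath string) http.HandlerFunc {
	scope := "/" + strings.Trim(basePath, "/")
	if scope != "/" {
		scope += "/"
	}
	return func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/manifest+json")
		json.NewEncoder(w).Encode(map[string]any{
			"name":      "OpenList",
			"scope":     scope,
			"start_url": scope,
			"display":   "standalone",
		})
	}
}

func main() {
	http.Handle("/manifest.json", manifest("/"))
	http.ListenAndServe(":8080", nil)
}
```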
93c06213d4 feat(local): add directory size support (#624)
* feat(local): add directory size support

* fix(local): fix and improve directory size calculation

* style(local): fix code style

* style(local): fix code style

* style(local): fix code style

* fix(local): refresh directory size when force refresh

Signed-off-by: 我怎么就不是一只猫呢? <26274059+dezhishen@users.noreply.github.com>

* fix(local): Avoid traversing the parent's parent, which leads to an endless loop

Signed-off-by: 我怎么就不是一只猫呢? <26274059+dezhishen@users.noreply.github.com>

* fix(local): refresh dir size only when enabled

Signed-off-by: 我怎么就不是一只猫呢? <26274059+dezhishen@users.noreply.github.com>

* fix(local): logical error && add RecalculateDirSize && cleaner code for int64

* feat(local): add Benchmark for CalculateDirSize

* refactor(local): improve error checking during move

---------

Signed-off-by: 我怎么就不是一只猫呢? <26274059+dezhishen@users.noreply.github.com>
Co-authored-by: 我怎么就不是一只猫呢? <26274059+dezhishen@users.noreply.github.com>
2025-08-08 16:59:16 +08:00
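Directory size calculation is a recursive walk that must not climb back up the tree. A minimal sketch with filepath.WalkDir, which does not follow symlinks; the driver's cache-aware version is more involved:

```go
package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
)

// calculateDirSize sums regular-file sizes below root. filepath.WalkDir
// never follows symlinks, which sidesteps the parent-loop problem the
// fix above is about.
func calculateDirSize(root string) (int64, error) {
	var total int64
	err := filepath.WalkDir(root, func(_ string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.Type().IsRegular() {
			info, err := d.Info()
			if err != nil {
				return err
			}
			total += info.Size()
		}
		return nil
	})
	return total, err
}

func main() {
	size, err := calculateDirSize(".")
	fmt.Println(size, err)
}
```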
b9b8eed285 [skip ci]feat(ci): add FRONTEND_REPO variable to workflows and build script (#1006) 2025-08-08 16:36:22 +08:00
317d190b77 fix(ftp): create a new connection for each download (#989) 2025-08-06 20:35:01 +08:00
52d7d819ad feat(lenovonas_share): add thumb (#986) 2025-08-06 17:34:43 +08:00
0483e0f868 feat(driver_strm): also show some files with strm (#969)
* feat(driver_strm): also show some files with strm

Allow users to set some file types that should be shown with strm, usually subtitles

Most of the code was copied and adapted from drivers/alias

* optimize

* optimize

* .

* add comments

---------

Co-authored-by: j2rong4cn <j2rong@qq.com>
Co-authored-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>
2025-08-06 15:40:48 +08:00
08dae4f55f feat(123_open): update upload api v2 (#976) 2025-08-06 15:27:13 +08:00
9ac0484bc0 perf(ftp): improve concurrent Link response; fix alias/local driver issues (#974) 2025-08-06 13:32:37 +08:00
8cf15183a0 perf: optimize upload (#554)
* perf(115,123): optimize upload

* chore

* aliyun_open, google_drive

* fix bug

* chore

* cloudreve, cloudreve_v4, onedrive, onedrive_app

* chore(conf): add `max_buffer_limit` option

* 123pan multithread upload

* doubao

* google_drive

* chore

* chore

* chore: code for computing the number of parts

* auto-adjust MaxBufferLimit

* auto-adjust MaxBufferLimit

* 189pc

* add Lifecycle to errorgroup

* fill in the gaps

* Conf.MaxBufferLimit is in MB

* .

---------

Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-05 21:42:54 +08:00
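The bullet about code for computing the number of parts refers to the usual ceiling division; for example, a 10 MiB file with 4 MiB parts needs 3 parts:

```go
package main

import "fmt"

// partCount is the standard ceiling division used when splitting an
// upload into fixed-size parts.
func partCount(fileSize, chunkSize int64) int64 {
	if chunkSize <= 0 {
		return 0
	}
	return (fileSize + chunkSize - 1) / chunkSize
}

func main() {
	const MiB = int64(1 << 20)
	fmt.Println(partCount(10*MiB, 4*MiB)) // 3
}
```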
c8f2aaaa55 feat(cmd): add delete command for storage (#952) 2025-08-04 17:30:43 +08:00
1208bd0a83 fix(fs): nil interface not equal to nil (#971)
https://go.dev/doc/faq#nil_error
2025-08-03 23:51:11 +08:00
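The linked FAQ entry describes the classic typed-nil trap this commit fixes. A self-contained reproduction:

```go
package main

import "fmt"

type myErr struct{}

func (*myErr) Error() string { return "boom" }

// mayFail returns a typed nil pointer; stored in the error interface it
// is NOT equal to nil, which is the trap linked above.
func mayFail(fail bool) error {
	var e *myErr
	if fail {
		e = &myErr{}
	}
	return e // bad: non-nil interface wrapping a nil *myErr when !fail
}

func main() {
	err := mayFail(false)
	fmt.Println(err == nil) // false: the interface holds a (*myErr)(nil)
}
```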
6b096bcad4 fix(fs): deadlock when get link error (#963) 2025-08-02 17:49:53 +08:00
58dbf088f9 fix(fs): forget cache when get link error (#956) 2025-08-02 11:03:34 +08:00
05ff7908f2 fix(strm): encoded path is ineffective (#951) 2025-08-02 00:23:18 +08:00
a703b736c9 feat(offline_download): filter empty URLs in offline download requests (#948) 2025-08-01 16:12:21 +08:00
e458f2ab53 fix(bootstrap): add newline after initial admin password output (#943)
fix(bootstrap): add newline after initial admin password output
2025-08-01 13:43:41 +08:00
a5a22e7085 fix(local): Treat junction as directory in Windows. (#809)
Treat junction as directory in Windows.
2025-07-31 13:54:56 +08:00
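A hedged sketch of treating a junction as a directory: Lstat reports the link itself, while Stat follows it, so the target's IsDir answers the question. The real driver may check Windows file attributes instead:

```go
package main

import (
	"fmt"
	"os"
)

// isDirLike treats Windows junctions (reparse points) as directories:
// if Lstat reports a link, Stat follows it, and a junction whose target
// is a folder comes back IsDir() == true. A sketch of the idea only.
func isDirLike(path string) (bool, error) {
	fi, err := os.Lstat(path)
	if err != nil {
		return false, err
	}
	if fi.IsDir() {
		return true, nil
	}
	if fi.Mode()&os.ModeSymlink != 0 {
		target, err := os.Stat(path) // follows the junction/symlink
		if err != nil {
			return false, err
		}
		return target.IsDir(), nil
	}
	return false, nil
}

func main() {
	ok, err := isDirLike(`C:\Users\Public\SomeJunction`)
	fmt.Println(ok, err)
}
```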
9469c95b14 fix(security): potential XSS vulnerabilities (#896) 2025-07-31 12:57:20 +08:00
cf912dcf7a fix(cmd): output to console (#920)
fix(cmd): output to terminal
2025-07-31 11:44:00 +08:00
ccd4af26e5 feat(patch): add migration from Alist V3 driver to OpenList (#919)
* feat(patch): add migration from Alist V3 driver to OpenList

* chore(patch): improve logging
2025-07-31 11:43:21 +08:00
1682e873d6 feat(search): enhanced meilisearch search experience (#864)
* feat(search): enhanced `meilisearch` search experience
- upgrade `meilisearch` dependency
- support subdirectory search
- optimize searchDocument fields for subdirectory search
- specify full index uid instead of index prefix

* fix(search): more fixes to `meilisearch`
- make use of context where context was not used
- remove code of waiting task in deletion process, as tasks are queued and will be executed orderly (if tasks were submitted to the queue successfully), which can improve `AutoUpdate` performance
2025-07-31 11:24:22 +08:00
54ae7e6d9b feat(115_open): Add GetObjInfo to accelerate getting link (#888)
* feat(115_open): Add GetObjInfo to accelerate getting link

* feat(fs): use cache directly when cache exist
2025-07-31 11:20:02 +08:00
991da7d87f feat(strm): add local mode (#885)
* feat(strm): add local mode

* Update drivers/strm/meta.go

Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
Signed-off-by: Seven <53081179+Seven66677731@users.noreply.github.com>

* feat(strm): local mode add sign

---------

Signed-off-by: Seven <53081179+Seven66677731@users.noreply.github.com>
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-07-31 11:18:59 +08:00
Dgs
a498091aef fix(123&&123_share): fix link request header referer (#915) 2025-07-31 10:10:38 +08:00
976c82bb2b fix(drivers): update time-related fields to int64 (#913)
- In doubao/types.go:
  - Change LastUpdateTime from int to int64
  - Change UserCreateTime from int to int64
- In doubao_share/types.go:
  - Change CreateTime and UpdateTime from int to int64 in ShareInfo and FilePath
- In quark_uc/types.go:
  - Change UpdateTime from int to int64 in TranscodingResp

These changes ensure consistent and accurate representation of timestamp data across the project.
2025-07-31 10:10:32 +08:00
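Millisecond timestamps exceed the 32-bit range (max about 2.1e9), so a plain int overflows on 32-bit builds; int64 is safe everywhere:

```go
package main

import (
	"fmt"
	"time"
)

// Unix-millisecond values are ~1.7e12 today, far beyond what int can
// hold on 386/arm builds, hence the int64 fields below.
type shareInfo struct {
	CreateTime int64 `json:"create_time"`
	UpdateTime int64 `json:"update_time"`
}

func main() {
	s := shareInfo{CreateTime: time.Now().UnixMilli()}
	fmt.Println(time.UnixMilli(s.CreateTime).Format(time.RFC3339))
}
```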
5b41a3bdff feat(ci): Add support for LoongArch64 architecture builds (#907) 2025-07-31 10:10:19 +08:00
19d1a3b785 refactor(ci): Refactor Docker build to use base images and dynamic Dockerfile generation (#904) 2025-07-30 15:04:29 +08:00
3c7b0c4999 fix(qb): Configure HTTP client with connection pooling and fix resource leaks in qBittorrent client. (#898) 2025-07-29 21:56:36 +08:00
d6867b4ab6 fix(user): show admin password on first start (#883)
* fix: fix admin password not shown on first start
* chore: add time dependency

Co-authored-by: Yinan Qin <39023210+elysia-best@users.noreply.github.com>
Signed-off-by: ILoveScratch <ilovescratch@foxmail.com>

* fix: fix log format

Co-authored-by: Yinan Qin <39023210+elysia-best@users.noreply.github.com>
Signed-off-by: ILoveScratch <ilovescratch@foxmail.com>

---------

Signed-off-by: ILoveScratch <ilovescratch@foxmail.com>
Co-authored-by: Yinan Qin <39023210+elysia-best@users.noreply.github.com>
2025-07-29 21:36:27 +08:00
11cf561307 fix(security): potential XSS vulnerabilities (#880)
* fix(security): potential XSS vulnerabilities

* chore: replace alist identifier to openlist identifier

Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
Signed-off-by: ILoveScratch <ilovescratch@foxmail.com>

---------

Signed-off-by: ILoveScratch <ilovescratch@foxmail.com>
Co-authored-by: ILoveScratch <ilovescratch@foxmail.com>
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-07-29 20:17:11 +08:00
239b58f63e fix(ci): Disable linux/s390x Docker builds (#887) 2025-07-29 16:22:50 +08:00
7da06655cb feat(setting): add site version information (#859)
* feat(setting): add site version information

* feat(conf): update conf.WebVersion to rolling

* fix(static): update condition to check conf.Version instead of conf.WebVersion

* fix(build.sh): use rolling release for web frontend in dev and beta builds

* chore(build.sh): update GitAuthor to The OpenList Projects Contributors

* fix(static): update condition to check conf.WebVersion
2025-07-29 09:49:33 +08:00
e0b3a611ba feat(thunderx,pikpak): add offline download support for ThunderX; add ctx to specific PikPak functions (#879)
* feat(thunderx,pikpak): add offline download support for ThunderX; add ctx to specific PikPak functions

* Update internal/offline_download/tool/download.go

Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
Signed-off-by: 花月喵梦 <152958106+nekohy@users.noreply.github.com>

---------

Signed-off-by: 花月喵梦 <152958106+nekohy@users.noreply.github.com>
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-07-29 09:46:28 +08:00
be1ad08a83 feat(ci): Add Windows 7 and LoongArch release build support (#857)
* feat: Add Windows 7 and LoongArch old world build support (#30)

* feat: Add Windows 7 and Loongson old world build support

- Add BuildWin7() function with patched Go compiler for Windows 7 compatibility
- Add BuildLoongOldWorld() function for linux-loong64-abi1.0 target
- Create Zig-based wrapper scripts for Windows 7 cross-compilation
- Integrate new build functions into existing release workflows

* fix(win7): Add MinGW-w64 toolchain and improve LoongArch ABI isolation

- Install MinGW-w64 cross-compilation toolchain for Win7 compatibility
- Replace Zig compiler wrappers with MinGW-w64 for Windows 7 builds
- Add Go build cache cleaning to prevent LoongArch ABI1.0/ABI2.0 cross-contamination
- Force clean rebuilds (-a flag) for LoongArch builds to ensure ABI compatibility

* feat: add Windows 7 build support to beta release workflow

* feat: add LoongArch ABI2.0 support alongside existing ABI1.0 build (#31)

- Add BuildWin7() function with patched Go compiler for Windows 7 compatibility
- Add BuildLoongOldWorld() function for linux-loong64-abi1.0 target
- Create Zig-based wrapper scripts for Windows 7 cross-compilation
- Integrate new build functions into existing release workflows
- Install MinGW-w64 cross-compilation toolchain for Win7 compatibility
- Replace Zig compiler wrappers with MinGW-w64 for Windows 7 builds
- Add Go build cache cleaning to prevent LoongArch ABI1.0/ABI2.0 cross-contamination
- Force clean rebuilds (-a flag) for LoongArch builds to ensure ABI compatibility

* [skip ci] refactor: Refactor LoongArch builds to separate glibc from musl compilation

* fix(go-cache): Improve error handling for Go module cache cleaning in LoongArch builds

* feat(build): Enhance LoongArch build process with improved toolchain setup and cache management

* fix(build): Update Windows 7 target naming in build scripts and workflows

* refactor(build): Replace MinGW-w64 with Zig for Windows 7 toolchain in build scripts

* chore(cgo): remove cgo-actions subproject
2025-07-27 00:27:31 +08:00
4e9c30f49d feat(fs): full support webdav cross-driver copy and move (#823)
* fix(fs): restore webdav cross-driver copy and move

* fix bug

* webdav: support copying and moving folders

* optimize

* .
2025-07-26 00:27:46 +08:00
0ee31a3f36 fix(crypt): wrong ContentLength 2025-07-25 19:55:22 +08:00
23bddf991e feat(drivers): enable local sorting for cloudreve, ilanzou (#840)
* feat(cloudreve): enable local sorting

* feat(ilanzou): enable local sorting
2025-07-25 18:01:19 +08:00
da8d6607cf fix(static): support logo replacement (#834 Close #754) 2025-07-25 17:12:51 +08:00
6134574dac fix(fs): rename bug (#832)
* fix(fs): rename bug

* chore

* fix bug

* .

---------

Co-authored-by: j2rong4cn <j2rong@qq.com>
2025-07-25 13:42:39 +08:00
b273232f87 refactor(log): redir utils.Log to logrus after init (#833) 2025-07-25 13:38:45 +08:00
358e4d851e refactor(log): filter (#816) 2025-07-25 11:33:27 +08:00
e8a1ed638a fix(ci): Exclude FreeBSD patch releases from version detection 2025-07-24 22:41:45 +08:00
4106e2a996 fix(static): correct CDN fetch condition for index.html (#814) 2025-07-24 22:28:58 +08:00
c2271df64e fix(ci): update OpenListTeam/cgo-actions to v1.2.2 to fix loongarch64 build (#811)
* Update beta_release.yml

* Update build.yml
2025-07-24 22:20:23 +08:00
d4b8570eb8 fix(docker): Fix the runsvdir permission issue caused by su-exec user switching and resolve the RUN_ARIA2 variable compatibility problem. (#805) 2025-07-24 17:22:49 +08:00
bd297e8ccc fix(deps): update module golang.org/x/image to v0.29.0 (#804)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-24 16:22:19 +08:00
923d282c8a fix(deps): update module github.com/sheltonzhu/115driver to v1.1.0 (#803)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-24 16:21:32 +08:00
4d8c4d7089 fix(deps): update module github.com/coreos/go-oidc to v2.3.0+incompatible (#586)
* fix(deps): update module github.com/coreos/go-oidc to v2.3.0+incompatible

* Update go.mod

Signed-off-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>

---------

Signed-off-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>
2025-07-24 16:21:03 +08:00
e93ab76036 feat(task-group): introduce TaskGroupCoordinator for coordinated task execution (#721)
* feat(task): add task hook, batch task
refactor(move): move uses CopyTask

* Update internal/task/batch_task/refresh.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: Seven <53081179+Seven66677731@users.noreply.github.com>

* fix: upload task allFinish check

* Update internal/task/batch_task/refresh.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: Seven <53081179+Seven66677731@users.noreply.github.com>

* feat: enhance concurrency safety

* optimize code

* decompression

* fix deadlock

* refactor(move): move as task

* refactor and optimize

* .

* optimize; fix bugs

* .

* fix bug

* feat: add task retry check

* proxy the Task.SetState function to track the Task lifecycle

* chore: use the OnSucceeded, OnFailed, OnBeforeRetry functions

* optimize

* optimize; remove duplicate code

* .

* optimize

* .

* webdav

* Revert "fix(fs):After the file is copied or moved, flush the cache of the directory that was copied or moved to."

This reverts commit 5f03edd683.

---------

Signed-off-by: Seven <53081179+Seven66677731@users.noreply.github.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: j2rong4cn <j2rong@qq.com>
2025-07-24 16:15:24 +08:00
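The bullet about proxying Task.SetState suggests intercepting state transitions so a coordinator can observe each task's lifecycle. A sketch with hypothetical names:

```go
package main

import "fmt"

type state int

const (
	pending state = iota
	running
	succeeded
	failed
)

// task routes every state change through SetState so a coordinator can
// observe each transition; names are hypothetical, not the project's
// real task types.
type task struct {
	st     state
	onDone func(ok bool)
}

func (t *task) SetState(s state) {
	t.st = s
	if t.onDone != nil && (s == succeeded || s == failed) {
		t.onDone(s == succeeded)
	}
}

func main() {
	t := &task{onDone: func(ok bool) { fmt.Println("group notified, success:", ok) }}
	t.SetState(running)
	t.SetState(succeeded)
}
```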
a9f02ecdac refactor(log): Refactor log filtering to use centralized configuration and add server-specific filtering (#798)
* feat(log): Add configurable log filtering middleware for HTTP requests

Implement a comprehensive log filtering system that allows selective suppression of HTTP request logs based on paths, methods, and prefixes. The system includes environment variable configuration support and filters health checks, WebDAV requests, and HEAD requests by default to reduce log noise.

* fix(log): Replace gin.DefaultLogFormatter with custom implementation

* Remove filtered logger test file

* fix(log): Refactor log filtering to use centralized configuration and add server-specific filtering

* fix(log): Add documentation comments for log filtering configuration
2025-07-24 16:10:47 +08:00
93849a3b5b fix(deps): update module github.com/pquerna/otp to v1.5.0 (#799)
Signed-off-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>
2025-07-24 16:07:23 +08:00
c2e0d0c9ce fix(deps): update module github.com/protonmail/go-crypto to v1.3.0 (#800)
Signed-off-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>
2025-07-24 16:06:50 +08:00
4a713363ee fix(deps): update module github.com/azure/azure-sdk-for-go/sdk/storage/azblob to v1.6.2 (#801)
Signed-off-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>
2025-07-24 16:06:10 +08:00
3da8ccb7a7 fix(deps): update module github.com/rclone/rclone to v1.70.3 (#802)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-24 16:05:20 +08:00
676b8cff0b fix(deps): update azure-sdk-for-go monorepo (#579)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-24 10:27:36 +08:00
57cf28fc90 fix(deps): update github.com/fclairamb/ftpserverlib digest to 4a925d7 (#675)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-24 10:26:39 +08:00
8cf90e074d fix(deps): update module github.com/charmbracelet/bubbletea to v1.3.6 (#585)
Signed-off-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>
2025-07-24 10:26:23 +08:00
74c2ed8306 fix(deps): update module github.com/charmbracelet/bubbles to v0.21.0 (#583)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-24 10:25:09 +08:00
5f03edd683 fix(fs):After the file is copied or moved, flush the cache of the directory that was copied or moved to. (#592)
* fix(fs): After the file is copied, the cache of the copied directory is refreshed

* fixed randomstring

* fixed EOL and Sync branch

chore(quark_uc): `webdav_policy` default to native_proxy

* fixed uuid and other bugs

* fixed comments

* fixed EOL

* add move refresh

* fixed builds

* fixed batch

* change batch to task.go

---------

Co-authored-by: Sumengjing <146963948+suyunjing-su@users.noreply.github.com>
2025-07-24 10:24:12 +08:00
8b65c918d4 chore(permission): admin enables webdav read-only by default (#726)
chore: admin enables webdav read-only by default
2025-07-24 10:19:49 +08:00
b5f0e3e5ee fix(deps): update module github.com/go-webauthn/webauthn to v0.13.4 (#677)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-24 10:05:44 +08:00
179894ff37 fix(deps): update module github.com/ipfs/go-cid to v0.5.0 (#680)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-24 10:05:05 +08:00
e2fc89c637 chore(deps): update dependency go to v1.24.5 (#783)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-24 10:04:20 +08:00
cacf67b181 fix(deps): update module github.com/yuin/goldmark to v1.7.13 (#794)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-24 10:04:00 +08:00
afb043e1d6 feat(docker): Change keep-alive strategy to runit, add aria2 log support (#791) 2025-07-24 09:19:33 +08:00
d9debb81ad feat(log): Add configurable log filtering middleware for HTTP requests (#782)
* feat(log): Add configurable log filtering middleware for HTTP requests

Implement a comprehensive log filtering system that allows selective suppression of HTTP request logs based on paths, methods, and prefixes. The system includes environment variable configuration support and filters health checks, WebDAV requests, and HEAD requests by default to reduce log noise.
2025-07-24 00:00:26 +08:00
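A minimal sketch of such a filtering middleware for Gin, skipping logs for HEAD requests, health checks, and WebDAV paths; the real implementation reads its rules from environment configuration:

```go
package main

import (
	"net/http"
	"strings"

	"github.com/gin-gonic/gin"
)

// filteredLogger wraps gin.Logger and skips noisy requests (HEAD, health
// checks, WebDAV paths) while logging everything else. The path and
// prefix rules here are illustrative defaults.
func filteredLogger() gin.HandlerFunc {
	logger := gin.Logger()
	return func(c *gin.Context) {
		p := c.Request.URL.Path
		if c.Request.Method == http.MethodHead || p == "/ping" || strings.HasPrefix(p, "/dav/") {
			c.Next() // pass through without logging
			return
		}
		logger(c)
	}
}

func main() {
	r := gin.New()
	r.Use(filteredLogger(), gin.Recovery())
	r.GET("/ping", func(c *gin.Context) { c.String(http.StatusOK, "pong") })
	r.Run(":8080")
}
```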
4c069fddd6 fix(terabox): file upload error (#733)
* fix(terabox): fix file upload error "failed to create file" (errno 10)

Signed-off-by: yuyamionini <46483865+yuyamionini@users.noreply.github.com>

* fix(terabox): fix file upload error "failed to create file" (errno 10)

Signed-off-by: yuyamionini <46483865+yuyamionini@users.noreply.github.com>

* replace the goto statement with the retry-go package

Signed-off-by: yuyamionini <46483865+yuyamionini@users.noreply.github.com>

* Update util.go

Signed-off-by: yuyamionini <46483865+yuyamionini@users.noreply.github.com>

* Update util.go

Signed-off-by: yuyamionini <46483865+yuyamionini@users.noreply.github.com>

* go fmt

---------

Signed-off-by: yuyamionini <46483865+yuyamionini@users.noreply.github.com>
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-07-23 23:42:12 +08:00
b450a2104d chore(docs): update domain (#788)
* chore(docs): update domain

* docs(issue): add guide link for bug reporting
2025-07-23 14:26:21 +08:00
7d0de17daf feat(static): fetch index.html from cdn for beta (#372)
* refactor(static): simplify folder iteration in Static function

* feat(static): disable local static when `cdn` is set

* feat(static): fetch index.html from cdn for beta

* refactor(static): use RestyClient for better retrying

* fix(static): add Accept header when fetching index.html from CDN

* refactor(static): optimize HTML replacement

* chore(static): add logging to static file system initialization

* feat(static): ensure static file redirected to CDN
2025-07-22 22:14:07 +08:00
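The CDN fetch described above amounts to a retried GET with an explicit Accept header followed by HTML rewriting. A sketch with resty; the URL and replacement are illustrative:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/go-resty/resty/v2"
)

// fetchIndex pulls index.html from a CDN with retries and an explicit
// Accept header, then rewrites root-relative links for the base path.
func fetchIndex(cdn, basePath string) (string, error) {
	client := resty.New().SetRetryCount(3)
	resp, err := client.R().
		SetHeader("Accept", "text/html").
		Get(cdn + "/index.html")
	if err != nil {
		return "", err
	}
	html := resp.String()
	return strings.ReplaceAll(html, `href="/`, `href="`+basePath+`/`), nil
}

func main() {
	html, err := fetchIndex("https://cdn.example.com", "/openlist")
	fmt.Println(len(html), err)
}
```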
bba4fb2203 fix(security): directory traversal (#744)
* fix(security): Directory traversal

* chore: .

* optimize

---------

Co-authored-by: j2rong4cn <j2rong@qq.com>
2025-07-22 14:45:01 +08:00
a20c2020f8 fix(cmd): optimize parse of command flag --data (#777)
* fix(cmd): optimize parsing of the command flag `--data`

* DBFile

* optimize

* os.Getwd()
2025-07-22 10:51:28 +08:00
a92b5eb929 refactor(cloudreve): use retry-go for net/http uploads (#773)
* refactor(cloudreve): use retry-go for uploads

* refactor(cloudreve_v4): use retry-go for uploads

* refactor(onedrive): use retry-go for uploads

* refactor(onedrive_app): use retry-go for uploads

* chore(onedrive_app): remove unnecessary error handling for host retrieval

* feat(cloudreve): move read logic inside retry block

* feat(cloudreve_v4): move read logic inside retry block

* feat(onedrive): move read logic inside retry block

* feat(onedrive_app): move read logic inside retry block
2025-07-22 10:25:04 +08:00
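Moving the read logic inside the retry block means every attempt gets a fresh request body. A sketch with retry-go that rewinds a seekable chunk per attempt; endpoint and arguments are hypothetical:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"time"

	"github.com/avast/retry-go/v4"
)

// uploadChunk re-reads the chunk inside the retry closure, so every
// attempt sends a fresh body instead of a half-consumed reader.
func uploadChunk(url string, chunk io.ReadSeeker, size int64) error {
	return retry.Do(func() error {
		if _, err := chunk.Seek(0, io.SeekStart); err != nil { // rewind per attempt
			return err
		}
		req, err := http.NewRequest(http.MethodPut, url, chunk)
		if err != nil {
			return err
		}
		req.ContentLength = size
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return err
		}
		defer resp.Body.Close()
		if resp.StatusCode >= 300 {
			return fmt.Errorf("upload failed: %s", resp.Status)
		}
		return nil
	}, retry.Attempts(3), retry.Delay(500*time.Millisecond))
}

func main() {
	data := []byte("hello")
	err := uploadChunk("https://example.com/upload", bytes.NewReader(data), int64(len(data)))
	fmt.Println(err)
}
```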
6817494a41 chore(ci): update cgo-actions to 1.2.1 & add patch version define for go (#779)
chore(ci): update cgo-actions to 1.2.1 & fix patch version for go
2025-07-22 09:02:07 +08:00
5a0d8ee1b8 feat(proxy): add disable proxy sign (#764)
* feat(proxy): add disable proxy sign

* Update driver.go

* GenerateDownProxyUrl

* .

* Update internal/op/driver.go

Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>

* .

---------

Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>
Co-authored-by: j2rong4cn <j2rong@qq.com>
Co-authored-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>
2025-07-21 17:03:08 +08:00
012e51c551 fix(cloudreve_v4): remove deprecated authn check for login (#767)
* fix(cloudreve_v4): disable authn check for login

* chore(cloudreve_v4): update site login config fields
2025-07-21 15:53:10 +08:00
59ec1dbc9b feat(lenovonas_share): add option to not show root directory (#772) 2025-07-21 14:38:10 +08:00
6bb28d13f9 fix(quark): set the transcoding link ContentLength to the correct size 2025-07-20 16:40:32 +08:00
811a862288 feat(archives): add additional accepted archive extensions (#747) 2025-07-20 15:32:46 +08:00
74d32fd4d7 fix(simplehttp): logic bug when unable to parse file name (#761) 2025-07-20 14:13:30 +08:00
cedb3d488d [skip ci] chore(ci): output binary name set to openlist 2025-07-19 23:02:29 +08:00
86324d2d6b fix(net): ensure accurate content-length in response (#749)
* fix(fs): ensure accurate content-length in http2 requests

Chrome browsers were unable to preview thumbnails, reporting an
'ERR_HTTP_2_PROTOCOL_ERROR'. This was caused by an incorrect
content-length header in the server's response for thumbnail images.

This commit corrects the content-length calculation, allowing
Chrome and other compliant clients to render thumbnails correctly.

* fix(net): ensure accurate content-length in response

* fill in gaps

* .

---------

Co-authored-by: zhiqiang.huang <zhiqiang.tech@gmail.com>
Co-authored-by: j2rong4cn <j2rong@qq.com>
2025-07-19 20:36:27 +08:00
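The Chrome failure came from a Content-Length header that did not match the bytes actually written; the fix is to set it from the real payload size, as in this toy handler:

```go
package main

import (
	"fmt"
	"net/http"
	"strconv"
)

// serveThumb sets Content-Length to the exact number of bytes it writes;
// a mismatched value is what made HTTP/2 clients abort with
// ERR_HTTP_2_PROTOCOL_ERROR. The thumbnail bytes are a placeholder.
func serveThumb(w http.ResponseWriter, r *http.Request) {
	thumb := []byte{0xFF, 0xD8, 0xFF, 0xD9} // pretend JPEG
	w.Header().Set("Content-Type", "image/jpeg")
	w.Header().Set("Content-Length", strconv.Itoa(len(thumb)))
	w.Write(thumb)
}

func main() {
	http.HandleFunc("/thumb", serveThumb)
	fmt.Println(http.ListenAndServe(":8080", nil))
}
```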
648079ae24 remove upx (#750)
Update build.sh

Signed-off-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>
2025-07-18 12:38:17 +08:00
Dgs
e8d45398d6 feat(quark_uc_tv): add streaming link api (#728) 2025-07-17 14:24:16 +08:00
0c461991f9 chore: standardize context keys with custom ContextKey type (#697)
* chore: standardize context keys with custom ContextKey type

* fix bug

* use Request.Context
2025-07-14 23:55:17 +08:00
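The standard pattern behind a custom ContextKey type: a distinct named type cannot collide with another package's plain string keys:

```go
package main

import (
	"context"
	"fmt"
)

// ContextKey is a distinct named type, so its values can never collide
// with other packages' plain-string context keys.
type ContextKey string

const userKey ContextKey = "user"

func main() {
	ctx := context.WithValue(context.Background(), userKey, "admin")
	// A plain string "user" is a different key and finds nothing:
	fmt.Println(ctx.Value("user"))  // <nil>
	fmt.Println(ctx.Value(userKey)) // admin
}
```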
2a4c546a8b feat: default settings api (#716)
* feat: default settings api

* fix logic bug

* chore
2025-07-14 23:41:34 +08:00
750d4eb3f6 docs(README): add disclaimer (#705)
add disclaimer
2025-07-13 15:22:25 +08:00
cc01b410a4 perf(link): optimize concurrent response (#641)
* fix(crypt): bug caused by link cache

* perf(crypt,mega,halalcloud,quark,uc): optimize concurrent response link

* chore: remove unused code

* ftp

* fix bugs; release resources

* add SyncClosers

* local,sftp,smb

* refactor, optimize, enhance

* Update internal/stream/util.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>

* chore

* chore

* optimize; fix bugs

* .

---------

Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-07-12 17:57:54 +08:00
e5fbe72581 fix(security): add login count validation for webdav (#693) 2025-07-12 17:03:41 +08:00
283f3723d1 [skip ci] chore(ci): update openwrt hook 2025-07-12 12:06:36 +08:00
ad8c7b37a1 chore(ci): Disable duplicate build process 2025-07-12 11:49:27 +08:00
a84ffb96e9 chore(ci): Simplify the build process (#686)
* refactor(ci): Minify build files
2025-07-11 20:30:31 +08:00
19c6b6f930 feat(115_open): add offline download (#683) 2025-07-11 20:17:54 +08:00
eed3c0533c fix(deps): update module github.com/go-resty/resty/v2 to v2.16.5 (#628)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-11 10:26:44 +08:00
c72ba9828a fix(deps): update module github.com/deckarep/golang-set/v2 to v2.8.0 (#589)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-11 10:25:08 +08:00
4965a1b909 fix(deps): update module github.com/blevesearch/bleve/v2 to v2.5.2 (#582)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-11 10:24:50 +08:00
1bba550469 chore(deps): update dependency go to 1.24 (#578)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-11 10:24:23 +08:00
d678322b18 fix(deps): update module github.com/yuin/goldmark to v1.7.12 (#575)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-11 10:24:09 +08:00
efd8897bdf fix(deps): update module github.com/pkg/sftp to v1.13.9 (#574)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-11 10:23:52 +08:00
7c7cec0993 style(offline_download): add more description in log (#653)
fix(offline_download): add more description in log
2025-07-09 14:16:05 +08:00
3838ef0663 feat(traffic): update progress when caching file (#646)
* feat(traffic): update progress when caching file

* adjust parameter positions and naming

---------

Co-authored-by: j2rong4cn <j2rong@qq.com>
2025-07-08 21:41:45 +08:00
9e610af114 fix(115_open): upload progress error (#637) 2025-07-07 18:39:09 +08:00
0177177238 fix(crypt): pass refresh list request (close #609) 2025-07-06 13:20:42 +08:00
a77e515c9b fix(ocr): repair verification code OCR recognition service (#602)
* fix(ocr): Repair verification code OCR recognition service

* fix: did not take effect for existing (non-new) users

* chore: rename SettingItem.PreDefault to MigrationValue

---------

Co-authored-by: j2rong4cn <j2rong@qq.com>
2025-07-06 13:09:17 +08:00
4af16ab009 fix(115open): fix limit_rate save (#601) 2025-07-06 12:07:07 +08:00
da35423198 [skip ci] chore: go mod tidy 2025-07-06 00:55:23 +08:00
9612d61e60 chore(pkg): update singleflight 2025-07-05 13:31:47 +08:00
313 changed files with 11767 additions and 5291 deletions

View File

@@ -25,11 +25,11 @@ body:
- label: |
我确认我的描述清晰,语法礼貌,能帮助开发者快速定位问题,并符合社区规则。
- label: |
我已确认阅读了[OpenList文档](https://docs.oplist.org)。
我已确认阅读了[OpenList文档](https://doc.oplist.org)。
- label: |
我已确认没有重复的问题或讨论。
- label: |
我已确认是`OpenList`的问题,而不是其他原因(例如 [网络](https://docs.oplist.org/zh/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host) `依赖`或`操作`)。
我已确认是`OpenList`的问题,而不是其他原因(例如 [网络](https://doc.oplist.org/faq/howto#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host-1) `依赖`或`操作`)。
- label: |
我认为此问题必须由`OpenList`处理,而非第三方。
- label: |
@@ -72,7 +72,7 @@ body:
attributes:
label: 日志(可选)
description: |
请复制粘贴错误日志,或者截图。(可隐藏隐私字段)
请复制粘贴错误日志,或者截图。(可隐藏隐私字段) [查看方法](https://doc.oplist.org/faq/howto#%E5%A6%82%E4%BD%95%E5%BF%AB%E9%80%9F%E5%AE%9A%E4%BD%8Dbug)
- type: textarea
id: reproduction
attributes:

View File

@@ -25,11 +25,11 @@ body:
- label: |
I confirm my description is clear, polite, helps developers quickly locate the issue, and complies with community rules.
- label: |
I have read the [OpenList documentation](https://docs.oplist.org).
I have read the [OpenList documentation](https://doc.oplist.org).
- label: |
I confirm there are no duplicate issues or discussions.
- label: |
I confirm this is an `OpenList` issue, not caused by other reasons (such as [network](https://docs.oplist.org/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host), dependencies, or operation).
I confirm this is an `OpenList` issue, not caused by other reasons (such as [network](https://doc.oplist.org/faq/howto#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host-1), dependencies, or operation).
- label: |
I believe this issue must be handled by `OpenList` and not by a third party.
- label: |
@@ -72,7 +72,7 @@ body:
attributes:
label: Logs (optional)
description: |
Please copy and paste any relevant log output or screenshots. (You may mask sensitive fields)
Please copy and paste any relevant log output or screenshots. (You may mask sensitive fields) [Guide](https://doc.oplist.org/faq/howto#how-to-quickly-locate-bugs)
- type: textarea
id: reproduction
attributes:

View File

@@ -19,7 +19,7 @@ body:
- label: |
我确认我的描述清晰,语法礼貌,能帮助开发者快速定位问题,并符合社区规则。
- label: |
我已确认阅读了[OpenList文档](https://docs.oplist.org)。
我已确认阅读了[OpenList文档](https://doc.oplist.org)。
- label: |
我已确认没有重复的问题或讨论。
- label: |

View File

@@ -19,7 +19,7 @@ body:
- label: |
I confirm my description is clear, polite, helps developers quickly locate the issue, and complies with community rules.
- label: |
I have read the [OpenList documentation](https://docs.oplist.org).
I have read the [OpenList documentation](https://doc.oplist.org).
- label: |
I confirm there are no duplicate issues or discussions.
- label: |

56 .github/PULL_REQUEST_TEMPLATE.md vendored Normal file
View File

@@ -0,0 +1,56 @@
<!--
Provide a general summary of your changes in the Title above.
The PR title must start with `feat(): `, `docs(): `, `fix(): `, `style(): `, or `refactor(): `, `chore(): `. For example: `feat(component): add new feature`.
If it spans multiple components, use the main component as the prefix and enumerate in the title, describe in the body.
-->
<!--
在上方标题中提供您更改的总体摘要。
PR 标题需以 `feat(): `, `docs(): `, `fix(): `, `style(): `, `refactor(): `, `chore(): ` 其中之一开头,例如:`feat(component): 新增功能`
如果跨多个组件,请使用主要组件作为前缀,并在标题中枚举、描述中说明。
-->
## Description / 描述
<!-- Describe your changes in detail -->
<!-- 详细描述您的更改 -->
## Motivation and Context / 背景
<!-- Why is this change required? What problem does it solve? -->
<!-- 为什么需要此更改?它解决了什么问题? -->
<!-- If it fixes an open issue, please link to the issue here. -->
<!-- 如果修复了一个打开的issue请在此处链接到该issue -->
Closes #XXXX
<!-- or -->
<!-- 或者 -->
Relates to #XXXX
## How Has This Been Tested? / 测试
<!-- Please describe in detail how you tested your changes. -->
<!-- 请详细描述您如何测试更改 -->
## Checklist / 检查清单
<!-- Go over all the following points, and put an `x` in all the boxes that apply. -->
<!-- 检查以下所有要点,并在所有适用的框中打`x` -->
<!-- If you're unsure about any of these, don't hesitate to ask. We're here to help! -->
<!-- 如果您对其中任何一项不确定,请不要犹豫提问。我们会帮助您! -->
- [ ] I have read the [CONTRIBUTING](https://github.com/OpenListTeam/OpenList/blob/main/CONTRIBUTING.md) document.
我已阅读 [CONTRIBUTING](https://github.com/OpenListTeam/OpenList/blob/main/CONTRIBUTING.md) 文档。
- [ ] I have formatted my code with `go fmt` or [prettier](https://prettier.io/).
我已使用 `go fmt` 或 [prettier](https://prettier.io/) 格式化提交的代码。
- [ ] I have added appropriate labels to this PR (or mentioned needed labels in the description if lacking permissions).
我已为此 PR 添加了适当的标签(如无权限或需要的标签不存在,请在描述中说明,管理员将后续处理)。
- [ ] I have requested review from relevant code authors using the "Request review" feature when applicable.
我已在适当情况下使用"Request review"功能请求相关代码作者进行审查。
- [ ] I have updated the repository accordingly (If its needed).
我已相应更新了相关仓库(若适用)。
- [ ] [OpenList-Frontend](https://github.com/OpenListTeam/OpenList-Frontend) #XXXX
- [ ] [OpenList-Docs](https://github.com/OpenListTeam/OpenList-Docs) #XXXX

View File

@@ -14,12 +14,8 @@ permissions:
jobs:
changelog:
strategy:
matrix:
platform: [ubuntu-latest]
go-version: ["1.21"]
name: Beta Release Changelog
runs-on: ${{ matrix.platform }}
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
@@ -65,7 +61,7 @@ jobs:
strategy:
matrix:
include:
- target: "!(*musl*|*windows-arm64*|*android*|*freebsd*)" # xgo
- target: "!(*musl*|*windows-arm64*|*windows7-*|*android*|*freebsd*)" # xgo and loongarch
hash: "md5"
- target: "linux-!(arm*)-musl*" #musl-not-arm
hash: "md5-linux-musl"
@@ -73,6 +69,8 @@ jobs:
hash: "md5-linux-musl-arm"
- target: "windows-arm64" #win-arm64
hash: "md5-windows-arm64"
- target: "windows7-*" #win7
hash: "md5-windows7"
- target: "android-*" #android
hash: "md5-android"
- target: "freebsd-*" #freebsd
@@ -89,27 +87,29 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: "1.22"
go-version: "1.24.5"
- name: Setup web
run: bash build.sh dev web
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
- name: Build
uses: OpenListTeam/cgo-actions@v1.1.2
uses: OpenListTeam/cgo-actions@v1.2.2
with:
targets: ${{ matrix.target }}
musl-target-format: $os-$musl-$arch
github-token: ${{ secrets.GITHUB_TOKEN }}
out-dir: build
output: openlist-$target$ext
musl-base-url: "https://github.com/OpenListTeam/musl-compilers/releases/latest/download/"
x-flags: |
github.com/OpenListTeam/OpenList/v4/internal/conf.BuiltAt=$built_at
github.com/OpenListTeam/OpenList/v4/internal/conf.GitAuthor=OpenList
github.com/OpenListTeam/OpenList/v4/internal/conf.GitAuthor=The OpenList Projects Contributors <noreply@openlist.team>
github.com/OpenListTeam/OpenList/v4/internal/conf.GitCommit=$git_commit
github.com/OpenListTeam/OpenList/v4/internal/conf.Version=$tag
github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=dev
github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=rolling
- name: Compress
run: |

View File

@@ -1,8 +1,6 @@
name: Test Build
on:
push:
branches: ["main"]
pull_request:
branches: ["main"]
workflow_dispatch:
@@ -15,7 +13,6 @@ jobs:
build:
strategy:
matrix:
platform: [ubuntu-latest]
target:
- darwin-amd64
- darwin-arm64
@@ -24,8 +21,8 @@
- linux-amd64-musl
- windows-arm64
- android-arm64
name: Build
runs-on: ${{ matrix.platform }}
name: Build ${{ matrix.target }}
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
@@ -36,28 +33,31 @@
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: "1.22"
go-version: "1.24.5"
- name: Setup web
run: bash build.sh dev web
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
- name: Build
uses: OpenListTeam/cgo-actions@v1.1.2
uses: OpenListTeam/cgo-actions@v1.2.2
with:
targets: ${{ matrix.target }}
musl-target-format: $os-$musl-$arch
github-token: ${{ secrets.GITHUB_TOKEN }}
out-dir: build
x-flags: |
github.com/OpenListTeam/OpenList/v4/internal/conf.BuiltAt=$built_at
github.com/OpenListTeam/OpenList/v4/internal/conf.GitAuthor=OpenList
github.com/OpenListTeam/OpenList/v4/internal/conf.GitAuthor=The OpenList Projects Contributors <noreply@openlist.team>
github.com/OpenListTeam/OpenList/v4/internal/conf.GitCommit=$git_commit
github.com/OpenListTeam/OpenList/v4/internal/conf.Version=$tag
github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=dev
github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=rolling
output: openlist$ext
- name: Upload artifact
uses: actions/upload-artifact@v4
with:
name: openlist_${{ env.SHA }}_${{ matrix.target }}
name: openlist_${{ steps.short-sha.outputs.sha }}_${{ matrix.target }}
path: build/*

View File

@@ -1,4 +1,4 @@
name: Automatic changelog
name: Release Automatic changelog
on:
push:

View File

@@ -8,24 +8,34 @@ permissions:
contents: write
jobs:
# Set release to prerelease first
prerelease:
name: Set Prerelease
runs-on: ubuntu-latest
steps:
- name: Prerelease
uses: irongut/EditRelease@v1.2.0
with:
token: ${{ secrets.GITHUB_TOKEN }}
id: ${{ github.event.release.id }}
prerelease: true
# Main release job for all platforms
release:
needs: prerelease
strategy:
matrix:
platform: [ ubuntu-latest ]
go-version: [ '1.21' ]
name: Release
runs-on: ${{ matrix.platform }}
build-type: [ 'standard', 'lite' ]
target-platform: [ '', 'android', 'freebsd', 'linux_musl', 'linux_musl_arm' ]
name: Release ${{ matrix.target-platform && format('{0} ', matrix.target-platform) || '' }}${{ matrix.build-type == 'lite' && 'Lite' || '' }}
runs-on: ubuntu-latest
steps:
- name: Free Disk Space (Ubuntu)
if: matrix.target-platform == ''
uses: jlumbroso/free-disk-space@main
with:
# this might remove tools that are actually needed,
# if set to "true" but frees about 6 GB
tool-cache: false
# all of these default to true, but feel free to set to
# "false" if necessary for your workflow
android: true
dotnet: true
haskell: true
@@ -33,17 +43,10 @@ jobs:
docker-images: true
swap-storage: true
- name: Prerelease
uses: irongut/EditRelease@v1.2.0
with:
token: ${{ secrets.GITHUB_TOKEN }}
id: ${{ github.event.release.id }}
prerelease: true
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
go-version: '1.24'
- name: Checkout
uses: actions/checkout@v4
@@ -51,6 +54,7 @@
fetch-depth: 0
- name: Install dependencies
if: matrix.target-platform == ''
run: |
sudo snap install zig --classic --beta
docker pull crazymax/xgo:latest
@@ -59,70 +63,10 @@
- name: Build
run: |
bash build.sh release
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Upload assets
uses: softprops/action-gh-release@v2
with:
files: build/compress/*
prerelease: false
release-lite:
strategy:
matrix:
platform: [ ubuntu-latest ]
go-version: [ '1.21' ]
name: Release Lite
runs-on: ${{ matrix.platform }}
steps:
- name: Free Disk Space (Ubuntu)
uses: jlumbroso/free-disk-space@main
with:
# this might remove tools that are actually needed,
# if set to "true" but frees about 6 GB
tool-cache: false
# all of these default to true, but feel free to set to
# "false" if necessary for your workflow
android: true
dotnet: true
haskell: true
large-packages: true
docker-images: true
swap-storage: true
- name: Prerelease
uses: irongut/EditRelease@v1.2.0
with:
token: ${{ secrets.GITHUB_TOKEN }}
id: ${{ github.event.release.id }}
prerelease: true
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Install dependencies
run: |
sudo snap install zig --classic --beta
docker pull crazymax/xgo:latest
go install github.com/crazy-max/xgo@latest
sudo apt install upx
- name: Build
run: |
bash build.sh release lite
bash build.sh release ${{ matrix.build-type == 'lite' && 'lite' || '' }} ${{ matrix.target-platform }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
- name: Upload assets
uses: softprops/action-gh-release@v2

View File

@@ -1,69 +0,0 @@
name: Release builds (Android)
on:
release:
types: [ published ]
permissions:
contents: write
jobs:
release_android:
strategy:
matrix:
platform: [ ubuntu-latest ]
go-version: [ '1.21' ]
name: Release
runs-on: ${{ matrix.platform }}
steps:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build
run: |
bash build.sh release android
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Upload assets
uses: softprops/action-gh-release@v2
with:
files: build/compress/*
release_android_lite:
strategy:
matrix:
platform: [ ubuntu-latest ]
go-version: [ '1.21' ]
name: Release
runs-on: ${{ matrix.platform }}
steps:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build
run: |
bash build.sh release lite android
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Upload assets
uses: softprops/action-gh-release@v2
with:
files: build/compress/*

View File

@@ -31,11 +31,8 @@ env:
REGISTRY: ghcr.io
ARTIFACT_NAME: 'binaries_docker_release'
ARTIFACT_NAME_LITE: 'binaries_docker_release_lite'
RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64'
RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/ppc64le,linux/riscv64,linux/loong64' ### Temporarily disable Docker builds for linux/s390x architectures for unknown reasons.
IMAGE_PUSH: ${{ github.event_name == 'push' || github.event_name == 'workflow_dispatch' }}
IMAGE_IS_PROD: ${{ github.ref_type == 'tag' || github.event.inputs.as_latest == 'true' }}
IMAGE_TAGS_BETA: |
type=raw,value=beta,enable={{is_default_branch}}
permissions:
packages: write
@@ -65,17 +62,11 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Build go binary (beta)
if: env.IMAGE_IS_PROD != 'true'
run: bash build.sh beta docker-multiplatform
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Build go binary (release)
if: env.IMAGE_IS_PROD == 'true'
run: bash build.sh release docker-multiplatform
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
- name: Upload artifacts
uses: actions/upload-artifact@v4
@ -88,7 +79,7 @@ jobs:
!build/musl-libs/**
build_binary_lite:
name: Build Binaries for Docker Release
name: Build Binaries for Docker Release (Lite)
runs-on: ubuntu-latest
steps:
- name: Checkout
@ -111,17 +102,11 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Build go binary (beta)
if: env.IMAGE_IS_PROD != 'true'
run: bash build.sh beta lite docker-multiplatform
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Build go binary (release)
if: env.IMAGE_IS_PROD == 'true'
run: bash build.sh release lite docker-multiplatform
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
- name: Upload artifacts
uses: actions/upload-artifact@v4
@ -142,15 +127,19 @@ jobs:
image: ["latest", "ffmpeg", "aria2", "aio"]
include:
- image: "latest"
base_image_tag: "base"
build_arg: ""
tag_favor: ""
- image: "ffmpeg"
base_image_tag: "ffmpeg"
build_arg: INSTALL_FFMPEG=true
tag_favor: "suffix=-ffmpeg,onlatest=true"
- image: "aria2"
base_image_tag: "aria2"
build_arg: INSTALL_ARIA2=true
tag_favor: "suffix=-aria2,onlatest=true"
- image: "aio"
base_image_tag: "aio"
build_arg: |
INSTALL_FFMPEG=true
INSTALL_ARIA2=true
@ -181,7 +170,7 @@ jobs:
if: env.IMAGE_PUSH == 'true'
uses: docker/login-action@v3
with:
username: ${{ env.DOCKERHUB_ORG_NAME }}
username: ${{ vars.DOCKERHUB_ORG_NAME_BACKUP || env.DOCKERHUB_ORG_NAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Docker meta
@ -192,13 +181,11 @@ jobs:
${{ env.REGISTRY }}/${{ env.GHCR_ORG_NAME }}/${{ env.IMAGE_NAME }}
${{ env.DOCKERHUB_ORG_NAME }}/${{ env.IMAGE_NAME_DOCKERHUB }}
tags: >
${{ env.IMAGE_IS_PROD == 'true' && (
github.event_name == 'workflow_dispatch'
${{ github.event_name == 'workflow_dispatch'
&& format('type=raw,value={0}', github.event.inputs.manual_tag)
|| format('type=raw,value={0}', github.ref_name)
) || env.IMAGE_TAGS_BETA }}
|| format('type=raw,value={0}', github.ref_name) }}
flavor: |
latest=${{ env.IMAGE_IS_PROD }}
latest=${{ github.event_name == 'push' || github.event.inputs.as_latest == 'true' }}
${{ matrix.tag_favor }}
- name: Build and push
@ -208,29 +195,35 @@ jobs:
context: .
file: Dockerfile.ci
push: ${{ env.IMAGE_PUSH == 'true' }}
build-args: ${{ matrix.build_arg }}
build-args: |
BASE_IMAGE_TAG=${{ matrix.base_image_tag }}
${{ matrix.build_arg }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
platforms: ${{ env.RELEASE_PLATFORMS }}
release_docker_lite:
needs: build_binary_lite
name: Release Docker image
name: Release Docker image (Lite)
runs-on: ubuntu-latest
strategy:
matrix:
image: ["latest", "ffmpeg", "aria2", "aio"]
include:
- image: "latest"
base_image_tag: "base"
build_arg: ""
tag_favor: "suffix=-lite,onlatest=true"
- image: "ffmpeg"
base_image_tag: "ffmpeg"
build_arg: INSTALL_FFMPEG=true
tag_favor: "suffix=-lite-ffmpeg,onlatest=true"
- image: "aria2"
base_image_tag: "aria2"
build_arg: INSTALL_ARIA2=true
tag_favor: "suffix=-lite-aria2,onlatest=true"
- image: "aio"
base_image_tag: "aio"
build_arg: |
INSTALL_FFMPEG=true
INSTALL_ARIA2=true
@ -261,7 +254,7 @@ jobs:
if: env.IMAGE_PUSH == 'true'
uses: docker/login-action@v3
with:
username: ${{ env.DOCKERHUB_ORG_NAME }}
username: ${{ vars.DOCKERHUB_ORG_NAME_BACKUP || env.DOCKERHUB_ORG_NAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Docker meta
@ -272,13 +265,11 @@ jobs:
${{ env.REGISTRY }}/${{ env.GHCR_ORG_NAME }}/${{ env.IMAGE_NAME }}
${{ env.DOCKERHUB_ORG_NAME }}/${{ env.IMAGE_NAME_DOCKERHUB }}
tags: >
${{ env.IMAGE_IS_PROD == 'true' && (
github.event_name == 'workflow_dispatch'
${{ github.event_name == 'workflow_dispatch'
&& format('type=raw,value={0}', github.event.inputs.manual_tag)
|| format('type=raw,value={0}', github.ref_name)
) || env.IMAGE_TAGS_BETA }}
|| format('type=raw,value={0}', github.ref_name) }}
flavor: |
latest=${{ env.IMAGE_IS_PROD }}
latest=${{ github.event_name == 'push' || github.event.inputs.as_latest == 'true' }}
${{ matrix.tag_favor }}
- name: Build and push
@ -288,7 +279,9 @@ jobs:
context: .
file: Dockerfile.ci
push: ${{ env.IMAGE_PUSH == 'true' }}
build-args: ${{ matrix.build_arg }}
build-args: |
BASE_IMAGE_TAG=${{ matrix.base_image_tag }}
${{ matrix.build_arg }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
platforms: ${{ env.RELEASE_PLATFORMS }}


@ -1,69 +0,0 @@
name: Release builds (Freebsd)
on:
release:
types: [ published ]
permissions:
contents: write
jobs:
release_freebsd:
strategy:
matrix:
platform: [ ubuntu-latest ]
go-version: [ '1.21' ]
name: Release
runs-on: ${{ matrix.platform }}
steps:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build
run: |
bash build.sh release freebsd
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Upload assets
uses: softprops/action-gh-release@v2
with:
files: build/compress/*
release_freebsd_lite:
strategy:
matrix:
platform: [ ubuntu-latest ]
go-version: [ '1.21' ]
name: Release
runs-on: ${{ matrix.platform }}
steps:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build
run: |
bash build.sh release lite freebsd
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Upload assets
uses: softprops/action-gh-release@v2
with:
files: build/compress/*


@ -1,69 +0,0 @@
name: Release builds (linux_musl)
on:
release:
types: [ published ]
permissions:
contents: write
jobs:
release_linux_musl:
strategy:
matrix:
platform: [ ubuntu-latest ]
go-version: [ '1.21' ]
name: Release
runs-on: ${{ matrix.platform }}
steps:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build
run: |
bash build.sh release linux_musl
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Upload assets
uses: softprops/action-gh-release@v2
with:
files: build/compress/*
release_linux_musl_lite:
strategy:
matrix:
platform: [ ubuntu-latest ]
go-version: [ '1.21' ]
name: Release
runs-on: ${{ matrix.platform }}
steps:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build
run: |
bash build.sh release lite linux_musl
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Upload assets
uses: softprops/action-gh-release@v2
with:
files: build/compress/*


@ -1,70 +0,0 @@
name: Release builds (linux_musl_arm)
on:
release:
types: [ published ]
permissions:
contents: write
jobs:
release_linux_musl_arm:
strategy:
matrix:
platform: [ ubuntu-latest ]
go-version: [ '1.21' ]
name: Release
runs-on: ${{ matrix.platform }}
steps:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build
run: |
bash build.sh release linux_musl_arm
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Upload assets
uses: softprops/action-gh-release@v2
with:
files: build/compress/*
release_linux_musl_arm_lite:
strategy:
matrix:
platform: [ ubuntu-latest ]
go-version: [ '1.21' ]
name: Release
runs-on: ${{ matrix.platform }}
steps:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build
run: |
bash build.sh release lite linux_musl_arm
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Upload assets
uses: softprops/action-gh-release@v2
with:
files: build/compress/*

.github/workflows/sync_repo.yml (new file)

@ -0,0 +1,38 @@
name: Sync to Gitee
on:
push:
branches:
- main
workflow_dispatch:
jobs:
sync:
runs-on: ubuntu-latest
name: Sync GitHub to Gitee
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Setup SSH
run: |
mkdir -p ~/.ssh
echo "${{ secrets.GITEE_SSH_PRIVATE_KEY }}" > ~/.ssh/id_rsa
chmod 600 ~/.ssh/id_rsa
ssh-keyscan gitee.com >> ~/.ssh/known_hosts
- name: Create single commit and push
run: |
git config user.name "GitHub Actions"
git config user.email "actions@github.com"
# Create an orphan branch so only a single squashed commit is pushed
git checkout --orphan new-main
git add .
git commit -m "Sync from GitHub: $(date)"
# Add Gitee remote and force push
git remote add gitee ${{ vars.GITEE_REPO_URL }}
git push --force gitee new-main:main
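For completeness, a sketch of the key setup this job assumes (file name and comment are illustrative): the private half of an SSH deploy key goes into the `GITEE_SSH_PRIVATE_KEY` secret, and the public half is registered on the Gitee mirror with write access.
```shell
# generate a dedicated deploy key for the sync job
ssh-keygen -t ed25519 -C "github-actions-gitee-sync" -f gitee_sync_key -N ""
# gitee_sync_key     -> paste into the GITEE_SSH_PRIVATE_KEY repository secret
# gitee_sync_key.pub -> add as a deploy key (write access) on the Gitee mirror
```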


@ -1,4 +1,4 @@
name: Docker Beta Release
name: Beta Release (Docker)
on:
workflow_dispatch:
@ -20,8 +20,7 @@ env:
IMAGE_NAME_DOCKERHUB: openlist
REGISTRY: ghcr.io
ARTIFACT_NAME: 'binaries_docker_release'
ARTIFACT_NAME_LITE: 'binaries_docker_release_lite'
RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64'
RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/ppc64le,linux/riscv64,linux/loong64' ### linux/s390x builds are temporarily disabled for unknown reasons; linux/loong64 added.
IMAGE_PUSH: ${{ github.event_name == 'push' }}
IMAGE_TAGS_BETA: |
type=ref,event=pr
@ -29,7 +28,7 @@ env:
jobs:
build_binary:
name: Build Binaries for Docker Release
name: Build Binaries for Docker Release (Beta)
runs-on: ubuntu-latest
steps:
- name: Checkout
@ -56,6 +55,7 @@ jobs:
run: bash build.sh beta docker-multiplatform
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
- name: Upload artifacts
uses: actions/upload-artifact@v4
@ -69,7 +69,7 @@ jobs:
release_docker:
needs: build_binary
name: Release Docker image
name: Release Docker image (Beta)
runs-on: ubuntu-latest
permissions:
packages: write
@ -78,15 +78,19 @@ jobs:
image: ["latest", "ffmpeg", "aria2", "aio"]
include:
- image: "latest"
base_image_tag: "base"
build_arg: ""
tag_favor: ""
- image: "ffmpeg"
base_image_tag: "ffmpeg"
build_arg: INSTALL_FFMPEG=true
tag_favor: "suffix=-ffmpeg,onlatest=true"
- image: "aria2"
base_image_tag: "aria2"
build_arg: INSTALL_ARIA2=true
tag_favor: "suffix=-aria2,onlatest=true"
- image: "aio"
base_image_tag: "aio"
build_arg: |
INSTALL_FFMPEG=true
INSTALL_ARIA2=true
@ -117,7 +121,7 @@ jobs:
if: env.IMAGE_PUSH == 'true'
uses: docker/login-action@v3
with:
username: ${{ env.DOCKERHUB_ORG_NAME }}
username: ${{ vars.DOCKERHUB_ORG_NAME_BACKUP || env.DOCKERHUB_ORG_NAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Docker meta
@ -138,7 +142,9 @@ jobs:
context: .
file: Dockerfile.ci
push: ${{ env.IMAGE_PUSH == 'true' }}
build-args: ${{ matrix.build_arg }}
build-args: |
BASE_IMAGE_TAG=${{ matrix.base_image_tag }}
${{ matrix.build_arg }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
platforms: ${{ env.RELEASE_PLATFORMS }}


@ -19,7 +19,7 @@ jobs:
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.EXTERNAL_REPO_TOKEN_LUCI_APP_OPENLIST }}
repository: ${{ vars.HOOK_REPO || 'OpenListTeam/luci-app-openlist' }}
repository: ${{ vars.HOOK_REPO || 'OpenListTeam/OpenList-OpenWRT' }}
event-type: update-hashes
client-payload: |
{


@ -2,106 +2,76 @@
## Setup your machine
`OpenList` is written in [Go](https://golang.org/) and [React](https://reactjs.org/).
`OpenList` is written in [Go](https://golang.org/) and [SolidJS](https://www.solidjs.com/).
Prerequisites:
- [git](https://git-scm.com)
- [Go 1.20+](https://golang.org/doc/install)
- [Go 1.24+](https://golang.org/doc/install)
- [gcc](https://gcc.gnu.org/)
- [nodejs](https://nodejs.org/)
Clone `OpenList` and `OpenList-Frontend` anywhere:
## Cloning a fork
Fork and clone `OpenList` and `OpenList-Frontend` anywhere:
```shell
$ git clone https://github.com/OpenListTeam/OpenList.git
$ git clone --recurse-submodules https://github.com/OpenListTeam/OpenList-Frontend.git
$ git clone https://github.com/<your-username>/OpenList.git
$ git clone --recurse-submodules https://github.com/<your-username>/OpenList-Frontend.git
```
## Creating a branch
Create a new branch from the `main` branch, with an appropriate name.
```shell
$ git checkout -b <branch-name>
```
You should switch to the `main` branch for development.
## Preview your change
### backend
```shell
$ go run main.go
```
### frontend
```shell
$ pnpm dev
```
## Add a new driver
Copy `drivers/template` folder and rename it, and follow the comments in it.
## Create a commit
Commit messages should be well formatted; to keep them standardized, use the following format.
### Commit Message Format
Each commit message consists of a **header**, a **body** and a **footer**. The header has a special
format that includes a **type**, a **scope** and a **subject**:
Submit your pull request. For PR titles, follow [Conventional Commits](https://www.conventionalcommits.org).
```
<type>(<scope>): <subject>
<BLANK LINE>
<body>
<BLANK LINE>
<footer>
```
https://github.com/OpenListTeam/OpenList/issues/376
The **header** is mandatory and the **scope** of the header is optional.
Any line of the commit message cannot be longer than 100 characters! This allows the message to be easier
to read on GitHub as well as in various git tools.
### Revert
If the commit reverts a previous commit, it should begin with `revert: `, followed by the header
of the reverted commit.
In the body it should say: `This reverts commit <hash>.`, where the hash is the SHA of the commit
being reverted.
### Type
Must be one of the following:
* **feat**: A new feature
* **fix**: A bug fix
* **docs**: Documentation only changes
* **style**: Changes that do not affect the meaning of the code (white-space, formatting, missing
semi-colons, etc)
* **refactor**: A code change that neither fixes a bug nor adds a feature
* **perf**: A code change that improves performance
* **test**: Adding missing or correcting existing tests
* **build**: Affects project builds or dependency modifications
* **revert**: Restore the previous commit
* **ci**: Continuous integration of related file modifications
* **chore**: Changes to the build process or auxiliary tools and libraries such as documentation
generation
* **release**: Release a new version
### Scope
The scope could be anything specifying place of the commit change. For example `$location`,
`$browser`, `$compile`, `$rootScope`, `ngHref`, `ngClick`, `ngView`, etc...
You can use `*` when the change affects more than a single scope.
### Subject
The subject contains succinct description of the change:
* use the imperative, present tense: "change" not "changed" nor "changes"
* don't capitalize first letter
* no dot (.) at the end
### Body
Just as in the **subject**, use the imperative, present tense: "change" not "changed" nor "changes".
The body should include the motivation for the change and contrast this with previous behavior.
### Footer
The footer should contain any information about **Breaking Changes** and is also the place to
[reference GitHub issues that this commit closes](https://help.github.com/articles/closing-issues-via-commit-messages/).
**Breaking Changes** should start with the word `BREAKING CHANGE:` with a space or two newlines.
The rest of the commit message is then used for this.
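For instance, a commit message that follows these rules might look like this (scope and issue number are purely illustrative):
```
fix(115): fall back to cached app version

Use the last known app version when the version API request fails,
instead of aborting the upload flow.

Closes #123
```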
It's suggested to sign your commits. See: [How to sign commits](https://docs.github.com/en/authentication/managing-commit-signature-verification/signing-commits)
## Submit a pull request
Push your branch to your `openlist` fork and open a pull request against the
`main` branch.
Please make sure your code has been formatted with `go fmt` or [prettier](https://prettier.io/) before submitting.
Push your branch to your `openlist` fork and open a pull request against the `main` branch.
## Merge your pull request
Please wait for a maintainer to review and merge your pull request.
At least 1 approving review is required by reviewers with write access. You can also request a review from maintainers.
## Delete your branch
(Optional) After your pull request is merged, you can delete your branch.
---
Thank you for your contribution! Let's make OpenList better together!


@ -1,4 +1,7 @@
FROM docker.io/library/alpine:edge AS builder
### The default image is base. You can add other support by modifying BASE_IMAGE_TAG; supported values: base (default), aria2, ffmpeg, aio
ARG BASE_IMAGE_TAG=base
FROM alpine:edge AS builder
LABEL stage=go-builder
WORKDIR /app/
RUN apk add --no-cache bash curl jq gcc git go musl-dev
@ -7,36 +10,27 @@ RUN go mod download
COPY ./ ./
RUN bash build.sh release docker
FROM alpine:edge
FROM openlistteam/openlist-base-image:${BASE_IMAGE_TAG}
LABEL MAINTAINER="OpenList"
ARG INSTALL_FFMPEG=false
ARG INSTALL_ARIA2=false
LABEL MAINTAINER="OpenList"
ARG USER=openlist
ARG UID=1001
ARG GID=1001
WORKDIR /opt/openlist/
RUN apk update && \
apk upgrade --no-cache && \
apk add --no-cache bash ca-certificates su-exec tzdata; \
[ "$INSTALL_FFMPEG" = "true" ] && apk add --no-cache ffmpeg; \
[ "$INSTALL_ARIA2" = "true" ] && apk add --no-cache curl aria2 && \
mkdir -p /opt/aria2/.aria2 && \
wget https://github.com/P3TERX/aria2.conf/archive/refs/heads/master.tar.gz -O /tmp/aria-conf.tar.gz && \
tar -zxvf /tmp/aria-conf.tar.gz -C /opt/aria2/.aria2 --strip-components=1 && rm -f /tmp/aria-conf.tar.gz && \
sed -i 's|rpc-secret|#rpc-secret|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/script.conf && \
sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/script.conf && \
touch /opt/aria2/.aria2/aria2.session && \
/opt/aria2/.aria2/tracker.sh ; \
rm -rf /var/cache/apk/*
RUN addgroup -g ${GID} ${USER} && \
adduser -D -u ${UID} -G ${USER} ${USER} && \
mkdir -p /opt/openlist/data
COPY --chmod=755 --from=builder /app/bin/openlist ./
COPY --chmod=755 entrypoint.sh /entrypoint.sh
COPY --from=builder --chmod=755 --chown=${UID}:${GID} /app/bin/openlist ./
COPY --chmod=755 --chown=${UID}:${GID} entrypoint.sh /entrypoint.sh
USER ${USER}
RUN /entrypoint.sh version
ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
ENV UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
VOLUME /opt/openlist/data/
EXPOSE 5244 5245
CMD [ "/entrypoint.sh" ]
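As a usage sketch, the build arguments introduced above can be exercised in a local build roughly like this (the image tag is illustrative):
```shell
# build the aria2 variant of the image from this Dockerfile
docker build \
  --build-arg BASE_IMAGE_TAG=aria2 \
  --build-arg INSTALL_ARIA2=true \
  -t openlist:local-aria2 .
```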


@ -1,34 +1,26 @@
FROM docker.io/library/alpine:edge
ARG BASE_IMAGE_TAG=base
FROM ghcr.io/openlistteam/openlist-base-image:${BASE_IMAGE_TAG}
LABEL MAINTAINER="OpenList"
ARG TARGETPLATFORM
ARG INSTALL_FFMPEG=false
ARG INSTALL_ARIA2=false
LABEL MAINTAINER="OpenList"
ARG USER=openlist
ARG UID=1001
ARG GID=1001
WORKDIR /opt/openlist/
RUN apk update && \
apk upgrade --no-cache && \
apk add --no-cache bash ca-certificates su-exec tzdata; \
[ "$INSTALL_FFMPEG" = "true" ] && apk add --no-cache ffmpeg; \
[ "$INSTALL_ARIA2" = "true" ] && apk add --no-cache curl aria2 && \
mkdir -p /opt/aria2/.aria2 && \
wget https://github.com/P3TERX/aria2.conf/archive/refs/heads/master.tar.gz -O /tmp/aria-conf.tar.gz && \
tar -zxvf /tmp/aria-conf.tar.gz -C /opt/aria2/.aria2 --strip-components=1 && rm -f /tmp/aria-conf.tar.gz && \
sed -i 's|rpc-secret|#rpc-secret|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/script.conf && \
sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/script.conf && \
touch /opt/aria2/.aria2/aria2.session && \
/opt/aria2/.aria2/tracker.sh ; \
rm -rf /var/cache/apk/*
RUN addgroup -g ${GID} ${USER} && \
adduser -D -u ${UID} -G ${USER} ${USER} && \
mkdir -p /opt/openlist/data
COPY --chmod=755 /build/${TARGETPLATFORM}/openlist ./
COPY --chmod=755 entrypoint.sh /entrypoint.sh
COPY --chmod=755 --chown=${UID}:${GID} /build/${TARGETPLATFORM}/openlist ./
COPY --chmod=755 --chown=${UID}:${GID} entrypoint.sh /entrypoint.sh
USER ${USER}
RUN /entrypoint.sh version
ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
ENV UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
VOLUME /opt/openlist/data/
EXPOSE 5244 5245
CMD [ "/entrypoint.sh" ]
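A minimal sketch of how CI drives this file, assuming `build.sh` has already staged per-platform binaries under `build/<os>/<arch>/` where the `COPY /build/${TARGETPLATFORM}/openlist` line expects them:
```shell
# stage static binaries, then assemble the multi-arch image
bash build.sh release docker-multiplatform
docker buildx build -f Dockerfile.ci \
  --platform linux/amd64,linux/arm64 \
  --build-arg BASE_IMAGE_TAG=base \
  -t openlist:ci-test .
```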


@ -20,6 +20,34 @@
- [CODE OF CONDUCT](./CODE_OF_CONDUCT.md)
- [LICENSE](./LICENSE)
## Disclaimer
OpenList is an open-source project independently maintained by the OpenList Team, following the AGPL-3.0 license and committed to maintaining complete code openness and modification transparency.
We have noticed the emergence of some third-party projects in the community with names similar to this project, such as OpenListApp/OpenListApp, as well as some paid proprietary software using the same or similar naming. To avoid user confusion, we hereby declare:
- OpenList has no official association with any third-party derivative projects.
- All software, code, and services of this project are maintained by the OpenList Team and are freely available on GitHub.
- Project documentation and API services primarily rely on charitable resources provided by Cloudflare. There are currently no paid plans or commercial deployments, and the use of existing features does not involve any costs.
We respect the community's rights to free use and derivative development, but we also strongly urge downstream projects:
- They should not use the "OpenList" name for misleading promotion or commercial gain;
- They must not distribute OpenList-based code in a closed-source manner or otherwise violate the AGPL license terms.
To better maintain a healthy ecosystem, we recommend:
- Clearly indicating the project's origin and choosing an appropriate open-source license in keeping with the open-source spirit;
- Avoiding "OpenList" or any confusing naming as the project name when commercial use is involved;
- If you need to use materials under OpenListTeam/Logo, modifying and using them in compliance with the agreement.
Thank you for your support and understanding of the OpenList project.
## Features
- [x] Multiple storages
@ -78,8 +106,9 @@
## Document
- 📘 [Docs & Install Guide](https://docs.oplist.org)
- 📚 [Backup Docs Site](https://docs.openlist.team)
- 📘 [Global Site](https://doc.oplist.org)
- 📚 [Backup Site](https://doc.openlist.team)
- 🌏 [CN Site](https://doc.oplist.org.cn)
## Demo


@ -20,6 +20,34 @@
- [Code of Conduct](./CODE_OF_CONDUCT.md)
- [License](./LICENSE)
## Disclaimer
OpenList is an open-source project independently maintained by the OpenList Team, following the AGPL-3.0 license and committed to keeping its code fully open and its modifications transparent.
We have noticed some third-party projects in the community with names similar to this project, such as OpenListApp/OpenListApp, as well as some paid proprietary software using the same or similar naming. To avoid user confusion, we hereby declare:
- OpenList has no official association with any third-party derivative projects.
- All software, code, and services of this project are maintained by the OpenList Team and are freely available on GitHub.
- Project documentation and API services primarily rely on charitable resources provided by Cloudflare; there are currently no paid plans or commercial deployments, and using the existing features involves no costs.
We respect the community's right to free use and derivative development, but we also strongly urge downstream projects:
- Not to use the "OpenList" name for misleading promotion or commercial gain;
- Not to distribute OpenList-based code in a closed-source manner or violate the AGPL license terms.
To better maintain a healthy ecosystem, we recommend:
- Clearly indicating the project's origin and choosing an appropriate open-source license in keeping with the open-source spirit;
- Avoiding "OpenList" or any confusing naming as the project name when commercial use is involved;
- Modifying and using materials under OpenListTeam/Logo only in compliance with the agreement.
Thank you for your support and understanding of the OpenList project.
## Features
- [x] Multiple storages
@ -78,8 +106,9 @@
## Documentation
- 📘 [Docs & Install Guide](https://docs.oplist.org)
- 📚 [Backup Docs Site](https://docs.openlist.team)
- 🌏 [CN Site](https://doc.oplist.org.cn)
- 📘 [Global Site](https://doc.oplist.org)
- 📚 [Backup Site](https://doc.openlist.team)
## Demo


@ -20,6 +20,34 @@
- [Code of Conduct](./CODE_OF_CONDUCT.md)
- [License](./LICENSE)
## Disclaimer
OpenList is an open-source project independently maintained by the OpenList Team, following the AGPL-3.0 license and committed to keeping its code fully open and its modifications transparent.
We have observed third-party projects in the community with names similar to this project, such as OpenListApp/OpenListApp, as well as paid proprietary software using the same or similar naming. To avoid user confusion, we hereby declare:
- OpenList has no official association with any third-party derivative projects.
- All software, code, and services of this project are maintained by the OpenList Team and are freely available on GitHub.
- Project documentation and API services primarily rely on charitable resources provided by Cloudflare; there are currently no paid plans or commercial deployments, and using the existing features involves no costs.
We respect the community's right to free use and derivative development, but we also strongly urge downstream projects:
- Not to use the "OpenList" name for misleading promotion or commercial gain;
- Not to distribute OpenList-based code in a closed-source manner or violate the AGPL license terms.
To better maintain a healthy ecosystem, we recommend:
- Clearly indicating the project's origin and choosing an appropriate open-source license in keeping with the open-source spirit;
- Avoiding "OpenList" or any confusing naming as the project name when commercial use is involved;
- Modifying and using materials under OpenListTeam/Logo only in compliance with the agreement.
Thank you for your support and understanding of the OpenList project.
## Features
- [x] Multiple storages
@ -78,8 +106,9 @@
## Documentation
- 📘 [Docs & Install Guide](https://docs.oplist.org)
- 📚 [Backup Docs Site](https://docs.openlist.team)
- 📘 [Global Site](https://doc.oplist.org)
- 📚 [Backup Site](https://doc.openlist.team)
- 🌏 [CN Site](https://doc.oplist.org.cn)
## Demo


@ -20,6 +20,34 @@
- [Code of Conduct](./CODE_OF_CONDUCT.md)
- [License](./LICENSE)
## Disclaimer
OpenList is an open-source project independently maintained by the OpenList Team, following the AGPL-3.0 license and committed to keeping its code fully open and its modifications transparent.
We have noticed some third-party projects in the community with names similar to this project, such as OpenListApp/OpenListApp, as well as some paid proprietary software using the same or similar naming. To avoid user confusion, we hereby declare:
- OpenList has no official association with any third-party derivative projects.
- All software, code, and services of this project are maintained by the OpenList Team and are freely available on GitHub.
- Project documentation and API services primarily rely on charitable resources provided by Cloudflare; there are currently no paid plans or commercial deployments, and using the existing features involves no costs.
We respect the community's right to free use and derivative development, but we also strongly urge downstream projects:
- Not to use the "OpenList" name for misleading promotion or commercial gain;
- Not to distribute OpenList-based code in a closed-source manner or violate the AGPL license terms.
To better maintain a healthy ecosystem, we recommend:
- Clearly indicating the project's origin and choosing an appropriate open-source license in keeping with the open-source spirit;
- Avoiding "OpenList" or any confusing naming as the project name when commercial use is involved;
- Modifying and using materials under OpenListTeam/Logo only in compliance with the agreement.
Thank you for your support and understanding of the OpenList project.
## Features
- [x] Multiple storage options
@ -78,8 +106,9 @@
## Documentation
- 📘 [Docs & Install Guide](https://docs.oplist.org)
- 📚 [Backup Docs Site](https://docs.openlist.team)
- 📘 [Global Site](https://doc.oplist.org)
- 📚 [Backup Site](https://doc.openlist.team)
- 🌏 [CN Site](https://doc.oplist.org.cn)
## Demo

build.sh

@ -4,6 +4,9 @@ builtAt="$(date +'%F %T %z')"
gitAuthor="The OpenList Projects Contributors <noreply@openlist.team>"
gitCommit=$(git log --pretty=format:"%h" -1)
# Set frontend repository, default to OpenListTeam/OpenList-Frontend
frontendRepo="${FRONTEND_REPO:-OpenListTeam/OpenList-Frontend}"
githubAuthArgs=""
if [ -n "$GITHUB_TOKEN" ]; then
githubAuthArgs="--header \"Authorization: Bearer $GITHUB_TOKEN\""
@ -17,15 +20,15 @@ fi
if [ "$1" = "dev" ]; then
version="dev"
webVersion="dev"
webVersion="rolling"
elif [ "$1" = "beta" ]; then
version="beta"
webVersion="dev"
webVersion="rolling"
else
git tag -d beta || true
# Falls back to v0.0.0 when the repository has no tags
version=$(git describe --abbrev=0 --tags 2>/dev/null || echo "v0.0.0")
webVersion=$(eval "curl -fsSL --max-time 2 $githubAuthArgs \"https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases/latest\"" | grep "tag_name" | head -n 1 | awk -F ":" '{print $2}' | sed 's/\"//g;s/,//g;s/ //g')
webVersion=$(eval "curl -fsSL --max-time 2 $githubAuthArgs \"https://api.github.com/repos/$frontendRepo/releases/latest\"" | grep "tag_name" | head -n 1 | awk -F ":" '{print $2}' | sed 's/\"//g;s/,//g;s/ //g')
fi
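Since the script already depends on `jq` in the fetch helpers below, the same `tag_name` could arguably be extracted more robustly with it; a sketch of an equivalent line:
```shell
webVersion=$(eval "curl -fsSL --max-time 2 $githubAuthArgs \"https://api.github.com/repos/$frontendRepo/releases/latest\"" | jq -r '.tag_name')
```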
echo "backend version: $version"
@ -45,30 +48,21 @@ ldflags="\
-X 'github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=$webVersion' \
"
FetchWebDev() {
pre_release_tag=$(eval "curl -fsSL --max-time 2 $githubAuthArgs https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases" | jq -r 'map(select(.prerelease)) | first | .tag_name')
if [ -z "$pre_release_tag" ] || [ "$pre_release_tag" == "null" ]; then
# fall back to latest release
pre_release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases/latest\"")
else
pre_release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases/tags/$pre_release_tag\"")
fi
FetchWebRolling() {
pre_release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/$frontendRepo/releases/tags/rolling\"")
pre_release_assets=$(echo "$pre_release_json" | jq -r '.assets[].browser_download_url')
if [ "$useLite" = true ]; then
pre_release_tar_url=$(echo "$pre_release_assets" | grep "openlist-frontend-dist-lite" | grep "\.tar\.gz$")
else
pre_release_tar_url=$(echo "$pre_release_assets" | grep "openlist-frontend-dist" | grep -v "lite" | grep "\.tar\.gz$")
fi
curl -fsSL "$pre_release_tar_url" -o web-dist-dev.tar.gz
# There is no lite for rolling
pre_release_tar_url=$(echo "$pre_release_assets" | grep "openlist-frontend-dist" | grep -v "lite" | grep "\.tar\.gz$")
curl -fsSL "$pre_release_tar_url" -o dist.tar.gz
rm -rf public/dist && mkdir -p public/dist
tar -zxvf web-dist-dev.tar.gz -C public/dist
rm -rf web-dist-dev.tar.gz
tar -zxvf dist.tar.gz -C public/dist
rm -rf dist.tar.gz
}
FetchWebRelease() {
release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases/latest\"")
release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/$frontendRepo/releases/latest\"")
release_assets=$(echo "$release_json" | jq -r '.assets[].browser_download_url')
if [ "$useLite" = true ]; then
@ -95,6 +89,45 @@ BuildWinArm64() {
go build -o "$1" -ldflags="$ldflags" -tags=jsoniter .
}
BuildWin7() {
# Setup Win7 Go compiler (patched version that supports Windows 7)
go_version=$(go version | grep -o 'go[0-9]\+\.[0-9]\+\.[0-9]\+' | sed 's/go//')
echo "Detected Go version: $go_version"
curl -fsSL --retry 3 -o go-win7.zip -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/XTLS/go-win7/releases/download/patched-${go_version}/go-for-win7-linux-amd64.zip"
rm -rf go-win7
unzip go-win7.zip -d go-win7
rm go-win7.zip
# Set permissions for all wrapper files
chmod +x ./wrapper/zcc-win7
chmod +x ./wrapper/zcxx-win7
chmod +x ./wrapper/zcc-win7-386
chmod +x ./wrapper/zcxx-win7-386
# Build for both 386 and amd64 architectures
for arch in "386" "amd64"; do
echo "building for windows7-${arch}"
export GOOS=windows
export GOARCH=${arch}
export CGO_ENABLED=1
# Use architecture-specific wrapper files
if [ "$arch" = "386" ]; then
export CC=$(pwd)/wrapper/zcc-win7-386
export CXX=$(pwd)/wrapper/zcxx-win7-386
else
export CC=$(pwd)/wrapper/zcc-win7
export CXX=$(pwd)/wrapper/zcxx-win7
fi
# Use the patched Go compiler for Win7 compatibility
$(pwd)/go-win7/bin/go build -o "${1}-${arch}.exe" -ldflags="$ldflags" -tags=jsoniter .
done
}
BuildDev() {
rm -rf .git/
mkdir -p "dist"
@ -121,8 +154,8 @@ BuildDev() {
xgo -targets=windows/amd64,darwin/amd64,darwin/arm64 -out "$appName" -ldflags="$ldflags" -tags=jsoniter .
mv "$appName"-* dist
cd dist
cp ./"$appName"-windows-amd64.exe ./"$appName"-windows-amd64-upx.exe
upx -9 ./"$appName"-windows-amd64-upx.exe
# cp ./"$appName"-windows-amd64.exe ./"$appName"-windows-amd64-upx.exe
# upx -9 ./"$appName"-windows-amd64-upx.exe
find . -type f -print0 | xargs -0 md5sum >md5.txt
cat md5.txt
}
@ -134,7 +167,7 @@ BuildDocker() {
PrepareBuildDockerMusl() {
mkdir -p build/musl-libs
BASE="https://github.com/OpenListTeam/musl-compilers/releases/latest/download/"
FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross i486-linux-musl-cross s390x-linux-musl-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross riscv64-linux-musl-cross powerpc64le-linux-musl-cross)
FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross i486-linux-musl-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross riscv64-linux-musl-cross powerpc64le-linux-musl-cross loongarch64-linux-musl-cross) ## Disable s390x-linux-musl-cross builds
for i in "${FILES[@]}"; do
url="${BASE}${i}.tgz"
lib_tgz="build/${i}.tgz"
@ -153,8 +186,8 @@ BuildDockerMultiplatform() {
docker_lflags="--extldflags '-static -fpic' $ldflags"
export CGO_ENABLED=1
OS_ARCHES=(linux-amd64 linux-arm64 linux-386 linux-s390x linux-riscv64 linux-ppc64le)
CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc i486-linux-musl-gcc s390x-linux-musl-gcc riscv64-linux-musl-gcc powerpc64le-linux-musl-gcc)
OS_ARCHES=(linux-amd64 linux-arm64 linux-386 linux-riscv64 linux-ppc64le linux-loong64) ## Disable linux-s390x builds
CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc i486-linux-musl-gcc riscv64-linux-musl-gcc powerpc64le-linux-musl-gcc loongarch64-linux-musl-gcc) ## Disable s390x-linux-musl-gcc builds
for i in "${!OS_ARCHES[@]}"; do
os_arch=${OS_ARCHES[$i]}
cgo_cc=${CGO_ARGS[$i]}
@ -186,12 +219,171 @@ BuildRelease() {
rm -rf .git/
mkdir -p "build"
BuildWinArm64 ./build/"$appName"-windows-arm64.exe
BuildWin7 ./build/"$appName"-windows7
xgo -out "$appName" -ldflags="$ldflags" -tags=jsoniter .
# why? Because some target platforms seem to have issues with upx compression
upx -9 ./"$appName"-linux-amd64
cp ./"$appName"-windows-amd64.exe ./"$appName"-windows-amd64-upx.exe
upx -9 ./"$appName"-windows-amd64-upx.exe
# upx -9 ./"$appName"-linux-amd64
# cp ./"$appName"-windows-amd64.exe ./"$appName"-windows-amd64-upx.exe
# upx -9 ./"$appName"-windows-amd64-upx.exe
mv "$appName"-* build
# Build LoongArch with glibc (both old world abi1.0 and new world abi2.0)
# Separate from musl builds to avoid cache conflicts
BuildLoongGLIBC ./build/$appName-linux-loong64-abi1.0 abi1.0
BuildLoongGLIBC ./build/$appName-linux-loong64 abi2.0
}
BuildLoongGLIBC() {
local target_abi="$2"
local output_file="$1"
local oldWorldGoVersion="1.24.3"
if [ "$target_abi" = "abi1.0" ]; then
echo building for linux-loong64-abi1.0
else
echo building for linux-loong64-abi2.0
target_abi="abi2.0" # Default to abi2.0 if not specified
fi
# Note: No longer need global cache cleanup since ABI1.0 uses isolated cache directory
echo "Using optimized cache strategy: ABI1.0 has isolated cache, ABI2.0 uses standard cache"
if [ "$target_abi" = "abi1.0" ]; then
# Setup abi1.0 toolchain and patched Go compiler similar to cgo-action implementation
echo "Setting up Loongson old-world ABI1.0 toolchain and patched Go compiler..."
# Download and setup patched Go compiler for old-world
if ! curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/go${oldWorldGoVersion}.linux-amd64.tar.gz" \
-o go-loong64-abi1.0.tar.gz; then
echo "Error: Failed to download patched Go compiler for old-world ABI1.0"
if [ -n "$GITHUB_TOKEN" ]; then
echo "Error output from curl:"
curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/go${oldWorldGoVersion}.linux-amd64.tar.gz" \
-o go-loong64-abi1.0.tar.gz || true
fi
return 1
fi
rm -rf go-loong64-abi1.0
mkdir go-loong64-abi1.0
if ! tar -xzf go-loong64-abi1.0.tar.gz -C go-loong64-abi1.0 --strip-components=1; then
echo "Error: Failed to extract patched Go compiler"
return 1
fi
rm go-loong64-abi1.0.tar.gz
# Download and setup GCC toolchain for old-world
if ! curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/loongson-gnu-toolchain-8.3.novec-x86_64-loongarch64-linux-gnu-rc1.1.tar.xz" \
-o gcc8-loong64-abi1.0.tar.xz; then
echo "Error: Failed to download GCC toolchain for old-world ABI1.0"
if [ -n "$GITHUB_TOKEN" ]; then
echo "Error output from curl:"
curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/loongson-gnu-toolchain-8.3.novec-x86_64-loongarch64-linux-gnu-rc1.1.tar.xz" \
-o gcc8-loong64-abi1.0.tar.xz || true
fi
return 1
fi
rm -rf gcc8-loong64-abi1.0
mkdir gcc8-loong64-abi1.0
if ! tar -Jxf gcc8-loong64-abi1.0.tar.xz -C gcc8-loong64-abi1.0 --strip-components=1; then
echo "Error: Failed to extract GCC toolchain"
return 1
fi
rm gcc8-loong64-abi1.0.tar.xz
# Setup separate cache directory for ABI1.0 to avoid cache pollution
abi1_cache_dir="$(pwd)/go-loong64-abi1.0-cache"
mkdir -p "$abi1_cache_dir"
echo "Using separate cache directory for ABI1.0: $abi1_cache_dir"
# Use patched Go compiler for old-world build (critical for ABI1.0 compatibility)
echo "Building with patched Go compiler for old-world ABI1.0..."
echo "Using isolated cache directory: $abi1_cache_dir"
# Use env command to set environment variables locally without affecting global environment
if ! env GOOS=linux GOARCH=loong64 \
CC="$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-gcc" \
CXX="$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-g++" \
CGO_ENABLED=1 \
GOCACHE="$abi1_cache_dir" \
$(pwd)/go-loong64-abi1.0/bin/go build -a -o "$output_file" -ldflags="$ldflags" -tags=jsoniter .; then
echo "Error: Build failed with patched Go compiler"
echo "Attempting retry with cache cleanup..."
env GOCACHE="$abi1_cache_dir" $(pwd)/go-loong64-abi1.0/bin/go clean -cache
if ! env GOOS=linux GOARCH=loong64 \
CC="$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-gcc" \
CXX="$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-g++" \
CGO_ENABLED=1 \
GOCACHE="$abi1_cache_dir" \
$(pwd)/go-loong64-abi1.0/bin/go build -a -o "$output_file" -ldflags="$ldflags" -tags=jsoniter .; then
echo "Error: Build failed again after cache cleanup"
echo "Build environment details:"
echo "GOOS=linux"
echo "GOARCH=loong64"
echo "CC=$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-gcc"
echo "CXX=$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-g++"
echo "CGO_ENABLED=1"
echo "GOCACHE=$abi1_cache_dir"
echo "Go version: $($(pwd)/go-loong64-abi1.0/bin/go version)"
echo "GCC version: $($(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-gcc --version | head -1)"
return 1
fi
fi
else
# Setup abi2.0 toolchain for new world glibc build
echo "Setting up new-world ABI2.0 toolchain..."
if ! curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/cross-tools/releases/download/20250507/x86_64-cross-tools-loongarch64-unknown-linux-gnu-legacy.tar.xz" \
-o gcc12-loong64-abi2.0.tar.xz; then
echo "Error: Failed to download GCC toolchain for new-world ABI2.0"
if [ -n "$GITHUB_TOKEN" ]; then
echo "Error output from curl:"
curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/cross-tools/releases/download/20250507/x86_64-cross-tools-loongarch64-unknown-linux-gnu-legacy.tar.xz" \
-o gcc12-loong64-abi2.0.tar.xz || true
fi
return 1
fi
rm -rf gcc12-loong64-abi2.0
mkdir gcc12-loong64-abi2.0
if ! tar -Jxf gcc12-loong64-abi2.0.tar.xz -C gcc12-loong64-abi2.0 --strip-components=1; then
echo "Error: Failed to extract GCC toolchain"
return 1
fi
rm gcc12-loong64-abi2.0.tar.xz
export GOOS=linux
export GOARCH=loong64
export CC=$(pwd)/gcc12-loong64-abi2.0/bin/loongarch64-unknown-linux-gnu-gcc
export CXX=$(pwd)/gcc12-loong64-abi2.0/bin/loongarch64-unknown-linux-gnu-g++
export CGO_ENABLED=1
# Use standard Go compiler for new-world build
echo "Building with standard Go compiler for new-world ABI2.0..."
if ! go build -a -o "$output_file" -ldflags="$ldflags" -tags=jsoniter .; then
echo "Error: Build failed with standard Go compiler"
echo "Attempting retry with cache cleanup..."
go clean -cache
if ! go build -a -o "$output_file" -ldflags="$ldflags" -tags=jsoniter .; then
echo "Error: Build failed again after cache cleanup"
echo "Build environment details:"
echo "GOOS=$GOOS"
echo "GOARCH=$GOARCH"
echo "CC=$CC"
echo "CXX=$CXX"
echo "CGO_ENABLED=$CGO_ENABLED"
echo "Go version: $(go version)"
echo "GCC version: $($CC --version | head -1)"
return 1
fi
fi
fi
}
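To sanity-check which world a produced binary targets, inspecting the ELF header is one option; old-world (abi1.0) and new-world (abi2.0) toolchains typically emit different flag bits, so comparing the two outputs side by side is usually enough:
```shell
readelf -h build/openlist-linux-loong64-abi1.0 | grep -E 'Machine|Flags'
readelf -h build/openlist-linux-loong64 | grep -E 'Machine|Flags'
```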
BuildReleaseLinuxMusl() {
@ -249,6 +441,7 @@ BuildReleaseLinuxMuslArm() {
done
}
BuildReleaseAndroid() {
rm -rf .git/
mkdir -p "build"
@ -278,6 +471,7 @@ BuildReleaseFreeBSD() {
freebsd_version=$(eval "curl -fsSL --max-time 2 $githubAuthArgs \"https://api.github.com/repos/freebsd/freebsd-src/tags\"" | \
jq -r '.[].name' | \
grep '^release/14\.' | \
grep -v -- '-p[0-9]*$' | \
sort -V | \
tail -1 | \
sed 's/release\///' | \
@ -343,7 +537,7 @@ MakeRelease() {
tar -czvf compress/"$i$liteSuffix".tar.gz "$appName"
rm -f "$appName"
done
for i in $(find . -type f -name "$appName-windows-*"); do
for i in $(find . -type f \( -name "$appName-windows-*" -o -name "$appName-windows7-*" \)); do
cp "$i" "$appName".exe
zip compress/$(echo $i | sed 's/\.[^.]*$//')$liteSuffix.zip "$appName".exe
rm -f "$appName".exe
@ -390,7 +584,7 @@ for arg in "$@"; do
done
if [ "$buildType" = "dev" ]; then
FetchWebDev
FetchWebRolling
if [ "$dockerType" = "docker" ]; then
BuildDocker
elif [ "$dockerType" = "docker-multiplatform" ]; then
@ -402,7 +596,7 @@ if [ "$buildType" = "dev" ]; then
fi
elif [ "$buildType" = "release" -o "$buildType" = "beta" ]; then
if [ "$buildType" = "beta" ]; then
FetchWebDev
FetchWebRolling
else
FetchWebRelease
fi
@ -483,4 +677,5 @@ else
echo -e " $0 release"
echo -e " $0 release lite"
echo -e " $0 release docker lite"
echo -e " $0 release linux_musl"
fi


@ -4,6 +4,8 @@ Copyright © 2022 NAME HERE <EMAIL ADDRESS>
package cmd
import (
"fmt"
"github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/setting"
@ -24,10 +26,11 @@ var AdminCmd = &cobra.Command{
if err != nil {
utils.Log.Errorf("failed get admin user: %+v", err)
} else {
utils.Log.Infof("Admin user's username: %s", admin.Username)
utils.Log.Infof("The password can only be output at the first startup, and then stored as a hash value, which cannot be reversed")
utils.Log.Infof("You can reset the password with a random string by running [openlist admin random]")
utils.Log.Infof("You can also set a new password by running [openlist admin set NEW_PASSWORD]")
utils.Log.Infof("get admin user from CLI")
fmt.Println("Admin user's username:", admin.Username)
fmt.Println("The password can only be output at the first startup, and then stored as a hash value, which cannot be reversed")
fmt.Println("You can reset the password with a random string by running [openlist admin random]")
fmt.Println("You can also set a new password by running [openlist admin set NEW_PASSWORD]")
}
},
}
@ -36,6 +39,7 @@ var RandomPasswordCmd = &cobra.Command{
Use: "random",
Short: "Reset admin user's password to a random string",
Run: func(cmd *cobra.Command, args []string) {
utils.Log.Infof("reset admin user's password to a random string from CLI")
newPwd := random.String(8)
setAdminPassword(newPwd)
},
@ -44,12 +48,12 @@ var RandomPasswordCmd = &cobra.Command{
var SetPasswordCmd = &cobra.Command{
Use: "set",
Short: "Set admin user's password",
Run: func(cmd *cobra.Command, args []string) {
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
utils.Log.Errorf("Please enter the new password")
return
return fmt.Errorf("Please enter the new password")
}
setAdminPassword(args[0])
return nil
},
}
@ -60,7 +64,8 @@ var ShowTokenCmd = &cobra.Command{
Init()
defer Release()
token := setting.GetStr(conf.Token)
utils.Log.Infof("Admin token: %s", token)
utils.Log.Infof("show admin token from CLI")
fmt.Println("Admin token:", token)
},
}
@ -77,9 +82,10 @@ func setAdminPassword(pwd string) {
utils.Log.Errorf("failed update admin user: %+v", err)
return
}
utils.Log.Infof("admin user has been updated:")
utils.Log.Infof("username: %s", admin.Username)
utils.Log.Infof("password: %s", pwd)
utils.Log.Infof("admin user has been update from CLI")
fmt.Println("admin user has been updated:")
fmt.Println("username:", admin.Username)
fmt.Println("password:", pwd)
DelAdminCacheOnline()
}
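Taken together, the admin subcommands shown above are driven like this (NEW_PASSWORD is a placeholder):
```shell
openlist admin                    # print the admin username
openlist admin random             # reset the password to a random string
openlist admin set NEW_PASSWORD   # set an explicit password
```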


@ -4,6 +4,8 @@ Copyright © 2022 NAME HERE <EMAIL ADDRESS>
package cmd
import (
"fmt"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/spf13/cobra"
@ -24,7 +26,8 @@ var Cancel2FACmd = &cobra.Command{
if err != nil {
utils.Log.Errorf("failed to cancel 2FA: %+v", err)
} else {
utils.Log.Info("2FA canceled")
utils.Log.Infof("2FA is canceled from CLI")
fmt.Println("2FA canceled")
DelAdminCacheOnline()
}
}


@ -16,7 +16,7 @@ var RootCmd = &cobra.Command{
Short: "A file list program that supports multiple storage.",
Long: `A file list program that supports multiple storage,
built with love by OpenListTeam.
Complete documentation is available at https://docs.openlist.team/`,
Complete documentation is available at https://doc.oplist.org/`,
}
func Execute() {


@ -19,6 +19,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/fs"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/OpenList/v4/server"
"github.com/OpenListTeam/OpenList/v4/server/middlewares"
"github.com/OpenListTeam/sftpd-openlist"
ftpserver "github.com/fclairamb/ftpserverlib"
"github.com/gin-gonic/gin"
@ -47,7 +48,15 @@ the address is defined in config file`,
gin.SetMode(gin.ReleaseMode)
}
r := gin.New()
r.Use(gin.LoggerWithWriter(log.StandardLogger().Out), gin.RecoveryWithWriter(log.StandardLogger().Out))
// gin log
if conf.Conf.Log.Filter.Enable {
r.Use(middlewares.FilteredLogger())
} else {
r.Use(gin.LoggerWithWriter(log.StandardLogger().Out))
}
r.Use(gin.RecoveryWithWriter(log.StandardLogger().Out))
server.Init(r)
var httpHandler http.Handler = r
if conf.Conf.Scheme.EnableH2c {
@ -56,6 +65,7 @@ the address is defined in config file`,
var httpSrv, httpsSrv, unixSrv *http.Server
if conf.Conf.Scheme.HttpPort != -1 {
httpBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpPort)
fmt.Printf("start HTTP server @ %s\n", httpBase)
utils.Log.Infof("start HTTP server @ %s", httpBase)
httpSrv = &http.Server{Addr: httpBase, Handler: httpHandler}
go func() {
@ -67,6 +77,7 @@ the address is defined in config file`,
}
if conf.Conf.Scheme.HttpsPort != -1 {
httpsBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpsPort)
fmt.Printf("start HTTPS server @ %s\n", httpsBase)
utils.Log.Infof("start HTTPS server @ %s", httpsBase)
httpsSrv = &http.Server{Addr: httpsBase, Handler: r}
go func() {
@ -77,6 +88,7 @@ the address is defined in config file`,
}()
}
if conf.Conf.Scheme.UnixFile != "" {
fmt.Printf("start unix server @ %s\n", conf.Conf.Scheme.UnixFile)
utils.Log.Infof("start unix server @ %s", conf.Conf.Scheme.UnixFile)
unixSrv = &http.Server{Handler: httpHandler}
go func() {
@ -105,6 +117,7 @@ the address is defined in config file`,
s3r.Use(gin.LoggerWithWriter(log.StandardLogger().Out), gin.RecoveryWithWriter(log.StandardLogger().Out))
server.InitS3(s3r)
s3Base := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.S3.Port)
fmt.Printf("start S3 server @ %s\n", s3Base)
utils.Log.Infof("start S3 server @ %s", s3Base)
go func() {
var err error
@ -129,6 +142,7 @@ the address is defined in config file`,
if err != nil {
utils.Log.Fatalf("failed to start ftp driver: %s", err.Error())
} else {
fmt.Printf("start ftp server on %s\n", conf.Conf.FTP.Listen)
utils.Log.Infof("start ftp server on %s", conf.Conf.FTP.Listen)
go func() {
ftpServer = ftpserver.NewFtpServer(ftpDriver)
@ -147,6 +161,7 @@ the address is defined in config file`,
if err != nil {
utils.Log.Fatalf("failed to start sftp driver: %s", err.Error())
} else {
fmt.Printf("start sftp server on %s", conf.Conf.SFTP.Listen)
utils.Log.Infof("start sftp server on %s", conf.Conf.SFTP.Listen)
go func() {
sftpServer = sftpd.NewSftpServer(sftpDriver)


@ -4,6 +4,7 @@ Copyright © 2023 NAME HERE <EMAIL ADDRESS>
package cmd
import (
"fmt"
"os"
"strconv"
@ -22,28 +23,61 @@ var storageCmd = &cobra.Command{
}
var disableStorageCmd = &cobra.Command{
Use: "disable",
Short: "Disable a storage",
Run: func(cmd *cobra.Command, args []string) {
Use: "disable [mount path]",
Short: "Disable a storage by mount path",
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) < 1 {
utils.Log.Errorf("mount path is required")
return
return fmt.Errorf("mount path is required")
}
mountPath := args[0]
Init()
defer Release()
storage, err := db.GetStorageByMountPath(mountPath)
if err != nil {
utils.Log.Errorf("failed to query storage: %+v", err)
} else {
storage.Disabled = true
err = db.UpdateStorage(storage)
if err != nil {
utils.Log.Errorf("failed to update storage: %+v", err)
} else {
utils.Log.Infof("Storage with mount path [%s] have been disabled", mountPath)
return fmt.Errorf("failed to query storage: %+v", err)
}
storage.Disabled = true
err = db.UpdateStorage(storage)
if err != nil {
return fmt.Errorf("failed to update storage: %+v", err)
}
utils.Log.Infof("Storage with mount path [%s] has been disabled from CLI", mountPath)
fmt.Printf("Storage with mount path [%s] has been disabled\n", mountPath)
return nil
},
}
var deleteStorageCmd = &cobra.Command{
Use: "delete [id]",
Short: "Delete a storage by id",
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) < 1 {
return fmt.Errorf("id is required")
}
id, err := strconv.Atoi(args[0])
if err != nil {
return fmt.Errorf("id must be a number")
}
if force, _ := cmd.Flags().GetBool("force"); !force { // prompt for confirmation unless --force is set
fmt.Printf("Are you sure you want to delete storage with id [%d]? [y/N]: ", id)
var confirm string
fmt.Scanln(&confirm)
if confirm != "y" && confirm != "Y" {
fmt.Println("Delete operation cancelled.")
return nil
}
}
Init()
defer Release()
err = db.DeleteStorageById(uint(id))
if err != nil {
return fmt.Errorf("failed to delete storage by id: %+v", err)
}
utils.Log.Infof("Storage with id [%d] have been deleted from CLI", id)
fmt.Printf("Storage with id [%d] have been deleted\n", id)
return nil
},
}
@ -88,14 +122,14 @@ var storageTableHeight int
var listStorageCmd = &cobra.Command{
Use: "list",
Short: "List all storages",
Run: func(cmd *cobra.Command, args []string) {
RunE: func(cmd *cobra.Command, args []string) error {
Init()
defer Release()
storages, _, err := db.GetStorages(1, -1)
if err != nil {
utils.Log.Errorf("failed to query storages: %+v", err)
return fmt.Errorf("failed to query storages: %+v", err)
} else {
utils.Log.Infof("Found %d storages", len(storages))
fmt.Printf("Found %d storages\n", len(storages))
columns := []table.Column{
{Title: "ID", Width: 4},
{Title: "Driver", Width: 16},
@ -138,10 +172,11 @@ var listStorageCmd = &cobra.Command{
m := model{t}
if _, err := tea.NewProgram(m).Run(); err != nil {
utils.Log.Errorf("failed to run program: %+v", err)
fmt.Printf("failed to run program: %+v\n", err)
os.Exit(1)
}
}
return nil
},
}
@ -151,6 +186,8 @@ func init() {
storageCmd.AddCommand(disableStorageCmd)
storageCmd.AddCommand(listStorageCmd)
storageCmd.PersistentFlags().IntVarP(&storageTableHeight, "height", "H", 10, "Table height")
storageCmd.AddCommand(deleteStorageCmd)
deleteStorageCmd.Flags().BoolP("force", "f", false, "Force delete without confirmation")
// Here you will define your flags and configuration settings.
// Cobra supports Persistent Flags which will work for this command
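A quick sketch of the resulting storage CLI surface (the mount path and id are illustrative):
```shell
openlist storage list              # table of all configured storages
openlist storage disable /115      # disable a storage by mount path
openlist storage delete 3 --force  # delete by id, skipping the confirmation prompt
```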


@ -6,10 +6,9 @@ services:
ports:
- '5244:5244'
- '5245:5245'
user: '0:0'
environment:
- PUID=0
- PGID=0
- UMASK=022
- TZ=UTC
- TZ=Asia/Shanghai
container_name: openlist
image: 'openlistteam/openlist:latest'


@ -1,43 +1,60 @@
package _115
import (
"errors"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
driver115 "github.com/SheltonZhu/115driver/pkg/driver"
log "github.com/sirupsen/logrus"
)
var (
md5Salt = "Qclm8MGWUv59TnrR0XPg"
appVer = "27.0.5.7"
appVer = "35.6.0.3"
)
func (d *Pan115) getAppVersion() ([]driver115.AppVersion, error) {
result := driver115.VersionResp{}
resp, err := base.RestyClient.R().Get(driver115.ApiGetVersion)
err = driver115.CheckErr(err, &result, resp)
func (d *Pan115) getAppVersion() (string, error) {
result := VersionResp{}
res, err := base.RestyClient.R().Get(driver115.ApiGetVersion)
if err != nil {
return nil, err
return "", err
}
return result.Data.GetAppVersions(), nil
err = utils.Json.Unmarshal(res.Body(), &result)
if err != nil {
return "", err
}
if len(result.Error) > 0 {
return "", errors.New(result.Error)
}
return result.Data.Win.Version, nil
}
func (d *Pan115) getAppVer() string {
// todo add some cache
vers, err := d.getAppVersion()
ver, err := d.getAppVersion()
if err != nil {
log.Warnf("[115] get app version failed: %v", err)
return appVer
}
for _, ver := range vers {
if ver.AppName == "win" {
return ver.Version
}
if len(ver) > 0 {
return ver
}
return appVer
}
func (d *Pan115) initAppVer() {
appVer = d.getAppVer()
log.Debugf("use app version: %v", appVer)
}
type VersionResp struct {
Error string `json:"error,omitempty"`
Data Versions `json:"data"`
}
type Versions struct {
Win Version `json:"win"`
}
type Version struct {
Version string `json:"version_code"`
}


@ -186,7 +186,7 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
preHash = strings.ToUpper(preHash)
fullHash := stream.GetHash().GetHash(utils.SHA1)
if len(fullHash) != utils.SHA1.Width {
_, fullHash, err = streamPkg.CacheFullInTempFileAndHash(stream, utils.SHA1)
_, fullHash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA1)
if err != nil {
return nil, err
}


@ -18,7 +18,6 @@ var config = driver.Config{
Name: "115 Cloud",
DefaultRoot: "0",
// OnlyProxy: true,
// OnlyLocal: true,
// NoOverwriteUpload: true,
}


@ -321,7 +321,7 @@ func (d *Pan115) UploadByMultipart(ctx context.Context, params *driver115.Upload
err error
)
tmpF, err := s.CacheFullInTempFile()
tmpF, err := s.CacheFullAndWriter(&up, nil)
if err != nil {
return nil, err
}


@ -8,6 +8,7 @@ import (
"strings"
"time"
sdk "github.com/OpenListTeam/115-sdk-go"
"github.com/OpenListTeam/OpenList/v4/cmd/flags"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
@ -16,7 +17,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
sdk "github.com/OpenListTeam/115-sdk-go"
"golang.org/x/time/rate"
)
@ -131,6 +131,23 @@ func (d *Open115) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
}, nil
}
func (d *Open115) GetObjInfo(ctx context.Context, path string) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
resp, err := d.client.GetFolderInfoByPath(ctx, path)
if err != nil {
return nil, err
}
return &Obj{
Fid: resp.FileID,
Fn: resp.FileName,
Fc: resp.FileCategory,
Sha1: resp.Sha1,
Pc: resp.PickCode,
}, nil
}
func (d *Open115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
@ -222,7 +239,7 @@ func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
}
sha1 := file.GetHash().GetHash(utils.SHA1)
if len(sha1) != utils.SHA1.Width {
_, sha1, err = stream.CacheFullInTempFileAndHash(file, utils.SHA1)
_, sha1, err = stream.CacheFullAndHash(file, &up, utils.SHA1)
if err != nil {
return err
}
@ -252,6 +269,7 @@ func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
return err
}
if resp.Status == 2 {
up(100)
return nil
}
// 2. two way verify
@ -286,6 +304,7 @@ func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
return err
}
if resp.Status == 2 {
up(100)
return nil
}
}
@ -302,6 +321,22 @@ func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
return nil
}
func (d *Open115) OfflineDownload(ctx context.Context, uris []string, dstDir model.Obj) ([]string, error) {
return d.client.AddOfflineTaskURIs(ctx, uris, dstDir.GetID())
}
func (d *Open115) DeleteOfflineTask(ctx context.Context, infoHash string, deleteFiles bool) error {
return d.client.DeleteOfflineTask(ctx, infoHash, deleteFiles)
}
func (d *Open115) OfflineList(ctx context.Context) (*sdk.OfflineTaskListResp, error) {
resp, err := d.client.OfflineTaskList(ctx, 1)
if err != nil {
return nil, err
}
return resp, nil
}
// func (d *Open115) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
// // TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
// return nil, errs.NotImplement

View File

@ -11,23 +11,14 @@ type Addition struct {
// define other
OrderBy string `json:"order_by" type:"select" options:"file_name,file_size,user_utime,file_type"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc"`
LimitRate float64 `json:"limit_rate,string" type:"float" default:"1" help:"limit all api request rate ([limit]r/1s)"`
LimitRate float64 `json:"limit_rate" type:"float" default:"1" help:"limit all api request rate ([limit]r/1s)"`
AccessToken string `json:"access_token" required:"true"`
RefreshToken string `json:"refresh_token" required:"true"`
}
var config = driver.Config{
Name: "115 Open",
LocalSort: false,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: false,
NeedMs: false,
DefaultRoot: "0",
CheckStatus: false,
Alert: "",
NoOverwriteUpload: false,
Name: "115 Open",
DefaultRoot: "0",
}
func init() {

View File

@ -6,12 +6,13 @@ import (
"io"
"time"
sdk "github.com/OpenListTeam/115-sdk-go"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
"github.com/avast/retry-go"
sdk "github.com/OpenListTeam/115-sdk-go"
)
func calPartSize(fileSize int64) int64 {
@ -69,9 +70,6 @@ func (d *Open115) singleUpload(ctx context.Context, tempF model.File, tokenResp
// }
func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress, tokenResp *sdk.UploadGetTokenResp, initResp *sdk.UploadInitResp) error {
fileSize := stream.GetSize()
chunkSize := calPartSize(fileSize)
ossClient, err := oss.New(tokenResp.Endpoint, tokenResp.AccessKeyId, tokenResp.AccessKeySecret, oss.SecurityToken(tokenResp.SecurityToken))
if err != nil {
return err
@ -86,6 +84,13 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
return err
}
fileSize := stream.GetSize()
chunkSize := calPartSize(fileSize)
ss, err := streamPkg.NewStreamSectionReader(stream, int(chunkSize), &up)
if err != nil {
return err
}
partNum := (stream.GetSize() + chunkSize - 1) / chunkSize
parts := make([]oss.UploadPart, partNum)
offset := int64(0)
@ -98,10 +103,13 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
if i == partNum {
partSize = fileSize - (i-1)*chunkSize
}
rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
rd, err := ss.GetSectionReader(offset, partSize)
if err != nil {
return err
}
rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
err = retry.Do(func() error {
_ = rd.Reset()
rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
rd.Seek(0, io.SeekStart)
part, err := bucket.UploadPart(imur, rateLimitedRd, partSize, int(i))
if err != nil {
return err
@ -112,6 +120,7 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second))
ss.FreeSectionReader(rd)
if err != nil {
return err
}
@ -121,7 +130,7 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
} else {
offset += partSize
}
up(float64(offset) / float64(fileSize))
up(float64(offset) * 100 / float64(fileSize))
}
// callbackRespBytes := make([]byte, 1024)
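
Note: two things change in this hunk: the chunk loop now pulls rewindable sections from a pooled StreamSectionReader instead of re-reading the stream through io.LimitReader, and the progress callback receives a 0-100 percentage rather than a 0-1 fraction. A stdlib-plus-retry-go sketch of the loop's shape (putPart is a placeholder for bucket.UploadPart):

package sketch

import (
	"context"
	"io"
	"time"

	"github.com/avast/retry-go"
)

// uploadParts walks the file in fixed-size sections, retries each part with
// backoff, and reports progress as a percentage.
func uploadParts(ctx context.Context, f io.ReaderAt, fileSize, chunkSize int64,
	putPart func(ctx context.Context, r io.Reader, size int64, n int) error,
	up func(percent float64)) error {

	partNum := (fileSize + chunkSize - 1) / chunkSize // ceiling division
	for i := int64(1); i <= partNum; i++ {
		offset := (i - 1) * chunkSize
		partSize := min(chunkSize, fileSize-offset)
		rd := io.NewSectionReader(f, offset, partSize)
		err := retry.Do(func() error {
			rd.Seek(0, io.SeekStart) // rewind before every attempt
			return putPart(ctx, rd, partSize, int(i))
		},
			retry.Attempts(3),
			retry.Delay(time.Second),
			retry.DelayType(retry.BackOffDelay))
		if err != nil {
			return err
		}
		// up expects a 0-100 percentage, not a 0-1 fraction
		up(float64(offset+partSize) * 100 / float64(fileSize))
	}
	return nil
}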

View File

@ -19,12 +19,7 @@ type Addition struct {
var config = driver.Config{
Name: "115 Share",
DefaultRoot: "0",
// OnlyProxy: true,
// OnlyLocal: true,
CheckStatus: false,
Alert: "",
NoOverwriteUpload: true,
NoUpload: true,
NoUpload: true,
}
func init() {

View File

@ -64,14 +64,6 @@ func (d *Pan123) List(ctx context.Context, dir model.Obj, args model.ListArgs) (
func (d *Pan123) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if f, ok := file.(File); ok {
//var resp DownResp
var headers map[string]string
if !utils.IsLocalIPAddr(args.IP) {
headers = map[string]string{
//"X-Real-IP": "1.1.1.1",
"X-Forwarded-For": args.IP,
}
}
data := base.Json{
"driveId": 0,
"etag": f.Etag,
@ -83,25 +75,27 @@ func (d *Pan123) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
}
resp, err := d.Request(DownloadInfo, http.MethodPost, func(req *resty.Request) {
req.SetBody(data).SetHeaders(headers)
req.SetBody(data)
}, nil)
if err != nil {
return nil, err
}
downloadUrl := utils.Json.Get(resp, "data", "DownloadUrl").ToString()
u, err := url.Parse(downloadUrl)
ou, err := url.Parse(downloadUrl)
if err != nil {
return nil, err
}
nu := u.Query().Get("params")
u_ := ou.String()
nu := ou.Query().Get("params")
if nu != "" {
du, _ := base64.StdEncoding.DecodeString(nu)
u, err = url.Parse(string(du))
u, err := url.Parse(string(du))
if err != nil {
return nil, err
}
u_ = u.String()
}
u_ := u.String()
log.Debug("download url: ", u_)
res, err := base.NoRedirectClient.R().SetHeader("Referer", "https://www.123pan.com/").Get(u_)
if err != nil {
@ -118,7 +112,7 @@ func (d *Pan123) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
link.URL = utils.Json.Get(res.Body(), "data", "redirect_url").ToString()
}
link.Header = http.Header{
"Referer": []string{"https://www.123pan.com/"},
"Referer": []string{fmt.Sprintf("%s://%s/", ou.Scheme, ou.Host)},
}
return &link, nil
} else {
@ -188,7 +182,7 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, file model.FileStrea
etag := file.GetHash().GetHash(utils.MD5)
var err error
if len(etag) < utils.MD5.Width {
_, etag, err = stream.CacheFullInTempFileAndHash(file, utils.MD5)
_, etag, err = stream.CacheFullAndHash(file, &up, utils.MD5)
if err != nil {
return err
}
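
Note: the Link rewrite keeps the originally parsed URL (ou) around so the Referer can be derived from whichever host actually served the link, instead of hardcoding www.123pan.com, while still unwrapping the base64-encoded "params" redirect. A compact sketch of that resolution logic:

package sketch

import (
	"encoding/base64"
	"fmt"
	"net/url"
)

// resolveDownloadURL unwraps the base64-encoded real URL that 123pan sometimes
// hides in the "params" query value, and derives the Referer from the original
// host rather than a fixed domain.
func resolveDownloadURL(raw string) (target, referer string, err error) {
	ou, err := url.Parse(raw)
	if err != nil {
		return "", "", err
	}
	target = ou.String()
	if p := ou.Query().Get("params"); p != "" {
		du, err := base64.StdEncoding.DecodeString(p)
		if err != nil {
			return "", "", err
		}
		u, err := url.Parse(string(du))
		if err != nil {
			return "", "", err
		}
		target = u.String()
	}
	referer = fmt.Sprintf("%s://%s/", ou.Scheme, ou.Host)
	return target, referer, nil
}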

View File

@ -11,7 +11,8 @@ type Addition struct {
driver.RootID
//OrderBy string `json:"order_by" type:"select" options:"file_id,file_name,size,update_at" default:"file_name"`
//OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
AccessToken string
AccessToken string
UploadThread int `json:"UploadThread" type:"number" default:"3" help:"the threads of upload"`
}
var config = driver.Config{
@ -22,6 +23,11 @@ var config = driver.Config{
func init() {
op.RegisterDriver(func() driver.Driver {
return &Pan123{}
// Newly added default options must be set here in RegisterDriver for them to take effect for users already running the driver
return &Pan123{
Addition: Addition{
UploadThread: 3,
},
}
})
}

View File

@ -6,11 +6,16 @@ import (
"io"
"net/http"
"strconv"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
)
@ -69,18 +74,21 @@ func (d *Pan123) completeS3(ctx context.Context, upReq *UploadResp, file model.F
}
func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, up driver.UpdateProgress) error {
tmpF, err := file.CacheFullInTempFile()
// fetch s3 pre signed urls
size := file.GetSize()
chunkSize := int64(16 * utils.MB)
chunkCount := 1
if size > chunkSize {
chunkCount = int((size + chunkSize - 1) / chunkSize)
}
ss, err := stream.NewStreamSectionReader(file, int(chunkSize), &up)
if err != nil {
return err
}
// fetch s3 pre signed urls
size := file.GetSize()
chunkSize := min(size, 16*utils.MB)
chunkCount := int(size / chunkSize)
lastChunkSize := size % chunkSize
if lastChunkSize > 0 {
chunkCount++
} else {
if lastChunkSize == 0 {
lastChunkSize = chunkSize
}
// only 1 batch is allowed
@ -90,73 +98,99 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
batchSize = 10
getS3UploadUrl = d.getS3PreSignedUrls
}
thread := min(int(chunkCount), d.UploadThread)
threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
for i := 1; i <= chunkCount; i += batchSize {
if utils.IsCanceled(ctx) {
return ctx.Err()
if utils.IsCanceled(uploadCtx) {
break
}
start := i
end := min(i+batchSize, chunkCount+1)
s3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, start, end)
s3PreSignedUrls, err := getS3UploadUrl(uploadCtx, upReq, start, end)
if err != nil {
return err
}
// upload each chunk
for j := start; j < end; j++ {
if utils.IsCanceled(ctx) {
return ctx.Err()
for cur := start; cur < end; cur++ {
if utils.IsCanceled(uploadCtx) {
break
}
offset := int64(cur-1) * chunkSize
curSize := chunkSize
if j == chunkCount {
if cur == chunkCount {
curSize = lastChunkSize
}
err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.NewSectionReader(tmpF, chunkSize*int64(j-1), curSize), curSize, false, getS3UploadUrl)
if err != nil {
return err
}
up(float64(j) * 100 / float64(chunkCount))
var reader *stream.SectionReader
var rateLimitedRd io.Reader
threadG.GoWithLifecycle(errgroup.Lifecycle{
Before: func(ctx context.Context) error {
if reader == nil {
var err error
reader, err = ss.GetSectionReader(offset, curSize)
if err != nil {
return err
}
rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
}
return nil
},
Do: func(ctx context.Context) error {
reader.Seek(0, io.SeekStart)
uploadUrl := s3PreSignedUrls.Data.PreSignedUrls[strconv.Itoa(cur)]
if uploadUrl == "" {
return fmt.Errorf("upload url is empty, s3PreSignedUrls: %+v", s3PreSignedUrls)
}
reader.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, rateLimitedRd)
if err != nil {
return err
}
req.ContentLength = curSize
//req.Header.Set("Content-Length", strconv.FormatInt(curSize, 10))
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode == http.StatusForbidden {
singleflight.AnyGroup.Do(fmt.Sprintf("Pan123.newUpload_%p", threadG), func() (any, error) {
newS3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, cur, end)
if err != nil {
return nil, err
}
s3PreSignedUrls.Data.PreSignedUrls = newS3PreSignedUrls.Data.PreSignedUrls
return nil, nil
})
if err != nil {
return err
}
return fmt.Errorf("upload s3 chunk %d failed, status code: %d", cur, res.StatusCode)
}
if res.StatusCode != http.StatusOK {
body, err := io.ReadAll(res.Body)
if err != nil {
return err
}
return fmt.Errorf("upload s3 chunk %d failed, status code: %d, body: %s", cur, res.StatusCode, body)
}
progress := 10.0 + 85.0*float64(threadG.Success())/float64(chunkCount)
up(progress)
return nil
},
After: func(err error) {
ss.FreeSectionReader(reader)
},
})
}
}
if err := threadG.Wait(); err != nil {
return err
}
defer up(100)
// complete s3 upload
return d.completeS3(ctx, upReq, file, chunkCount > 1)
}
func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSignedUrls *S3PreSignedURLs, cur, end int, reader *io.SectionReader, curSize int64, retry bool, getS3UploadUrl func(ctx context.Context, upReq *UploadResp, start int, end int) (*S3PreSignedURLs, error)) error {
uploadUrl := s3PreSignedUrls.Data.PreSignedUrls[strconv.Itoa(cur)]
if uploadUrl == "" {
return fmt.Errorf("upload url is empty, s3PreSignedUrls: %+v", s3PreSignedUrls)
}
req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, reader))
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = curSize
//req.Header.Set("Content-Length", strconv.FormatInt(curSize, 10))
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode == http.StatusForbidden {
if retry {
return fmt.Errorf("upload s3 chunk %d failed, status code: %d", cur, res.StatusCode)
}
// refresh s3 pre signed urls
newS3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, cur, end)
if err != nil {
return err
}
s3PreSignedUrls.Data.PreSignedUrls = newS3PreSignedUrls.Data.PreSignedUrls
// retry
reader.Seek(0, io.SeekStart)
return d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, cur, end, reader, curSize, true, getS3UploadUrl)
}
if res.StatusCode != http.StatusOK {
body, err := io.ReadAll(res.Body)
if err != nil {
return err
}
return fmt.Errorf("upload s3 chunk %d failed, status code: %d, body: %s", cur, res.StatusCode, body)
}
return nil
}
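
Note: the rewrite above replaces the sequential uploadS3Chunk helper with an ordered errgroup whose per-task lifecycle separates section-reader acquisition (Before), the actual PUT (Do), and buffer release (After), and guards the presigned-URL refresh with singleflight so concurrent 403s trigger only one re-fetch. The errgroup.Lifecycle and singleflight.AnyGroup APIs are the project's own; a sketch of the same shape using the golang.org/x/sync equivalents:

package sketch

import (
	"context"
	"errors"
	"io"

	"golang.org/x/sync/errgroup"
	"golang.org/x/sync/singleflight"
)

// errForbidden stands in for the HTTP 403 an expired presigned URL produces.
var errForbidden = errors.New("presigned url expired")

var sf singleflight.Group

// uploadConcurrently: bounded concurrency, a rewindable section reader per
// part, and a singleflight-guarded refresh of the presigned URLs.
func uploadConcurrently(ctx context.Context, f io.ReaderAt, fileSize, chunkSize int64, threads int,
	putPart func(ctx context.Context, r io.Reader, size int64, part int) error,
	refreshURLs func(ctx context.Context) error) error {

	g, ctx := errgroup.WithContext(ctx)
	g.SetLimit(threads)
	parts := (fileSize + chunkSize - 1) / chunkSize
	for i := int64(1); i <= parts; i++ {
		i := i // loop-variable capture, for Go < 1.22
		g.Go(func() error {
			offset := (i - 1) * chunkSize
			size := min(chunkSize, fileSize-offset)
			rd := io.NewSectionReader(f, offset, size)
			err := putPart(ctx, rd, size, int(i))
			if errors.Is(err, errForbidden) {
				// collapse simultaneous refreshes into a single request
				if _, rErr, _ := sf.Do("refresh", func() (any, error) {
					return nil, refreshURLs(ctx)
				}); rErr != nil {
					return rErr
				}
				rd.Seek(0, io.SeekStart) // rewind before the second attempt
				err = putPart(ctx, rd, size, int(i))
			}
			return err
		})
	}
	return g.Wait()
}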

View File

@ -2,7 +2,9 @@ package _123_open
import (
"context"
"fmt"
"strconv"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
@ -67,13 +69,45 @@ func (d *Open123) List(ctx context.Context, dir model.Obj, args model.ListArgs)
func (d *Open123) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
fileId, _ := strconv.ParseInt(file.GetID(), 10, 64)
if d.DirectLink {
res, err := d.getDirectLink(fileId)
if err != nil {
return nil, err
}
if d.DirectLinkPrivateKey == "" {
duration := 365 * 24 * time.Hour // cache for one year
return &model.Link{
URL: res.Data.URL,
Expiration: &duration,
}, nil
}
u, err := d.getUserInfo()
if err != nil {
return nil, err
}
duration := time.Duration(d.DirectLinkValidDuration) * time.Minute
newURL, err := d.SignURL(res.Data.URL, d.DirectLinkPrivateKey,
u.Data.UID, duration)
if err != nil {
return nil, err
}
return &model.Link{
URL: newURL,
Expiration: &duration,
}, nil
}
res, err := d.getDownloadInfo(fileId)
if err != nil {
return nil, err
}
link := model.Link{URL: res.Data.DownloadUrl}
return &link, nil
return &model.Link{URL: res.Data.DownloadUrl}, nil
}
func (d *Open123) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
@ -95,6 +129,22 @@ func (d *Open123) Rename(ctx context.Context, srcObj model.Obj, newName string)
}
func (d *Open123) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
// Try to implement copy via upload + MD5 instant transfer (秒传)
// 1. create the file
// parentFileID: parent directory ID; use 0 when uploading to the root directory
parentFileId, err := strconv.ParseInt(dstDir.GetID(), 10, 64)
if err != nil {
return fmt.Errorf("parse parentFileID error: %v", err)
}
etag := srcObj.(File).Etag
createResp, err := d.create(parentFileId, srcObj.GetName(), etag, srcObj.GetSize(), 2, false)
if err != nil {
return err
}
// instant-transfer hit?
if createResp.Data.Reuse {
return nil
}
return errs.NotSupport
}
@ -104,26 +154,64 @@ func (d *Open123) Remove(ctx context.Context, obj model.Obj) error {
return d.trash(fileId)
}
func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
// 1. create the file
// parentFileID: parent directory ID; use 0 when uploading to the root directory
parentFileId, err := strconv.ParseInt(dstDir.GetID(), 10, 64)
if err != nil {
return nil, fmt.Errorf("parse parentFileID error: %v", err)
}
// etag: the file's MD5
etag := file.GetHash().GetHash(utils.MD5)
if len(etag) < utils.MD5.Width {
_, etag, err = stream.CacheFullInTempFileAndHash(file, utils.MD5)
_, etag, err = stream.CacheFullAndHash(file, &up, utils.MD5)
if err != nil {
return err
return nil, err
}
}
createResp, err := d.create(parentFileId, file.GetName(), etag, file.GetSize(), 2, false)
if err != nil {
return err
return nil, err
}
// instant-transfer hit?
if createResp.Data.Reuse {
return nil
// a valid FileID is returned only when the instant transfer succeeds; otherwise it is 0
if createResp.Data.FileID != 0 {
return File{
FileName: file.GetName(),
Size: file.GetSize(),
FileId: createResp.Data.FileID,
Type: 2,
Etag: etag,
}, nil
}
}
up(10)
return d.Upload(ctx, file, createResp, up)
// 2. upload the slices
err = d.Upload(ctx, file, createResp, up)
if err != nil {
return nil, err
}
// 3. finish the upload
for range 60 {
uploadCompleteResp, err := d.complete(createResp.Data.PreuploadID)
// an unknown error code (20103) may be returned; the docs don't explain it
if err == nil && uploadCompleteResp.Data.Completed && uploadCompleteResp.Data.FileID != 0 {
up(100)
return File{
FileName: file.GetName(),
Size: file.GetSize(),
FileId: uploadCompleteResp.Data.FileID,
Type: 2,
Etag: etag,
}, nil
}
// if the API returns completed == false, keep polling this endpoint at 1-second intervals for the final upload result
time.Sleep(time.Second)
}
return nil, fmt.Errorf("upload complete timeout")
}
var _ driver.Driver = (*Open123)(nil)
var _ driver.PutResult = (*Open123)(nil)
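
Note: the `for range 60` loop above polls complete once per second for at most a minute. A sketch of the same polling shape with context cancellation added (names hypothetical):

package sketch

import (
	"context"
	"fmt"
	"time"
)

// waitComplete polls a completion check once per second, up to tries attempts,
// honoring context cancellation between attempts.
func waitComplete(ctx context.Context, tries int, check func() (done bool, err error)) error {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for i := 0; i < tries; i++ {
		if done, err := check(); err == nil && done {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
		}
	}
	return fmt.Errorf("upload complete timeout")
}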

View File

@ -23,6 +23,11 @@ type Addition struct {
// number of upload threads
UploadThread int `json:"UploadThread" type:"number" default:"3" help:"the threads of upload"`
// use direct links
DirectLink bool `json:"DirectLink" type:"bool" default:"false" required:"false" help:"use direct link when download file"`
DirectLinkPrivateKey string `json:"DirectLinkPrivateKey" required:"false" help:"private key for direct link, if URL authentication is enabled"`
DirectLinkValidDuration int64 `json:"DirectLinkValidDuration" type:"number" default:"30" required:"false" help:"minutes, if URL authentication is enabled"`
driver.RootID
}

View File

@ -73,7 +73,9 @@ func (f File) GetName() string {
}
func (f File) CreateTime() time.Time {
parsedTime, err := time.Parse("2006-01-02 15:04:05", f.CreateAt)
// the returned timestamp carries no timezone info; assume UTC+8
loc := time.FixedZone("UTC+8", 8*60*60)
parsedTime, err := time.ParseInLocation("2006-01-02 15:04:05", f.CreateAt, loc)
if err != nil {
return time.Now()
}
@ -81,7 +83,9 @@ func (f File) CreateTime() time.Time {
}
func (f File) ModTime() time.Time {
parsedTime, err := time.Parse("2006-01-02 15:04:05", f.UpdateAt)
// the returned timestamp carries no timezone info; assume UTC+8
loc := time.FixedZone("UTC+8", 8*60*60)
parsedTime, err := time.ParseInLocation("2006-01-02 15:04:05", f.UpdateAt, loc)
if err != nil {
return time.Now()
}
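
Note: why ParseInLocation matters here: with a zone-less layout, time.Parse silently assumes UTC, so API timestamps issued in UTC+8 come out eight hours off. A runnable illustration:

package main

import (
	"fmt"
	"time"
)

func main() {
	loc := time.FixedZone("UTC+8", 8*60*60)
	// time.Parse assumes UTC when the layout carries no zone...
	utc, _ := time.Parse("2006-01-02 15:04:05", "2025-01-02 08:00:00")
	// ...ParseInLocation pins the same wall-clock string to UTC+8.
	cst, _ := time.ParseInLocation("2006-01-02 15:04:05", "2025-01-02 08:00:00", loc)
	fmt.Println(utc.Sub(cst)) // 8h0m0s: the naive parse lands eight hours late
}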
@ -123,7 +127,7 @@ type RefreshTokenResp struct {
type UserInfoResp struct {
BaseResp
Data struct {
UID int64 `json:"uid"`
UID uint64 `json:"uid"`
Username string `json:"username"`
DisplayName string `json:"displayName"`
HeadImage string `json:"headImage"`
@ -154,52 +158,30 @@ type DownloadInfoResp struct {
} `json:"data"`
}
type DirectLinkResp struct {
BaseResp
Data struct {
URL string `json:"url"`
} `json:"data"`
}
// create file V2 response
type UploadCreateResp struct {
BaseResp
Data struct {
FileID int64 `json:"fileID"`
PreuploadID string `json:"preuploadID"`
Reuse bool `json:"reuse"`
SliceSize int64 `json:"sliceSize"`
FileID int64 `json:"fileID"`
PreuploadID string `json:"preuploadID"`
Reuse bool `json:"reuse"`
SliceSize int64 `json:"sliceSize"`
Servers []string `json:"servers"`
} `json:"data"`
}
type UploadUrlResp struct {
BaseResp
Data struct {
PresignedURL string `json:"presignedURL"`
}
}
// upload complete V2 response
type UploadCompleteResp struct {
BaseResp
Data struct {
Async bool `json:"async"`
Completed bool `json:"completed"`
FileID int64 `json:"fileID"`
} `json:"data"`
}
type UploadAsyncResp struct {
BaseResp
Data struct {
Completed bool `json:"completed"`
FileID int64 `json:"fileID"`
} `json:"data"`
}
type UploadResp struct {
BaseResp
Data struct {
AccessKeyId string `json:"AccessKeyId"`
Bucket string `json:"Bucket"`
Key string `json:"Key"`
SecretAccessKey string `json:"SecretAccessKey"`
SessionToken string `json:"SessionToken"`
FileId int64 `json:"FileId"`
Reuse bool `json:"Reuse"`
EndPoint string `json:"EndPoint"`
StorageNode string `json:"StorageNode"`
UploadId string `json:"UploadId"`
} `json:"data"`
}

View File

@ -1,21 +1,28 @@
package _123_open
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"mime/multipart"
"net/http"
"strconv"
"strings"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
)
// create file V2
func (d *Open123) create(parentFileID int64, filename string, etag string, size int64, duplicate int, containDir bool) (*UploadCreateResp, error) {
var resp UploadCreateResp
_, err := d.Request(UploadCreate, http.MethodPost, func(req *resty.Request) {
@ -34,21 +41,136 @@ func (d *Open123) create(parentFileID int64, filename string, etag string, size
return &resp, nil
}
func (d *Open123) url(preuploadID string, sliceNo int64) (string, error) {
// get upload url
var resp UploadUrlResp
_, err := d.Request(UploadUrl, http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"preuploadId": preuploadID,
"sliceNo": sliceNo,
})
}, &resp)
// upload slice V2
func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createResp *UploadCreateResp, up driver.UpdateProgress) error {
uploadDomain := createResp.Data.Servers[0]
size := file.GetSize()
chunkSize := createResp.Data.SliceSize
ss, err := stream.NewStreamSectionReader(file, int(chunkSize), &up)
if err != nil {
return "", err
return err
}
return resp.Data.PresignedURL, nil
uploadNums := (size + chunkSize - 1) / chunkSize
thread := min(int(uploadNums), d.UploadThread)
threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
for partIndex := range uploadNums {
if utils.IsCanceled(uploadCtx) {
break
}
partIndex := partIndex
partNumber := partIndex + 1 // slice numbers start at 1
offset := partIndex * chunkSize
size := min(chunkSize, size-offset)
var reader *stream.SectionReader
var rateLimitedRd io.Reader
sliceMD5 := ""
// multipart form
b := bytes.NewBuffer(make([]byte, 0, 2048))
threadG.GoWithLifecycle(errgroup.Lifecycle{
Before: func(ctx context.Context) error {
if reader == nil {
var err error
// one reader per slice
reader, err = ss.GetSectionReader(offset, size)
if err != nil {
return err
}
// compute this slice's MD5
sliceMD5, err = utils.HashReader(utils.MD5, reader)
if err != nil {
return err
}
}
return nil
},
Do: func(ctx context.Context) error {
// reset the slice reader position: HashReader (or a previous failed attempt) has already read it to EOF
reader.Seek(0, io.SeekStart)
b.Reset()
w := multipart.NewWriter(b)
// add the form fields
err = w.WriteField("preuploadID", createResp.Data.PreuploadID)
if err != nil {
return err
}
err = w.WriteField("sliceNo", strconv.FormatInt(partNumber, 10))
if err != nil {
return err
}
err = w.WriteField("sliceMD5", sliceMD5)
if err != nil {
return err
}
// write the file content part
_, err = w.CreateFormFile("slice", fmt.Sprintf("%s.part%d", file.GetName(), partNumber))
if err != nil {
return err
}
headSize := b.Len()
err = w.Close()
if err != nil {
return err
}
head := bytes.NewReader(b.Bytes()[:headSize])
tail := bytes.NewReader(b.Bytes()[headSize:])
rateLimitedRd = driver.NewLimitedUploadStream(ctx, io.MultiReader(head, reader, tail))
// build the request and set its headers
req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadDomain+"/upload/v2/file/slice", rateLimitedRd)
if err != nil {
return err
}
// set the request headers
req.Header.Add("Authorization", "Bearer "+d.AccessToken)
req.Header.Add("Content-Type", w.FormDataContentType())
req.Header.Add("Platform", "open_platform")
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != 200 {
return fmt.Errorf("slice %d upload failed, status code: %d", partNumber, res.StatusCode)
}
var resp BaseResp
respBody, err := io.ReadAll(res.Body)
if err != nil {
return err
}
err = json.Unmarshal(respBody, &resp)
if err != nil {
return err
}
if resp.Code != 0 {
return fmt.Errorf("slice %d upload failed: %s", partNumber, resp.Message)
}
progress := 10.0 + 85.0*float64(threadG.Success())/float64(uploadNums)
up(progress)
return nil
},
After: func(err error) {
ss.FreeSectionReader(reader)
},
})
}
if err := threadG.Wait(); err != nil {
return err
}
return nil
}
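
Note: the buffer dance above is the standard trick for streaming a multipart upload: serialize the form fields and the file-part header into one buffer, split it at headSize, and splice the payload between head and tail with io.MultiReader, so the slice bytes are never copied into memory. Distilled into a standalone helper:

package sketch

import (
	"bytes"
	"io"
	"mime/multipart"
)

// multipartBody builds a streaming multipart body: every field plus the
// file-part header goes into a buffer, the writer is closed to obtain the
// trailing boundary, and the payload is streamed between the two halves.
func multipartBody(fields map[string]string, fileField, fileName string, payload io.Reader) (io.Reader, string, error) {
	var b bytes.Buffer
	w := multipart.NewWriter(&b)
	for k, v := range fields {
		if err := w.WriteField(k, v); err != nil {
			return nil, "", err
		}
	}
	if _, err := w.CreateFormFile(fileField, fileName); err != nil {
		return nil, "", err
	}
	headSize := b.Len() // everything up to and including the file-part header
	if err := w.Close(); err != nil { // appends the closing boundary
		return nil, "", err
	}
	head := bytes.NewReader(b.Bytes()[:headSize])
	tail := bytes.NewReader(b.Bytes()[headSize:])
	// the payload is streamed, so a multi-GB slice never lives in memory
	return io.MultiReader(head, payload, tail), w.FormDataContentType(), nil
}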
// upload complete
func (d *Open123) complete(preuploadID string) (*UploadCompleteResp, error) {
var resp UploadCompleteResp
_, err := d.Request(UploadComplete, http.MethodPost, func(req *resty.Request) {
@ -61,91 +183,3 @@ func (d *Open123) complete(preuploadID string) (*UploadCompleteResp, error) {
}
return &resp, nil
}
func (d *Open123) async(preuploadID string) (*UploadAsyncResp, error) {
var resp UploadAsyncResp
_, err := d.Request(UploadAsync, http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"preuploadID": preuploadID,
})
}, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}
func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createResp *UploadCreateResp, up driver.UpdateProgress) error {
size := file.GetSize()
chunkSize := createResp.Data.SliceSize
uploadNums := (size + chunkSize - 1) / chunkSize
threadG, uploadCtx := errgroup.NewGroupWithContext(ctx, d.UploadThread,
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
for partIndex := int64(0); partIndex < uploadNums; partIndex++ {
if utils.IsCanceled(uploadCtx) {
return ctx.Err()
}
partIndex := partIndex
partNumber := partIndex + 1 // slice numbers start at 1
offset := partIndex * chunkSize
size := min(chunkSize, size-offset)
limitedReader, err := file.RangeRead(http_range.Range{
Start: offset,
Length: size})
if err != nil {
return err
}
limitedReader = driver.NewLimitedUploadStream(ctx, limitedReader)
threadG.Go(func(ctx context.Context) error {
uploadPartUrl, err := d.url(createResp.Data.PreuploadID, partNumber)
if err != nil {
return err
}
req, err := http.NewRequestWithContext(ctx, "PUT", uploadPartUrl, limitedReader)
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = size
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
_ = res.Body.Close()
progress := 10.0 + 85.0*float64(threadG.Success())/float64(uploadNums)
up(progress)
return nil
})
}
if err := threadG.Wait(); err != nil {
return err
}
uploadCompleteResp, err := d.complete(createResp.Data.PreuploadID)
if err != nil {
return err
}
if uploadCompleteResp.Data.Async == false || uploadCompleteResp.Data.Completed {
return nil
}
for {
uploadAsyncResp, err := d.async(createResp.Data.PreuploadID)
if err != nil {
return err
}
if uploadAsyncResp.Data.Completed {
break
}
}
up(100)
return nil
}

View File

@ -1,15 +1,20 @@
package _123_open
import (
"crypto/md5"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/go-resty/resty/v2"
"github.com/google/uuid"
log "github.com/sirupsen/logrus"
)
@ -19,16 +24,15 @@ var ( // AccessToken QPS limits differ by scenario; modularized below for easy adjustment
AccessToken = InitApiInfo(Api+"/api/v1/access_token", 1)
RefreshToken = InitApiInfo(Api+"/api/v1/oauth2/access_token", 1)
UserInfo = InitApiInfo(Api+"/api/v1/user/info", 1)
FileList = InitApiInfo(Api+"/api/v2/file/list", 4)
DownloadInfo = InitApiInfo(Api+"/api/v1/file/download_info", 0)
FileList = InitApiInfo(Api+"/api/v2/file/list", 3)
DownloadInfo = InitApiInfo(Api+"/api/v1/file/download_info", 5)
DirectLink = InitApiInfo(Api+"/api/v1/direct-link/url", 5)
Mkdir = InitApiInfo(Api+"/upload/v1/file/mkdir", 2)
Move = InitApiInfo(Api+"/api/v1/file/move", 1)
Rename = InitApiInfo(Api+"/api/v1/file/name", 1)
Trash = InitApiInfo(Api+"/api/v1/file/trash", 2)
UploadCreate = InitApiInfo(Api+"/upload/v1/file/create", 2)
UploadUrl = InitApiInfo(Api+"/upload/v1/file/get_upload_url", 0)
UploadComplete = InitApiInfo(Api+"/upload/v1/file/upload_complete", 0)
UploadAsync = InitApiInfo(Api+"/upload/v1/file/upload_async_result", 1)
UploadCreate = InitApiInfo(Api+"/upload/v2/file/create", 2)
UploadComplete = InitApiInfo(Api+"/upload/v2/file/upload_complete", 0)
)
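
Note: each endpoint above carries its own QPS budget via InitApiInfo(path, qps). A sketch of how such a per-endpoint limiter can be built on golang.org/x/time/rate; the driver's real ApiInfo type may differ, and qps 0 is treated as unlimited here:

package sketch

import (
	"context"

	"golang.org/x/time/rate"
)

// apiInfo pairs an endpoint URL with its own token bucket.
type apiInfo struct {
	url     string
	limiter *rate.Limiter
}

func initApiInfo(url string, qps int) *apiInfo {
	a := &apiInfo{url: url}
	if qps > 0 { // qps 0 means unlimited in this sketch
		a.limiter = rate.NewLimiter(rate.Limit(qps), 1)
	}
	return a
}

// wait blocks until the endpoint's bucket has a token, or ctx is done.
func (a *apiInfo) wait(ctx context.Context) error {
	if a.limiter == nil {
		return nil
	}
	return a.limiter.Wait(ctx)
}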
func (d *Open123) Request(apiInfo *ApiInfo, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
@ -82,8 +86,24 @@ func (d *Open123) Request(apiInfo *ApiInfo, method string, callback base.ReqCall
}
func (d *Open123) flushAccessToken() error {
if d.Addition.ClientID != "" {
if d.Addition.ClientSecret != "" {
if d.ClientID != "" {
if d.RefreshToken != "" {
var resp RefreshTokenResp
_, err := d.Request(RefreshToken, http.MethodPost, func(req *resty.Request) {
req.SetQueryParam("client_id", d.ClientID)
if d.ClientSecret != "" {
req.SetQueryParam("client_secret", d.ClientSecret)
}
req.SetQueryParam("grant_type", "refresh_token")
req.SetQueryParam("refresh_token", d.RefreshToken)
}, &resp)
if err != nil {
return err
}
d.AccessToken = resp.AccessToken
d.RefreshToken = resp.RefreshToken
op.MustSaveDriverStorage(d)
} else if d.ClientSecret != "" {
var resp AccessTokenResp
_, err := d.Request(AccessToken, http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
@ -96,24 +116,38 @@ func (d *Open123) flushAccessToken() error {
}
d.AccessToken = resp.Data.AccessToken
op.MustSaveDriverStorage(d)
} else if d.Addition.RefreshToken != "" {
var resp RefreshTokenResp
_, err := d.Request(RefreshToken, http.MethodPost, func(req *resty.Request) {
req.SetQueryParam("client_id", d.ClientID)
req.SetQueryParam("grant_type", "refresh_token")
req.SetQueryParam("refresh_token", d.Addition.RefreshToken)
}, &resp)
if err != nil {
return err
}
d.AccessToken = resp.AccessToken
d.RefreshToken = resp.RefreshToken
op.MustSaveDriverStorage(d)
}
}
return nil
}
func (d *Open123) SignURL(originURL, privateKey string, uid uint64, validDuration time.Duration) (newURL string, err error) {
// generate the Unix expiry timestamp
ts := time.Now().Add(validDuration).Unix()
// generate a random string (UUID recommended; must not contain hyphens)
rand := strings.ReplaceAll(uuid.New().String(), "-", "")
// parse the URL
objURL, err := url.Parse(originURL)
if err != nil {
return "", err
}
// string to sign, format: path-timestamp-rand-uid-privateKey
unsignedStr := fmt.Sprintf("%s-%d-%s-%d-%s", objURL.Path, ts, rand, uid, privateKey)
md5Hash := md5.Sum([]byte(unsignedStr))
// auth parameter, format: timestamp-rand-uid-md5hash
authKey := fmt.Sprintf("%d-%s-%d-%x", ts, rand, uid, md5Hash)
// append the auth parameter to the URL query string
v := objURL.Query()
v.Add("auth_key", authKey)
objURL.RawQuery = v.Encode()
return objURL.String(), nil
}
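
Note: SignURL implements the documented CDN URL-authentication scheme: auth_key = timestamp-rand-uid-md5(path-timestamp-rand-uid-privateKey). A sketch of the matching verification, with expiry validation of the timestamp omitted for brevity:

package sketch

import (
	"crypto/md5"
	"fmt"
	"strings"
)

// verifyAuthKey checks an auth_key of the form timestamp-rand-uid-md5hex
// against the signing scheme used by SignURL above.
func verifyAuthKey(objPath, privateKey, authKey string) bool {
	parts := strings.SplitN(authKey, "-", 4)
	if len(parts) != 4 {
		return false
	}
	unsigned := fmt.Sprintf("%s-%s-%s-%s-%s", objPath, parts[0], parts[1], parts[2], privateKey)
	sum := md5.Sum([]byte(unsigned))
	return parts[3] == fmt.Sprintf("%x", sum)
}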
func (d *Open123) getUserInfo() (*UserInfoResp, error) {
var resp UserInfoResp
@ -161,6 +195,21 @@ func (d *Open123) getDownloadInfo(fileId int64) (*DownloadInfoResp, error) {
return &resp, nil
}
func (d *Open123) getDirectLink(fileId int64) (*DirectLinkResp, error) {
var resp DirectLinkResp
_, err := d.Request(DirectLink, http.MethodGet, func(req *resty.Request) {
req.SetQueryParams(map[string]string{
"fileId": strconv.FormatInt(fileId, 10),
})
}, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}
func (d *Open123) mkdir(parentID int64, name string) error {
_, err := d.Request(Mkdir, http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{

View File

@ -70,14 +70,6 @@ func (d *Pan123Share) List(ctx context.Context, dir model.Obj, args model.ListAr
func (d *Pan123Share) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
// TODO return link of file, required
if f, ok := file.(File); ok {
//var resp DownResp
var headers map[string]string
if !utils.IsLocalIPAddr(args.IP) {
headers = map[string]string{
//"X-Real-IP": "1.1.1.1",
"X-Forwarded-For": args.IP,
}
}
data := base.Json{
"shareKey": d.ShareKey,
"SharePwd": d.SharePwd,
@ -87,25 +79,27 @@ func (d *Pan123Share) Link(ctx context.Context, file model.Obj, args model.LinkA
"size": f.Size,
}
resp, err := d.request(DownloadInfo, http.MethodPost, func(req *resty.Request) {
req.SetBody(data).SetHeaders(headers)
req.SetBody(data)
}, nil)
if err != nil {
return nil, err
}
downloadUrl := utils.Json.Get(resp, "data", "DownloadURL").ToString()
u, err := url.Parse(downloadUrl)
ou, err := url.Parse(downloadUrl)
if err != nil {
return nil, err
}
nu := u.Query().Get("params")
u_ := ou.String()
nu := ou.Query().Get("params")
if nu != "" {
du, _ := base64.StdEncoding.DecodeString(nu)
u, err = url.Parse(string(du))
u, err := url.Parse(string(du))
if err != nil {
return nil, err
}
u_ = u.String()
}
u_ := u.String()
log.Debug("download url: ", u_)
res, err := base.NoRedirectClient.R().SetHeader("Referer", "https://www.123pan.com/").Get(u_)
if err != nil {
@ -122,7 +116,7 @@ func (d *Pan123Share) Link(ctx context.Context, file model.Obj, args model.LinkA
link.URL = utils.Json.Get(res.Body(), "data", "redirect_url").ToString()
}
link.Header = http.Header{
"Referer": []string{"https://www.123pan.com/"},
"Referer": []string{fmt.Sprintf("%s://%s/", ou.Scheme, ou.Host)},
}
return &link, nil
}

View File

@ -15,17 +15,10 @@ type Addition struct {
}
var config = driver.Config{
Name: "123PanShare",
LocalSort: true,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: true,
NeedMs: false,
DefaultRoot: "0",
CheckStatus: false,
Alert: "",
NoOverwriteUpload: false,
Name: "123PanShare",
LocalSort: true,
NoUpload: true,
DefaultRoot: "0",
}
func init() {

View File

@ -522,30 +522,27 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
var err error
fullHash := stream.GetHash().GetHash(utils.SHA256)
if len(fullHash) != utils.SHA256.Width {
_, fullHash, err = streamPkg.CacheFullInTempFileAndHash(stream, utils.SHA256)
_, fullHash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA256)
if err != nil {
return err
}
}
size := stream.GetSize()
var partSize = d.getPartSize(size)
part := size / partSize
if size%partSize > 0 {
part++
} else if part == 0 {
part = 1
partSize := d.getPartSize(size)
part := int64(1)
if size > partSize {
part = (size + partSize - 1) / partSize
}
// generate all partInfos
partInfos := make([]PartInfo, 0, part)
for i := int64(0); i < part; i++ {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
start := i * partSize
byteSize := size - start
if byteSize > partSize {
byteSize = partSize
}
byteSize := min(size-start, partSize)
partNumber := i + 1
partInfo := PartInfo{
PartNumber: partNumber,
@ -593,17 +590,20 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
// resp.Data.RapidUpload == true means rapid upload is supported, but here we simply check whether part upload URLs were returned
// conflicts still have to be handled manually in the rapid-upload case
if resp.Data.PartInfos != nil {
// read the upload URLs for the first 100 parts
uploadPartInfos := resp.Data.PartInfos
// Progress
p := driver.NewProgress(size, up)
rateLimited := driver.NewLimitedUploadStream(ctx, stream)
// fetch upload URLs for the remaining parts
for i := 101; i < len(partInfos); i += 100 {
end := i + 100
if end > len(partInfos) {
end = len(partInfos)
}
// upload the first 100 parts first
err = d.uploadPersonalParts(ctx, partInfos, resp.Data.PartInfos, rateLimited, p)
if err != nil {
return err
}
// if parts remain, fetch their upload URLs in batches and upload them
for i := 100; i < len(partInfos); i += 100 {
end := min(i+100, len(partInfos))
batchPartInfos := partInfos[i:end]
moredata := base.Json{
"fileId": resp.Data.FileId,
"uploadId": resp.Data.UploadId,
@ -619,45 +619,13 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
if err != nil {
return err
}
uploadPartInfos = append(uploadPartInfos, moreresp.Data.PartInfos...)
}
// Progress
p := driver.NewProgress(size, up)
rateLimited := driver.NewLimitedUploadStream(ctx, stream)
// upload all parts
for _, uploadPartInfo := range uploadPartInfos {
index := uploadPartInfo.PartNumber - 1
partSize := partInfos[index].PartSize
log.Debugf("[139] uploading part %+v/%+v", index, len(uploadPartInfos))
limitReader := io.LimitReader(rateLimited, partSize)
// Update Progress
r := io.TeeReader(limitReader, p)
req, err := http.NewRequest("PUT", uploadPartInfo.UploadUrl, r)
err = d.uploadPersonalParts(ctx, partInfos, moreresp.Data.PartInfos, rateLimited, p)
if err != nil {
return err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Content-Length", fmt.Sprint(partSize))
req.Header.Set("Origin", "https://yun.139.com")
req.Header.Set("Referer", "https://yun.139.com/")
req.ContentLength = partSize
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
_ = res.Body.Close()
log.Debugf("[139] uploaded: %+v", res)
if res.StatusCode != http.StatusOK {
return fmt.Errorf("unexpected status code: %d", res.StatusCode)
}
}
// complete once every part is uploaded
data = base.Json{
"contentHash": fullHash,
"contentHashAlgorithm": "SHA256",
@ -786,12 +754,10 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
size := stream.GetSize()
// Progress
p := driver.NewProgress(size, up)
var partSize = d.getPartSize(size)
part := size / partSize
if size%partSize > 0 {
part++
} else if part == 0 {
part = 1
partSize := d.getPartSize(size)
part := int64(1)
if size > partSize {
part = (size + partSize - 1) / partSize
}
rateLimited := driver.NewLimitedUploadStream(ctx, stream)
for i := int64(0); i < part; i++ {
@ -805,12 +771,10 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
limitReader := io.LimitReader(rateLimited, byteSize)
// Update Progress
r := io.TeeReader(limitReader, p)
req, err := http.NewRequest("POST", resp.Data.UploadResult.RedirectionURL, r)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, resp.Data.UploadResult.RedirectionURL, r)
if err != nil {
return err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "text/plain;name="+unicode(stream.GetName()))
req.Header.Set("contentSize", strconv.FormatInt(size, 10))
req.Header.Set("range", fmt.Sprintf("bytes=%d-%d", start, start+byteSize-1))

View File

@ -1,9 +1,11 @@
package _139
import (
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"path"
@ -13,6 +15,7 @@ import (
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
@ -623,3 +626,47 @@ func (d *Yun139) getPersonalCloudHost() string {
}
return d.PersonalCloudHost
}
func (d *Yun139) uploadPersonalParts(ctx context.Context, partInfos []PartInfo, uploadPartInfos []PersonalPartInfo, rateLimited *driver.RateLimitReader, p *driver.Progress) error {
// ensure the slice is sorted by PartNumber in ascending order
sort.Slice(uploadPartInfos, func(i, j int) bool {
return uploadPartInfos[i].PartNumber < uploadPartInfos[j].PartNumber
})
for _, uploadPartInfo := range uploadPartInfos {
index := uploadPartInfo.PartNumber - 1
if index < 0 || index >= len(partInfos) {
return fmt.Errorf("invalid PartNumber %d: index out of bounds (partInfos length: %d)", uploadPartInfo.PartNumber, len(partInfos))
}
partSize := partInfos[index].PartSize
log.Debugf("[139] uploading part %+v/%+v", index, len(partInfos))
limitReader := io.LimitReader(rateLimited, partSize)
r := io.TeeReader(limitReader, p)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadPartInfo.UploadUrl, r)
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Content-Length", fmt.Sprint(partSize))
req.Header.Set("Origin", "https://yun.139.com")
req.Header.Set("Referer", "https://yun.139.com/")
req.ContentLength = partSize
err = func() error {
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
log.Debugf("[139] uploaded: %+v", res)
if res.StatusCode != http.StatusOK {
body, _ := io.ReadAll(res.Body)
return fmt.Errorf("unexpected status code: %d, body: %s", res.StatusCode, string(body))
}
return nil
}()
if err != nil {
return err
}
}
return nil
}
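
Note: one detail worth calling out in uploadPersonalParts above: the request/response handling is wrapped in an immediately invoked closure so that defer res.Body.Close() fires at the end of each iteration instead of piling up until the function returns. A standalone illustration (http.Get stands in for the real PUT):

package sketch

import (
	"io"
	"net/http"
)

// drainAll shows the per-iteration defer pattern: without the inner func,
// every res.Body.Close() would be deferred to function exit, holding
// connections open across iterations.
func drainAll(urls []string) error {
	for _, u := range urls {
		if err := func() error {
			res, err := http.Get(u) // stand-in for the real upload request
			if err != nil {
				return err
			}
			defer res.Body.Close() // runs at the end of this iteration
			_, err = io.Copy(io.Discard, res.Body)
			return err
		}(); err != nil {
			return err
		}
	}
	return nil
}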

View File

@ -365,11 +365,10 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
log.Debugf("uploadData: %+v", uploadData)
requestURL := uploadData.RequestURL
uploadHeaders := strings.Split(decodeURIComponent(uploadData.RequestHeader), "&")
req, err := http.NewRequest(http.MethodPut, requestURL, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
req, err := http.NewRequestWithContext(ctx, http.MethodPut, requestURL, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
if err != nil {
return err
}
req = req.WithContext(ctx)
for _, v := range uploadHeaders {
i := strings.Index(v, "=")
req.Header.Set(v[0:i], v[i+1:])

View File

@ -5,17 +5,19 @@ import (
"encoding/base64"
"encoding/xml"
"fmt"
"github.com/skip2/go-qrcode"
"io"
"net/http"
"strconv"
"strings"
"time"
"github.com/skip2/go-qrcode"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/go-resty/resty/v2"
@ -129,6 +131,7 @@ func (y *Cloud189TV) put(ctx context.Context, url string, headers map[string]str
}
}
// http.Client closes Request.Body once the request completes
resp, err := base.HttpClient.Do(req)
if err != nil {
return nil, err
@ -311,11 +314,14 @@ func (y *Cloud189TV) RapidUpload(ctx context.Context, dstDir model.Obj, stream m
// legacy upload; the family cloud does not support overwrite
func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
tempFile, err := file.CacheFullInTempFile()
if err != nil {
return nil, err
fileMd5 := file.GetHash().GetHash(utils.MD5)
var tempFile = file.GetFile()
var err error
if len(fileMd5) != utils.MD5.Width {
tempFile, fileMd5, err = stream.CacheFullAndHash(file, &up, utils.MD5)
} else if tempFile == nil {
tempFile, err = file.CacheFullAndWriter(&up, nil)
}
fileMd5, err := utils.HashFile(utils.MD5, tempFile)
if err != nil {
return nil, err
}
@ -328,6 +334,10 @@ func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model
// the file does not exist in the cloud drive; start uploading
status := GetUploadFileStatusResp{CreateUploadFileResp: *uploadInfo}
// driver.RateLimitReader tries to Close the underlying reader,
// but tempFile here is an *os.File, which cannot be read again after Close,
// so wrap it in io.NopCloser
rateLimitedRd := driver.NewLimitedUploadStream(ctx, io.NopCloser(tempFile))
for status.GetSize() < file.GetSize() && status.FileDataExists != 1 {
if utils.IsCanceled(ctx) {
return nil, ctx.Err()
@ -345,7 +355,7 @@ func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model
header["Edrive-UploadFileId"] = fmt.Sprint(status.UploadFileId)
}
_, err := y.put(ctx, status.FileUploadUrl, header, true, io.NopCloser(tempFile), isFamily)
_, err := y.put(ctx, status.FileUploadUrl, header, true, rateLimitedRd, isFamily)
if err, ok := err.(*RespErr); ok && err.Code != "InputStreamReadError" {
return nil, err
}

View File

@ -7,6 +7,7 @@ import (
"encoding/hex"
"encoding/xml"
"fmt"
"hash"
"io"
"net/http"
"net/http/cookiejar"
@ -471,14 +472,16 @@ func (y *Cloud189PC) refreshSession() (err error) {
// regular upload
// zero-size files cannot be uploaded
func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
size := file.GetSize()
sliceSize := partSize(size)
// file size
fileSize := file.GetSize()
// slice size; must not equal the file size
sliceSize := partSize(fileSize)
params := Params{
"parentFolderId": dstDir.GetID(),
"fileName": url.QueryEscape(file.GetName()),
"fileSize": fmt.Sprint(file.GetSize()),
"sliceSize": fmt.Sprint(sliceSize),
"fileSize": fmt.Sprint(fileSize),
"sliceSize": fmt.Sprint(sliceSize), // 必须为特定分片大小
"lazyCheck": "1",
}
@ -500,66 +503,101 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
return nil, err
}
threadG, upCtx := errgroup.NewGroupWithContext(ctx, y.uploadThread,
ss, err := stream.NewStreamSectionReader(file, int(sliceSize), &up)
if err != nil {
return nil, err
}
threadG, upCtx := errgroup.NewOrderedGroupWithContext(ctx, y.uploadThread,
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
count := int(size / sliceSize)
lastPartSize := size % sliceSize
if lastPartSize > 0 {
count++
} else {
count := 1
if fileSize > sliceSize {
count = int((fileSize + sliceSize - 1) / sliceSize)
}
lastPartSize := fileSize % sliceSize
if lastPartSize == 0 {
lastPartSize = sliceSize
}
fileMd5 := utils.MD5.NewFunc()
silceMd5 := utils.MD5.NewFunc()
silceMd5Hexs := make([]string, 0, count)
teeReader := io.TeeReader(file, io.MultiWriter(fileMd5, silceMd5))
byteSize := sliceSize
silceMd5 := utils.MD5.NewFunc()
var writers io.Writer = silceMd5
fileMd5Hex := file.GetHash().GetHash(utils.MD5)
var fileMd5 hash.Hash
if len(fileMd5Hex) != utils.MD5.Width {
fileMd5 = utils.MD5.NewFunc()
writers = io.MultiWriter(silceMd5, fileMd5)
}
for i := 1; i <= count; i++ {
if utils.IsCanceled(upCtx) {
break
}
offset := int64((i)-1) * sliceSize
partSize := sliceSize
if i == count {
byteSize = lastPartSize
}
byteData := make([]byte, byteSize)
// read the chunk
silceMd5.Reset()
if _, err := io.ReadFull(teeReader, byteData); err != io.EOF && err != nil {
return nil, err
partSize = lastPartSize
}
partInfo := ""
var reader *stream.SectionReader
var rateLimitedRd io.Reader
threadG.GoWithLifecycle(errgroup.Lifecycle{
Before: func(ctx context.Context) error {
if reader == nil {
var err error
reader, err = ss.GetSectionReader(offset, partSize)
if err != nil {
return err
}
silceMd5.Reset()
w, err := utils.CopyWithBuffer(writers, reader)
if w != partSize {
return fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", partSize, w, err)
}
// compute the chunk MD5 and encode it as hex and base64
md5Bytes := silceMd5.Sum(nil)
silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Bytes)))
partInfo = fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes))
// compute the chunk MD5 and encode it as hex and base64
md5Bytes := silceMd5.Sum(nil)
silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Bytes)))
partInfo := fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes))
rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
}
return nil
},
Do: func(ctx context.Context) error {
reader.Seek(0, io.SeekStart)
uploadUrls, err := y.GetMultiUploadUrls(ctx, isFamily, initMultiUpload.Data.UploadFileID, partInfo)
if err != nil {
return err
}
threadG.Go(func(ctx context.Context) error {
uploadUrls, err := y.GetMultiUploadUrls(ctx, isFamily, initMultiUpload.Data.UploadFileID, partInfo)
if err != nil {
return err
}
// step 4: upload the slice
uploadUrl := uploadUrls[0]
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false,
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)), isFamily)
if err != nil {
return err
}
up(float64(threadG.Success()) * 100 / float64(count))
return nil
})
// step 4: upload the slice
uploadUrl := uploadUrls[0]
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false,
driver.NewLimitedUploadStream(ctx, rateLimitedRd), isFamily)
if err != nil {
return err
}
up(float64(threadG.Success()) * 100 / float64(count))
return nil
},
After: func(err error) {
ss.FreeSectionReader(reader)
},
},
)
}
if err = threadG.Wait(); err != nil {
return nil, err
}
fileMd5Hex := strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
if fileMd5 != nil {
fileMd5Hex = strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
}
sliceMd5Hex := fileMd5Hex
if file.GetSize() > sliceSize {
if fileSize > sliceSize {
sliceMd5Hex = strings.ToUpper(utils.GetMD5EncodeStr(strings.Join(silceMd5Hexs, "\n")))
}
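
Note: the Before/Do split above computes each slice's MD5 while the section reader is first filled, then reuses the reader for the actual PUT. The check values follow 189's scheme: per-slice MD5 (hex for the final check, base64 inside partInfo), a whole-file MD5, and, with more than one slice, an overall "slice MD5" that is the MD5 of the uppercase hex digests joined by "\n". A stdlib sketch on in-memory slices (assuming utils.GetMD5EncodeStr is hex-of-MD5):

package sketch

import (
	"crypto/md5"
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"strings"
)

// sliceChecksums reproduces the checksum scheme used by StreamUpload.
func sliceChecksums(slices [][]byte) (fileMd5Hex, sliceMd5Hex string, partInfos []string) {
	fileMd5 := md5.New()
	hexs := make([]string, 0, len(slices))
	for i, s := range slices {
		sum := md5.Sum(s)
		fileMd5.Write(s)
		hexs = append(hexs, strings.ToUpper(hex.EncodeToString(sum[:])))
		partInfos = append(partInfos, fmt.Sprintf("%d-%s", i+1, base64.StdEncoding.EncodeToString(sum[:])))
	}
	fileMd5Hex = strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
	sliceMd5Hex = fileMd5Hex
	if len(slices) > 1 {
		joined := md5.Sum([]byte(strings.Join(hexs, "\n")))
		sliceMd5Hex = strings.ToUpper(hex.EncodeToString(joined[:]))
	}
	return
}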
@ -620,11 +658,12 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
cache = tmpF
}
sliceSize := partSize(size)
count := int(size / sliceSize)
count := 1
if size > sliceSize {
count = int((size + sliceSize - 1) / sliceSize)
}
lastSliceSize := size % sliceSize
if lastSliceSize > 0 {
count++
} else {
if lastSliceSize == 0 {
lastSliceSize = sliceSize
}
@ -738,7 +777,8 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
}
// step 4: upload the slice
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, io.NewSectionReader(cache, offset, byteSize), isFamily)
rateLimitedRd := driver.NewLimitedUploadStream(ctx, io.NewSectionReader(cache, offset, byteSize))
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, rateLimitedRd, isFamily)
if err != nil {
return err
}
@ -820,7 +860,7 @@ func (y *Cloud189PC) GetMultiUploadUrls(ctx context.Context, isFamily bool, uplo
// legacy upload; the family cloud does not support overwrite
func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
tempFile, fileMd5, err := stream.CacheFullInTempFileAndHash(file, utils.MD5)
tempFile, fileMd5, err := stream.CacheFullAndHash(file, &up, utils.MD5)
if err != nil {
return nil, err
}

View File

@ -3,7 +3,9 @@ package alias
import (
"context"
"errors"
"fmt"
"io"
"net/url"
stdpath "path"
"strings"
@ -11,8 +13,11 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/fs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/sign"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/OpenList/v4/server/common"
)
type Alias struct {
@ -75,10 +80,18 @@ func (d *Alias) Get(ctx context.Context, path string) (model.Obj, error) {
return nil, errs.ObjectNotFound
}
for _, dst := range dsts {
obj, err := d.get(ctx, path, dst, sub)
if err == nil {
return obj, nil
obj, err := fs.Get(ctx, stdpath.Join(dst, sub), &fs.GetArgs{NoLog: true})
if err != nil {
continue
}
return &model.Object{
Path: path,
Name: obj.GetName(),
Size: obj.GetSize(),
Modified: obj.ModTime(),
IsFolder: obj.IsDir(),
HashInfo: obj.GetHash(),
}, nil
}
return nil, errs.ObjectNotFound
}
@ -96,7 +109,27 @@ func (d *Alias) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
var objs []model.Obj
fsArgs := &fs.ListArgs{NoLog: true, Refresh: args.Refresh}
for _, dst := range dsts {
tmp, err := d.list(ctx, dst, sub, fsArgs)
tmp, err := fs.List(ctx, stdpath.Join(dst, sub), fsArgs)
if err == nil {
tmp, err = utils.SliceConvert(tmp, func(obj model.Obj) (model.Obj, error) {
thumb, ok := model.GetThumb(obj)
objRes := model.Object{
Name: obj.GetName(),
Size: obj.GetSize(),
Modified: obj.ModTime(),
IsFolder: obj.IsDir(),
}
if !ok {
return &objRes, nil
}
return &model.ObjThumb{
Object: objRes,
Thumbnail: model.Thumbnail{
Thumbnail: thumb,
},
}, nil
})
}
if err == nil {
objs = append(objs, tmp...)
}
@ -110,22 +143,45 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
if !ok {
return nil, errs.ObjectNotFound
}
// proxy || ftp,s3
if common.GetApiUrl(ctx) == "" {
args.Redirect = false
}
for _, dst := range dsts {
link, err := d.link(ctx, dst, sub, args)
if err == nil {
link.Expiration = nil // drop the unnecessary cache; op.Link inside d.link already caches
if !args.Redirect && len(link.URL) > 0 {
// normally, concurrent download is only supported by drivers that return a URL
// nesting alias inside alias lets drivers that don't return a URL (crypt, mega, etc.) support concurrency
if d.DownloadConcurrency > 0 {
link.Concurrency = d.DownloadConcurrency
}
if d.DownloadPartSize > 0 {
link.PartSize = d.DownloadPartSize * utils.KB
}
}
return link, nil
reqPath := stdpath.Join(dst, sub)
link, fi, err := d.link(ctx, reqPath, args)
if err != nil {
continue
}
if link == nil {
// redirect requested, but the target must go through the proxy
return &model.Link{
URL: fmt.Sprintf("%s/p%s?sign=%s",
common.GetApiUrl(ctx),
utils.EncodePath(reqPath, true),
sign.Sign(reqPath)),
}, nil
}
resultLink := *link
resultLink.SyncClosers = utils.NewSyncClosers(link)
if args.Redirect {
return &resultLink, nil
}
if resultLink.ContentLength == 0 {
resultLink.ContentLength = fi.GetSize()
}
if resultLink.MFile != nil {
return &resultLink, nil
}
if d.DownloadConcurrency > 0 {
resultLink.Concurrency = d.DownloadConcurrency
}
if d.DownloadPartSize > 0 {
resultLink.PartSize = d.DownloadPartSize * utils.KB
}
return &resultLink, nil
}
return nil, errs.ObjectNotFound
}
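
Note: the Get/List/Link/Extract methods in this driver all share one lookup shape: iterate the alias destinations, skip failures, and return the first success. A generic sketch of that pattern (errNotFound stands in for the project's errs.ObjectNotFound):

package sketch

import "errors"

var errNotFound = errors.New("object not found")

// firstHit tries each destination in order and returns the first success;
// not-found is reported only if every destination fails.
func firstHit[T any](dsts []string, get func(dst string) (T, error)) (T, error) {
	var zero T
	for _, dst := range dsts {
		if v, err := get(dst); err == nil {
			return v, nil
		}
	}
	return zero, errNotFound
}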
@ -167,7 +223,8 @@ func (d *Alias) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
}
if len(srcPath) == len(dstPath) {
for i := range srcPath {
err = errors.Join(err, fs.Move(ctx, *srcPath[i], *dstPath[i]))
_, e := fs.Move(ctx, *srcPath[i], *dstPath[i])
err = errors.Join(err, e)
}
return err
} else {
@ -251,20 +308,29 @@ func (d *Alias) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer,
reqPath, err := d.getReqPath(ctx, dstDir, true)
if err == nil {
if len(reqPath) == 1 {
return fs.PutDirectly(ctx, *reqPath[0], s)
} else {
defer s.Close()
file, err := s.CacheFullInTempFile()
storage, reqActualPath, err := op.GetStorageAndActualPath(*reqPath[0])
if err != nil {
return err
}
for _, path := range reqPath {
return op.Put(ctx, storage, reqActualPath, &stream.FileStream{
Obj: s,
Mimetype: s.GetMimetype(),
Reader: s,
}, up)
} else {
file, err := s.CacheFullAndWriter(nil, nil)
if err != nil {
return err
}
count := float64(len(reqPath) + 1)
up(100 / count)
for i, path := range reqPath {
err = errors.Join(err, fs.PutDirectly(ctx, *path, &stream.FileStream{
Obj: s,
Mimetype: s.GetMimetype(),
WebPutAsTask: s.NeedStore(),
Reader: file,
Obj: s,
Mimetype: s.GetMimetype(),
Reader: file,
}))
up(float64(i+2) / float64(count) * 100)
_, e := file.Seek(0, io.SeekStart)
if e != nil {
return errors.Join(err, e)
@ -336,18 +402,24 @@ func (d *Alias) Extract(ctx context.Context, obj model.Obj, args model.ArchiveIn
return nil, errs.ObjectNotFound
}
for _, dst := range dsts {
link, err := d.extract(ctx, dst, sub, args)
if err == nil {
if !args.Redirect && len(link.URL) > 0 {
if d.DownloadConcurrency > 0 {
link.Concurrency = d.DownloadConcurrency
}
if d.DownloadPartSize > 0 {
link.PartSize = d.DownloadPartSize * utils.KB
}
}
return link, nil
reqPath := stdpath.Join(dst, sub)
link, err := d.extract(ctx, reqPath, args)
if err != nil {
continue
}
if link == nil {
return &model.Link{
URL: fmt.Sprintf("%s/ap%s?inner=%s&pass=%s&sign=%s",
common.GetApiUrl(ctx),
utils.EncodePath(reqPath, true),
utils.EncodePath(args.InnerPath, true),
url.QueryEscape(args.Password),
sign.SignArchive(reqPath)),
}, nil
}
resultLink := *link
resultLink.SyncClosers = utils.NewSyncClosers(link)
return &resultLink, nil
}
return nil, errs.NotImplement
}

View File

@ -2,8 +2,6 @@ package alias
import (
"context"
"fmt"
"net/url"
stdpath "path"
"strings"
@ -12,8 +10,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/fs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/sign"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/OpenList/v4/server/common"
)
@ -54,79 +50,22 @@ func (d *Alias) getRootAndPath(path string) (string, string) {
return parts[0], parts[1]
}
func (d *Alias) get(ctx context.Context, path string, dst, sub string) (model.Obj, error) {
obj, err := fs.Get(ctx, stdpath.Join(dst, sub), &fs.GetArgs{NoLog: true})
if err != nil {
return nil, err
}
return &model.Object{
Path: path,
Name: obj.GetName(),
Size: obj.GetSize(),
Modified: obj.ModTime(),
IsFolder: obj.IsDir(),
HashInfo: obj.GetHash(),
}, nil
}
func (d *Alias) list(ctx context.Context, dst, sub string, args *fs.ListArgs) ([]model.Obj, error) {
objs, err := fs.List(ctx, stdpath.Join(dst, sub), args)
// the obj must implement the model.SetPath interface
// return objs, err
if err != nil {
return nil, err
}
return utils.SliceConvert(objs, func(obj model.Obj) (model.Obj, error) {
thumb, ok := model.GetThumb(obj)
objRes := model.Object{
Name: obj.GetName(),
Size: obj.GetSize(),
Modified: obj.ModTime(),
IsFolder: obj.IsDir(),
}
if !ok {
return &objRes, nil
}
return &model.ObjThumb{
Object: objRes,
Thumbnail: model.Thumbnail{
Thumbnail: thumb,
},
}, nil
})
}
func (d *Alias) link(ctx context.Context, dst, sub string, args model.LinkArgs) (*model.Link, error) {
reqPath := stdpath.Join(dst, sub)
// modeled on the crypt driver
func (d *Alias) link(ctx context.Context, reqPath string, args model.LinkArgs) (*model.Link, model.Obj, error) {
storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
if err != nil {
return nil, err
return nil, nil, err
}
useRawLink := len(common.GetApiUrl(ctx)) == 0 // ftp、s3
if !useRawLink {
_, ok := storage.(*Alias)
useRawLink = !ok && !args.Redirect
if !args.Redirect {
return op.Link(ctx, storage, reqActualPath, args)
}
if useRawLink {
link, _, err := op.Link(ctx, storage, reqActualPath, args)
return link, err
}
_, err = fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
obj, err := fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
if err != nil {
return nil, err
return nil, nil, err
}
if common.ShouldProxy(storage, stdpath.Base(sub)) {
link := &model.Link{
URL: fmt.Sprintf("%s/p%s?sign=%s",
common.GetApiUrl(ctx),
utils.EncodePath(reqPath, true),
sign.Sign(reqPath)),
}
return link, nil
if common.ShouldProxy(storage, stdpath.Base(reqPath)) {
return nil, obj, nil
}
link, _, err := op.Link(ctx, storage, reqActualPath, args)
return link, err
return op.Link(ctx, storage, reqActualPath, args)
}
func (d *Alias) getReqPath(ctx context.Context, obj model.Obj, isParent bool) ([]*string, error) {
@ -197,8 +136,7 @@ func (d *Alias) listArchive(ctx context.Context, dst, sub string, args model.Arc
return nil, errs.NotImplement
}
func (d *Alias) extract(ctx context.Context, dst, sub string, args model.ArchiveInnerArgs) (*model.Link, error) {
reqPath := stdpath.Join(dst, sub)
func (d *Alias) extract(ctx context.Context, reqPath string, args model.ArchiveInnerArgs) (*model.Link, error) {
storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
if err != nil {
return nil, err
@ -206,20 +144,12 @@ func (d *Alias) extract(ctx context.Context, dst, sub string, args model.Archive
if _, ok := storage.(driver.ArchiveReader); !ok {
return nil, errs.NotImplement
}
if args.Redirect && common.ShouldProxy(storage, stdpath.Base(sub)) {
_, err = fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
if err != nil {
if args.Redirect && common.ShouldProxy(storage, stdpath.Base(reqPath)) {
_, err := fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
if err == nil {
return nil, err
}
link := &model.Link{
URL: fmt.Sprintf("%s/ap%s?inner=%s&pass=%s&sign=%s",
common.GetApiUrl(ctx),
utils.EncodePath(reqPath, true),
utils.EncodePath(args.InnerPath, true),
url.QueryEscape(args.Password),
sign.SignArchive(reqPath)),
}
return link, nil
return nil, nil
}
link, _, err := op.DriverExtract(ctx, storage, reqActualPath, args)
return link, err

View File

@ -165,7 +165,7 @@ func (d *AliDrive) Remove(ctx context.Context, obj model.Obj) error {
}
func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.FileStreamer, up driver.UpdateProgress) error {
file := stream.FileStream{
file := &stream.FileStream{
Obj: streamer,
Reader: streamer,
Mimetype: streamer.GetMimetype(),
@ -209,7 +209,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
io.Closer
}{
Reader: io.MultiReader(buf, file),
Closer: &file,
Closer: file,
}
}
} else {
@ -297,11 +297,10 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
if d.InternalUpload {
url = partInfo.InternalUploadUrl
}
req, err := http.NewRequest("PUT", url, io.LimitReader(rateLimited, DEFAULT))
req, err := http.NewRequestWithContext(ctx, http.MethodPut, url, io.LimitReader(rateLimited, DEFAULT))
if err != nil {
return err
}
req = req.WithContext(ctx)
res, err := base.HttpClient.Do(req)
if err != nil {
return err

View File

@ -3,7 +3,6 @@ package aliyundrive_open
import (
"context"
"errors"
"fmt"
"net/http"
"path/filepath"
"time"
@ -13,7 +12,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/rateg"
"github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
)
@ -24,9 +22,8 @@ type AliyundriveOpen struct {
DriveId string
limitList func(ctx context.Context, data base.Json) (*Files, error)
limitLink func(ctx context.Context, file model.Obj) (*model.Link, error)
ref *AliyundriveOpen
limiter *limiter
ref *AliyundriveOpen
}
func (d *AliyundriveOpen) Config() driver.Config {
@ -38,25 +35,23 @@ func (d *AliyundriveOpen) GetAddition() driver.Additional {
}
func (d *AliyundriveOpen) Init(ctx context.Context) error {
d.limiter = getLimiterForUser(globalLimiterUserID) // First create a globally shared limiter to limit the initial requests.
if d.LIVPDownloadFormat == "" {
d.LIVPDownloadFormat = "jpeg"
}
if d.DriveType == "" {
d.DriveType = "default"
}
res, err := d.request("/adrive/v1.0/user/getDriveInfo", http.MethodPost, nil)
res, err := d.request(ctx, limiterOther, "/adrive/v1.0/user/getDriveInfo", http.MethodPost, nil)
if err != nil {
d.limiter.free()
d.limiter = nil
return err
}
d.DriveId = utils.Json.Get(res, d.DriveType+"_drive_id").ToString()
d.limitList = rateg.LimitFnCtx(d.list, rateg.LimitFnOption{
Limit: 4,
Bucket: 1,
})
d.limitLink = rateg.LimitFnCtx(d.link, rateg.LimitFnOption{
Limit: 1,
Bucket: 1,
})
userid := utils.Json.Get(res, "user_id").ToString()
d.limiter.free()
d.limiter = getLimiterForUser(userid) // Allocate a corresponding limiter for each user.
return nil
}
@ -70,6 +65,8 @@ func (d *AliyundriveOpen) InitReference(storage driver.Driver) error {
}
func (d *AliyundriveOpen) Drop(ctx context.Context) error {
d.limiter.free()
d.limiter = nil
d.ref = nil
return nil
}
@ -87,9 +84,6 @@ func (d *AliyundriveOpen) GetRoot(ctx context.Context) (model.Obj, error) {
}
func (d *AliyundriveOpen) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
if d.limitList == nil {
return nil, fmt.Errorf("driver not init")
}
files, err := d.getFiles(ctx, dir.GetID())
if err != nil {
return nil, err
@ -107,8 +101,8 @@ func (d *AliyundriveOpen) List(ctx context.Context, dir model.Obj, args model.Li
return objs, err
}
func (d *AliyundriveOpen) link(ctx context.Context, file model.Obj) (*model.Link, error) {
res, err := d.request("/adrive/v1.0/openFile/getDownloadUrl", http.MethodPost, func(req *resty.Request) {
func (d *AliyundriveOpen) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
res, err := d.request(ctx, limiterLink, "/adrive/v1.0/openFile/getDownloadUrl", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"file_id": file.GetID(),
@ -132,17 +126,10 @@ func (d *AliyundriveOpen) link(ctx context.Context, file model.Obj) (*model.Link
}, nil
}
func (d *AliyundriveOpen) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if d.limitLink == nil {
return nil, fmt.Errorf("driver not init")
}
return d.limitLink(ctx, file)
}
func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
nowTime, _ := getNowTime()
newDir := File{CreatedAt: nowTime, UpdatedAt: nowTime}
_, err := d.request("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"parent_file_id": parentDir.GetID(),
@ -168,7 +155,7 @@ func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirN
func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
var resp MoveOrCopyResp
_, err := d.request("/adrive/v1.0/openFile/move", http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/move", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"file_id": srcObj.GetID(),
@ -198,7 +185,7 @@ func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (m
func (d *AliyundriveOpen) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
var newFile File
_, err := d.request("/adrive/v1.0/openFile/update", http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/update", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"file_id": srcObj.GetID(),
@ -230,7 +217,7 @@ func (d *AliyundriveOpen) Rename(ctx context.Context, srcObj model.Obj, newName
func (d *AliyundriveOpen) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
var resp MoveOrCopyResp
_, err := d.request("/adrive/v1.0/openFile/copy", http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/copy", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"file_id": srcObj.GetID(),
@ -256,7 +243,7 @@ func (d *AliyundriveOpen) Remove(ctx context.Context, obj model.Obj) error {
if d.RemoveWay == "delete" {
uri = "/adrive/v1.0/openFile/delete"
}
_, err := d.request(uri, http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterOther, uri, http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"file_id": obj.GetID(),
@ -295,7 +282,7 @@ func (d *AliyundriveOpen) Other(ctx context.Context, args model.OtherArgs) (inte
default:
return nil, errs.NotSupport
}
_, err := d.request(uri, http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterOther, uri, http.MethodPost, func(req *resty.Request) {
req.SetBody(data).SetResult(&resp)
})
if err != nil {

View File

@ -0,0 +1,96 @@
package aliyundrive_open
import (
"context"
"fmt"
"sync"
"golang.org/x/time/rate"
)
// See document https://www.yuque.com/aliyundrive/zpfszx/mqocg38hlxzc5vcd
// See issue https://github.com/OpenListTeam/OpenList/issues/724
// The limit applies per user per app, so the limiter should be shared globally.
type limiterType int
const (
limiterList limiterType = iota
limiterLink
limiterOther
)
const (
listRateLimit = 3.9 // the document allows 4 per second; use 3.9 to stay safely below the limit
linkRateLimit = 0.9 // the document allows 1 per second; use 0.9 to stay safely below the limit
otherRateLimit = 14.9 // the document allows 15 per second; use 14.9 to stay safely below the limit
globalLimiterUserID = "" // Global limiter user ID, used to limit the initial requests.
)
type limiter struct {
usedBy int
list *rate.Limiter
link *rate.Limiter
other *rate.Limiter
}
var limiters = make(map[string]*limiter)
var limitersLock = &sync.Mutex{}
func getLimiterForUser(userid string) *limiter {
limitersLock.Lock()
defer limitersLock.Unlock()
defer func() {
// Clean up limiters that are no longer used.
for id, lim := range limiters {
if lim.usedBy <= 0 && id != globalLimiterUserID { // Do not delete the global limiter.
delete(limiters, id)
}
}
}()
if lim, ok := limiters[userid]; ok {
lim.usedBy++
return lim
}
lim := &limiter{
usedBy: 1,
list: rate.NewLimiter(rate.Limit(listRateLimit), 1),
link: rate.NewLimiter(rate.Limit(linkRateLimit), 1),
other: rate.NewLimiter(rate.Limit(otherRateLimit), 1),
}
limiters[userid] = lim
return lim
}
func (l *limiter) wait(ctx context.Context, typ limiterType) error {
if l == nil {
return fmt.Errorf("driver not init")
}
switch typ {
case limiterList:
return l.list.Wait(ctx)
case limiterLink:
return l.link.Wait(ctx)
case limiterOther:
return l.other.Wait(ctx)
default:
return fmt.Errorf("unknown limiter type")
}
}
func (l *limiter) free() {
if l == nil {
return
}
limitersLock.Lock()
defer limitersLock.Unlock()
l.usedBy--
}
func (d *AliyundriveOpen) wait(ctx context.Context, typ limiterType) error {
if d == nil {
return fmt.Errorf("driver not init")
}
if d.ref != nil {
return d.ref.wait(ctx, typ) // If this is a reference driver, wait on the reference driver.
}
return d.limiter.wait(ctx, typ)
}
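The new limiter.go centralizes rate limiting: getLimiterForUser returns a reference-counted set of token buckets shared by every driver instance of the same user, Init first borrows the global limiter and swaps to the per-user one once user_id is known, and Drop releases the reference. Below is a minimal, self-contained sketch of the same acquire/wait/release pattern (sharedLimiter, acquire, and release are illustrative names; cleanup is simplified to an immediate delete rather than the sweep getLimiterForUser performs):

package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/time/rate"
)

// sharedLimiter is reference-counted so that all driver instances that
// belong to the same user share one set of token buckets.
type sharedLimiter struct {
	usedBy int
	link   *rate.Limiter
}

var (
	limiters = map[string]*sharedLimiter{}
	mu       sync.Mutex
)

// acquire returns the existing limiter for userID, or creates one.
func acquire(userID string) *sharedLimiter {
	mu.Lock()
	defer mu.Unlock()
	if l, ok := limiters[userID]; ok {
		l.usedBy++
		return l
	}
	l := &sharedLimiter{usedBy: 1, link: rate.NewLimiter(rate.Limit(0.9), 1)}
	limiters[userID] = l
	return l
}

// release drops one reference and deletes the entry when unused.
func release(userID string, l *sharedLimiter) {
	mu.Lock()
	defer mu.Unlock()
	l.usedBy--
	if l.usedBy <= 0 {
		delete(limiters, userID)
	}
}

func main() {
	lim := acquire("user-1")
	defer release("user-1", lim)
	ctx := context.Background()
	for i := 0; i < 3; i++ {
		if err := lim.link.Wait(ctx); err != nil { // blocks to stay under ~0.9 req/s
			fmt.Println("wait:", err)
			return
		}
		fmt.Println("link request", i)
	}
}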

View File

@ -12,6 +12,7 @@ type Addition struct {
OrderBy string `json:"order_by" type:"select" options:"name,size,updated_at,created_at"`
OrderDirection string `json:"order_direction" type:"select" options:"ASC,DESC"`
UseOnlineAPI bool `json:"use_online_api" default:"true"`
AlipanType string `json:"alipan_type" required:"true" type:"select" default:"default" options:"default,alipanTV"`
APIAddress string `json:"api_url_address" default:"https://api.oplist.org/alicloud/renewapi"`
ClientID string `json:"client_id" help:"Keep it empty if you don't have one"`
ClientSecret string `json:"client_secret" help:"Keep it empty if you don't have one"`
@ -24,12 +25,6 @@ type Addition struct {
var config = driver.Config{
Name: "AliyundriveOpen",
LocalSort: false,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: false,
NeedMs: false,
DefaultRoot: "root",
NoOverwriteUpload: true,
}

View File

@ -50,10 +50,10 @@ func calPartSize(fileSize int64) int64 {
return partSize
}
func (d *AliyundriveOpen) getUploadUrl(count int, fileId, uploadId string) ([]PartInfo, error) {
func (d *AliyundriveOpen) getUploadUrl(ctx context.Context, count int, fileId, uploadId string) ([]PartInfo, error) {
partInfoList := makePartInfos(count)
var resp CreateResp
_, err := d.request("/adrive/v1.0/openFile/getUploadUrl", http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/getUploadUrl", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"file_id": fileId,
@ -69,7 +69,7 @@ func (d *AliyundriveOpen) uploadPart(ctx context.Context, r io.Reader, partInfo
if d.InternalUpload {
uploadUrl = strings.ReplaceAll(uploadUrl, "https://cn-beijing-data.aliyundrive.net/", "http://ccp-bj29-bj-1592982087.oss-cn-beijing-internal.aliyuncs.com/")
}
req, err := http.NewRequestWithContext(ctx, "PUT", uploadUrl, r)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, r)
if err != nil {
return err
}
@ -84,10 +84,10 @@ func (d *AliyundriveOpen) uploadPart(ctx context.Context, r io.Reader, partInfo
return nil
}
func (d *AliyundriveOpen) completeUpload(fileId, uploadId string) (model.Obj, error) {
func (d *AliyundriveOpen) completeUpload(ctx context.Context, fileId, uploadId string) (model.Obj, error) {
// 3. complete
var newFile File
_, err := d.request("/adrive/v1.0/openFile/complete", http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/complete", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"file_id": fileId,
@ -137,11 +137,8 @@ func (d *AliyundriveOpen) calProofCode(stream model.FileStreamer) (string, error
}
buf := make([]byte, length)
n, err := io.ReadFull(reader, buf)
if err == io.ErrUnexpectedEOF {
return "", fmt.Errorf("can't read data, expected=%d, got=%d", len(buf), n)
}
if err != nil {
return "", err
if n != int(length) {
return "", fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", length, n, err)
}
return base64.StdEncoding.EncodeToString(buf), nil
}
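The collapsed error check above relies on a property of io.ReadFull: it returns a nil error only when it has filled the entire buffer, and otherwise reports io.ErrUnexpectedEOF (or another error) together with the partial count, so comparing n against the expected length covers both failure branches the old code handled separately. A small self-contained demonstration:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	buf := make([]byte, 8)
	// The source holds only 3 bytes, so ReadFull cannot fill buf.
	n, err := io.ReadFull(strings.NewReader("abc"), buf)
	fmt.Println(n, err) // 3 unexpected EOF
	if n != len(buf) {
		fmt.Printf("failed to read all data: (expected=%d, actual=%d) %v\n", len(buf), n, err)
	}
}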
@ -183,7 +180,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
createData["pre_hash"] = hash
}
var createResp CreateResp
_, err, e := d.requestReturnErrResp("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
_, err, e := d.requestReturnErrResp(ctx, limiterOther, "/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
req.SetBody(createData).SetResult(&createResp)
})
if err != nil {
@ -194,7 +191,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
hash := stream.GetHash().GetHash(utils.SHA1)
if len(hash) != utils.SHA1.Width {
_, hash, err = streamPkg.CacheFullInTempFileAndHash(stream, utils.SHA1)
_, hash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA1)
if err != nil {
return nil, err
}
@ -208,7 +205,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
if err != nil {
return nil, fmt.Errorf("cal proof code error: %s", err.Error())
}
_, err = d.request("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
_, err = d.request(ctx, limiterOther, "/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
req.SetBody(createData).SetResult(&createResp)
})
if err != nil {
@ -219,17 +216,20 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
if !createResp.RapidUpload {
// 2. normal upload
log.Debugf("[aliyundive_open] normal upload")
ss, err := streamPkg.NewStreamSectionReader(stream, int(partSize), &up)
if err != nil {
return nil, err
}
preTime := time.Now()
var offset, length int64 = 0, partSize
//var length
for i := 0; i < len(createResp.PartInfoList); i++ {
if utils.IsCanceled(ctx) {
return nil, ctx.Err()
}
// refresh upload url if 50 minutes passed
if time.Since(preTime) > 50*time.Minute {
createResp.PartInfoList, err = d.getUploadUrl(count, createResp.FileId, createResp.UploadId)
createResp.PartInfoList, err = d.getUploadUrl(ctx, count, createResp.FileId, createResp.UploadId)
if err != nil {
return nil, err
}
@ -238,22 +238,19 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
if remain := stream.GetSize() - offset; length > remain {
length = remain
}
rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
if rapidUpload {
srd, err := stream.RangeRead(http_range.Range{Start: offset, Length: length})
if err != nil {
return nil, err
}
rd = utils.NewMultiReadable(srd)
rd, err := ss.GetSectionReader(offset, length)
if err != nil {
return nil, err
}
rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
err = retry.Do(func() error {
_ = rd.Reset()
rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
rd.Seek(0, io.SeekStart)
return d.uploadPart(ctx, rateLimitedRd, createResp.PartInfoList[i])
},
retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second))
ss.FreeSectionReader(rd)
if err != nil {
return nil, err
}
@ -266,5 +263,5 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
log.Debugf("[aliyundrive_open] create file success, resp: %+v", createResp)
// 3. complete
return d.completeUpload(createResp.FileId, createResp.UploadId)
return d.completeUpload(ctx, createResp.FileId, createResp.UploadId)
}
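The rewritten loop is an instance of the pooled section-reader pattern used throughout this changeset: NewStreamSectionReader buffers one part at a time, GetSectionReader hands back a seekable view of that part, retry.Do rewinds it before every attempt so a failed upload never leaves the next attempt with a half-consumed reader, and FreeSectionReader returns the buffer to the pool. The same shape appears again in the Cloudreve and CloudreveV4 hunks below. A self-contained sketch of why the rewind matters, with bytes.Reader standing in for the pooled section reader and uploadPart as a placeholder for the driver's rate-limited HTTP call:

package main

import (
	"bytes"
	"fmt"
	"io"
	"time"

	"github.com/avast/retry-go"
)

// uploadPart stands in for the driver-specific HTTP call; it fails twice
// to show that each retry re-reads the part from the beginning.
func uploadPart(r io.Reader, failures *int) error {
	data, err := io.ReadAll(r)
	if err != nil {
		return err
	}
	if *failures > 0 {
		*failures--
		return fmt.Errorf("transient server error (attempt saw %d bytes)", len(data))
	}
	fmt.Printf("uploaded %d bytes\n", len(data))
	return nil
}

func main() {
	part := bytes.NewReader([]byte("0123456789")) // one part of the stream
	failures := 2
	err := retry.Do(
		func() error {
			// Rewind before every attempt, mirroring rd.Seek(0, io.SeekStart)
			// in the driver code.
			if _, err := part.Seek(0, io.SeekStart); err != nil {
				return err
			}
			return uploadPart(part, &failures)
		},
		retry.Attempts(3),
		retry.DelayType(retry.BackOffDelay),
		retry.Delay(time.Second),
	)
	fmt.Println("final err:", err) // final err: <nil>
}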

View File

@ -19,7 +19,7 @@ import (
// do others that not defined in Driver interface
func (d *AliyundriveOpen) _refreshToken() (string, string, error) {
func (d *AliyundriveOpen) _refreshToken(ctx context.Context) (string, string, error) {
if d.UseOnlineAPI && d.APIAddress != "" {
u := d.APIAddress
var resp struct {
@ -27,13 +27,23 @@ func (d *AliyundriveOpen) _refreshToken() (string, string, error) {
AccessToken string `json:"access_token"`
ErrorMessage string `json:"text"`
}
_, err := base.RestyClient.R().
// set driver_txt according to the AlipanType option
driverTxt := "alicloud_qr"
if d.AlipanType == "alipanTV" {
driverTxt = "alicloud_tv"
}
err := d.wait(ctx, limiterOther)
if err != nil {
return "", "", err
}
_, err = base.RestyClient.R().
SetHeader("User-Agent", "Mozilla/5.0 (Macintosh; Apple macOS 15_5) AppleWebKit/537.36 (KHTML, like Gecko) Safari/537.36 Chrome/138.0.0.0 Openlist/425.6.30").
SetResult(&resp).
SetQueryParams(map[string]string{
"refresh_ui": d.RefreshToken,
"server_use": "true",
"driver_txt": "alicloud_qr",
"driver_txt": driverTxt,
}).
Get(u)
if err != nil {
@ -47,11 +57,14 @@ func (d *AliyundriveOpen) _refreshToken() (string, string, error) {
}
return resp.RefreshToken, resp.AccessToken, nil
}
// local refresh logic requires both client_id and client_secret
if d.ClientID == "" || d.ClientSecret == "" {
return "", "", fmt.Errorf("empty ClientID or ClientSecret")
}
err := d.wait(ctx, limiterOther)
if err != nil {
return "", "", err
}
url := API_URL + "/oauth/access_token"
//var resp base.TokenResp
var e ErrResp
@ -103,18 +116,18 @@ func getSub(token string) (string, error) {
return utils.Json.Get(bs, "sub").ToString(), nil
}
func (d *AliyundriveOpen) refreshToken() error {
func (d *AliyundriveOpen) refreshToken(ctx context.Context) error {
if d.ref != nil {
return d.ref.refreshToken()
return d.ref.refreshToken(ctx)
}
refresh, access, err := d._refreshToken()
refresh, access, err := d._refreshToken(ctx)
for i := 0; i < 3; i++ {
if err == nil {
break
} else {
log.Errorf("[ali_open] failed to refresh token: %s", err)
}
refresh, access, err = d._refreshToken()
refresh, access, err = d._refreshToken(ctx)
}
if err != nil {
return err
@ -125,12 +138,12 @@ func (d *AliyundriveOpen) refreshToken() error {
return nil
}
func (d *AliyundriveOpen) request(uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error) {
b, err, _ := d.requestReturnErrResp(uri, method, callback, retry...)
func (d *AliyundriveOpen) request(ctx context.Context, limitTy limiterType, uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error) {
b, err, _ := d.requestReturnErrResp(ctx, limitTy, uri, method, callback, retry...)
return b, err
}
func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error, *ErrResp) {
func (d *AliyundriveOpen) requestReturnErrResp(ctx context.Context, limitTy limiterType, uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error, *ErrResp) {
req := base.RestyClient.R()
// TODO check whether access_token is expired
req.SetHeader("Authorization", "Bearer "+d.getAccessToken())
@ -142,6 +155,10 @@ func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base
}
var e ErrResp
req.SetError(&e)
err := d.wait(ctx, limitTy)
if err != nil {
return nil, err, nil
}
res, err := req.Execute(method, API_URL+uri)
if err != nil {
if res != nil {
@ -152,11 +169,11 @@ func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base
isRetry := len(retry) > 0 && retry[0]
if e.Code != "" {
if !isRetry && (utils.SliceContains([]string{"AccessTokenInvalid", "AccessTokenExpired", "I400JD"}, e.Code) || d.getAccessToken() == "") {
err = d.refreshToken()
err = d.refreshToken(ctx)
if err != nil {
return nil, err, nil
}
return d.requestReturnErrResp(uri, method, callback, true)
return d.requestReturnErrResp(ctx, limitTy, uri, method, callback, true)
}
return nil, fmt.Errorf("%s:%s", e.Code, e.Message), &e
}
@ -165,7 +182,7 @@ func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base
func (d *AliyundriveOpen) list(ctx context.Context, data base.Json) (*Files, error) {
var resp Files
_, err := d.request("/adrive/v1.0/openFile/list", http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterList, "/adrive/v1.0/openFile/list", http.MethodPost, func(req *resty.Request) {
req.SetBody(data).SetResult(&resp)
})
if err != nil {
@ -194,7 +211,7 @@ func (d *AliyundriveOpen) getFiles(ctx context.Context, fileId string) ([]File,
//"video_thumbnail_width": 480,
//"image_thumbnail_width": 480,
}
resp, err := d.limitList(ctx, data)
resp, err := d.list(ctx, data)
if err != nil {
return nil, err
}

View File

@ -2,7 +2,6 @@ package aliyundrive_share
import (
"context"
"fmt"
"net/http"
"time"
@ -12,7 +11,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/cron"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/rateg"
"github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
)
@ -25,8 +23,7 @@ type AliyundriveShare struct {
DriveId string
cron *cron.Cron
limitList func(ctx context.Context, dir model.Obj) ([]model.Obj, error)
limitLink func(ctx context.Context, file model.Obj) (*model.Link, error)
limiter *limiter
}
func (d *AliyundriveShare) Config() driver.Config {
@ -38,29 +35,26 @@ func (d *AliyundriveShare) GetAddition() driver.Additional {
}
func (d *AliyundriveShare) Init(ctx context.Context) error {
err := d.refreshToken()
d.limiter = getLimiter()
err := d.refreshToken(ctx)
if err != nil {
d.limiter.free()
d.limiter = nil
return err
}
err = d.getShareToken()
err = d.getShareToken(ctx)
if err != nil {
d.limiter.free()
d.limiter = nil
return err
}
d.cron = cron.NewCron(time.Hour * 2)
d.cron.Do(func() {
err := d.refreshToken()
err := d.refreshToken(ctx)
if err != nil {
log.Errorf("%+v", err)
}
})
d.limitList = rateg.LimitFnCtx(d.list, rateg.LimitFnOption{
Limit: 4,
Bucket: 1,
})
d.limitLink = rateg.LimitFnCtx(d.link, rateg.LimitFnOption{
Limit: 1,
Bucket: 1,
})
return nil
}
@ -68,19 +62,14 @@ func (d *AliyundriveShare) Drop(ctx context.Context) error {
if d.cron != nil {
d.cron.Stop()
}
d.limiter.free()
d.limiter = nil
d.DriveId = ""
return nil
}
func (d *AliyundriveShare) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
if d.limitList == nil {
return nil, fmt.Errorf("driver not init")
}
return d.limitList(ctx, dir)
}
func (d *AliyundriveShare) list(ctx context.Context, dir model.Obj) ([]model.Obj, error) {
files, err := d.getFiles(dir.GetID())
files, err := d.getFiles(ctx, dir.GetID())
if err != nil {
return nil, err
}
@ -90,13 +79,6 @@ func (d *AliyundriveShare) list(ctx context.Context, dir model.Obj) ([]model.Obj
}
func (d *AliyundriveShare) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if d.limitLink == nil {
return nil, fmt.Errorf("driver not init")
}
return d.limitLink(ctx, file)
}
func (d *AliyundriveShare) link(ctx context.Context, file model.Obj) (*model.Link, error) {
data := base.Json{
"drive_id": d.DriveId,
"file_id": file.GetID(),
@ -105,7 +87,7 @@ func (d *AliyundriveShare) link(ctx context.Context, file model.Obj) (*model.Lin
"share_id": d.ShareId,
}
var resp ShareLinkResp
_, err := d.request("https://api.alipan.com/v2/file/get_share_link_download_url", http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterLink, "https://api.alipan.com/v2/file/get_share_link_download_url", http.MethodPost, func(req *resty.Request) {
req.SetHeader(CanaryHeaderKey, CanaryHeaderValue).SetBody(data).SetResult(&resp)
})
if err != nil {
@ -135,7 +117,7 @@ func (d *AliyundriveShare) Other(ctx context.Context, args model.OtherArgs) (int
default:
return nil, errs.NotSupport
}
_, err := d.request(url, http.MethodPost, func(req *resty.Request) {
_, err := d.request(ctx, limiterOther, url, http.MethodPost, func(req *resty.Request) {
req.SetBody(data).SetResult(&resp)
})
if err != nil {

View File

@ -0,0 +1,67 @@
package aliyundrive_share
import (
"context"
"fmt"
"golang.org/x/time/rate"
)
// See issue https://github.com/OpenListTeam/OpenList/issues/724
// There appears to be no per-user limit.
type limiterType int
const (
limiterList limiterType = iota
limiterLink
limiterOther
)
const (
listRateLimit = 3.9 // the document allows 4 per second; use 3.9 to stay safely below the limit
linkRateLimit = 0.9 // the document allows 1 per second; use 0.9 to stay safely below the limit
otherRateLimit = 14.9 // the document allows 15 per second; use 14.9 to stay safely below the limit
)
type limiter struct {
list *rate.Limiter
link *rate.Limiter
other *rate.Limiter
}
func getLimiter() *limiter {
return &limiter{
list: rate.NewLimiter(rate.Limit(listRateLimit), 1),
link: rate.NewLimiter(rate.Limit(linkRateLimit), 1),
other: rate.NewLimiter(rate.Limit(otherRateLimit), 1),
}
}
func (l *limiter) wait(ctx context.Context, typ limiterType) error {
if l == nil {
return fmt.Errorf("driver not init")
}
switch typ {
case limiterList:
return l.list.Wait(ctx)
case limiterLink:
return l.link.Wait(ctx)
case limiterOther:
return l.other.Wait(ctx)
default:
return fmt.Errorf("unknown limiter type")
}
}
func (l *limiter) free() {
}
func (d *AliyundriveShare) wait(ctx context.Context, typ limiterType) error {
if d == nil {
return fmt.Errorf("driver not init")
}
//if d.ref != nil {
// return d.ref.wait(ctx, typ) // If this is a reference driver, wait on the reference driver.
//}
return d.limiter.wait(ctx, typ)
}

View File

@ -1,6 +1,7 @@
package aliyundrive_share
import (
"context"
"errors"
"fmt"
@ -15,11 +16,15 @@ const (
CanaryHeaderValue = "client=web,app=share,version=v2.3.1"
)
func (d *AliyundriveShare) refreshToken() error {
func (d *AliyundriveShare) refreshToken(ctx context.Context) error {
err := d.wait(ctx, limiterOther)
if err != nil {
return err
}
url := "https://auth.alipan.com/v2/account/token"
var resp base.TokenResp
var e ErrorResp
_, err := base.RestyClient.R().
_, err = base.RestyClient.R().
SetBody(base.Json{"refresh_token": d.RefreshToken, "grant_type": "refresh_token"}).
SetResult(&resp).
SetError(&e).
@ -36,7 +41,11 @@ func (d *AliyundriveShare) refreshToken() error {
}
// do others that not defined in Driver interface
func (d *AliyundriveShare) getShareToken() error {
func (d *AliyundriveShare) getShareToken(ctx context.Context) error {
err := d.wait(ctx, limiterOther)
if err != nil {
return err
}
data := base.Json{
"share_id": d.ShareId,
}
@ -45,7 +54,7 @@ func (d *AliyundriveShare) getShareToken() error {
}
var e ErrorResp
var resp ShareTokenResp
_, err := base.RestyClient.R().
_, err = base.RestyClient.R().
SetResult(&resp).SetError(&e).SetBody(data).
Post("https://api.alipan.com/v2/share_link/get_share_token")
if err != nil {
@ -58,7 +67,7 @@ func (d *AliyundriveShare) getShareToken() error {
return nil
}
func (d *AliyundriveShare) request(url, method string, callback base.ReqCallback) ([]byte, error) {
func (d *AliyundriveShare) request(ctx context.Context, limitTy limiterType, url, method string, callback base.ReqCallback) ([]byte, error) {
var e ErrorResp
req := base.RestyClient.R().
SetError(&e).
@ -71,6 +80,10 @@ func (d *AliyundriveShare) request(url, method string, callback base.ReqCallback
} else {
req.SetBody("{}")
}
err := d.wait(ctx, limitTy)
if err != nil {
return nil, err
}
resp, err := req.Execute(method, url)
if err != nil {
return nil, err
@ -78,14 +91,14 @@ func (d *AliyundriveShare) request(url, method string, callback base.ReqCallback
if e.Code != "" {
if e.Code == "AccessTokenInvalid" || e.Code == "ShareLinkTokenInvalid" {
if e.Code == "AccessTokenInvalid" {
err = d.refreshToken()
err = d.refreshToken(ctx)
} else {
err = d.getShareToken()
err = d.getShareToken(ctx)
}
if err != nil {
return nil, err
}
return d.request(url, method, callback)
return d.request(ctx, limitTy, url, method, callback)
} else {
return nil, errors.New(e.Code + ": " + e.Message)
}
@ -93,7 +106,7 @@ func (d *AliyundriveShare) request(url, method string, callback base.ReqCallback
return resp.Body(), nil
}
func (d *AliyundriveShare) getFiles(fileId string) ([]File, error) {
func (d *AliyundriveShare) getFiles(ctx context.Context, fileId string) ([]File, error) {
files := make([]File, 0)
data := base.Json{
"image_thumbnail_process": "image/resize,w_160/format,jpeg",
@ -110,6 +123,10 @@ func (d *AliyundriveShare) getFiles(fileId string) ([]File, error) {
if data["marker"] == "first" {
data["marker"] = ""
}
err := d.wait(ctx, limiterList)
if err != nil {
return nil, err
}
var e ErrorResp
var resp ListResp
res, err := base.RestyClient.R().
@ -123,11 +140,11 @@ func (d *AliyundriveShare) getFiles(fileId string) ([]File, error) {
log.Debugf("aliyundrive share get files: %s", res.String())
if e.Code != "" {
if e.Code == "AccessTokenInvalid" || e.Code == "ShareLinkTokenInvalid" {
err = d.getShareToken()
err = d.getShareToken(ctx)
if err != nil {
return nil, err
}
return d.getFiles(fileId)
return d.getFiles(ctx, fileId)
}
return nil, errors.New(e.Message)
}

View File

@ -23,6 +23,7 @@ import (
_ "github.com/OpenListTeam/OpenList/v4/drivers/cloudreve"
_ "github.com/OpenListTeam/OpenList/v4/drivers/cloudreve_v4"
_ "github.com/OpenListTeam/OpenList/v4/drivers/crypt"
_ "github.com/OpenListTeam/OpenList/v4/drivers/degoo"
_ "github.com/OpenListTeam/OpenList/v4/drivers/doubao"
_ "github.com/OpenListTeam/OpenList/v4/drivers/doubao_share"
_ "github.com/OpenListTeam/OpenList/v4/drivers/dropbox"
@ -48,6 +49,7 @@ import (
_ "github.com/OpenListTeam/OpenList/v4/drivers/onedrive_app"
_ "github.com/OpenListTeam/OpenList/v4/drivers/onedrive_sharelink"
_ "github.com/OpenListTeam/OpenList/v4/drivers/openlist"
_ "github.com/OpenListTeam/OpenList/v4/drivers/openlist_share"
_ "github.com/OpenListTeam/OpenList/v4/drivers/pikpak"
_ "github.com/OpenListTeam/OpenList/v4/drivers/pikpak_share"
_ "github.com/OpenListTeam/OpenList/v4/drivers/quark_open"
@ -59,6 +61,7 @@ import (
_ "github.com/OpenListTeam/OpenList/v4/drivers/smb"
_ "github.com/OpenListTeam/OpenList/v4/drivers/strm"
_ "github.com/OpenListTeam/OpenList/v4/drivers/teambition"
_ "github.com/OpenListTeam/OpenList/v4/drivers/teldrive"
_ "github.com/OpenListTeam/OpenList/v4/drivers/terabox"
_ "github.com/OpenListTeam/OpenList/v4/drivers/thunder"
_ "github.com/OpenListTeam/OpenList/v4/drivers/thunder_browser"

View File

@ -203,11 +203,12 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
streamSize := stream.GetSize()
sliceSize := d.getSliceSize(streamSize)
count := int(streamSize / sliceSize)
count := 1
if streamSize > sliceSize {
count = int((streamSize + sliceSize - 1) / sliceSize)
}
lastBlockSize := streamSize % sliceSize
if lastBlockSize > 0 {
count++
} else {
if lastBlockSize == 0 {
lastBlockSize = sliceSize
}
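The replacement collapses the old floor-then-increment logic into a single ceiling division and keeps the convention that an exact multiple yields a full-size final block; the identical fix appears in the BaiduPhoto hunk below. Worked through: a 10 MiB stream with 4 MiB slices gives count = (10 + 4 - 1) / 4 = 3 and lastBlockSize = 10 mod 4 = 2 MiB. A small sketch (chunkPlan is an illustrative name):

package main

import "fmt"

// chunkPlan mirrors the corrected logic: ceiling-divide only when the
// stream is larger than one slice, and treat an exact multiple as a
// full-size final block.
func chunkPlan(streamSize, sliceSize int64) (count, lastBlockSize int64) {
	count = 1
	if streamSize > sliceSize {
		count = (streamSize + sliceSize - 1) / sliceSize // ceiling division
	}
	lastBlockSize = streamSize % sliceSize
	if lastBlockSize == 0 {
		lastBlockSize = sliceSize
	}
	return count, lastBlockSize
}

func main() {
	fmt.Println(chunkPlan(10<<20, 4<<20)) // 3 2097152  (last block is 2 MiB)
	fmt.Println(chunkPlan(8<<20, 4<<20))  // 2 4194304  (exact multiple)
	fmt.Println(chunkPlan(1<<20, 4<<20))  // 1 1048576  (single short block)
}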

View File

@ -262,11 +262,12 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
// compute the values needed for the upload
streamSize := stream.GetSize()
count := int(streamSize / DEFAULT)
count := 1
if streamSize > DEFAULT {
count = int((streamSize + DEFAULT - 1) / DEFAULT)
}
lastBlockSize := streamSize % DEFAULT
if lastBlockSize > 0 {
count++
} else {
if lastBlockSize == 0 {
lastBlockSize = DEFAULT
}

View File

@ -255,7 +255,7 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStr
},
UpdateProgress: up,
})
req, err := http.NewRequestWithContext(ctx, "POST", "https://pan-yz.chaoxing.com/upload", r)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, "https://pan-yz.chaoxing.com/upload", r)
if err != nil {
return err
}

View File

@ -32,7 +32,6 @@ func init() {
config: driver.Config{
Name: "ChaoXingGroupDrive",
OnlyProxy: true,
OnlyLocal: false,
DefaultRoot: "-1",
NoOverwriteUpload: true,
},

View File

@ -167,7 +167,7 @@ func (d *ChaoXing) Login() (string, error) {
return "", err
}
// Create the request
req, err := http.NewRequest("POST", "https://passport2.chaoxing.com/fanyalogin", body)
req, err := http.NewRequest(http.MethodPost, "https://passport2.chaoxing.com/fanyalogin", body)
if err != nil {
return "", err
}

View File

@ -20,6 +20,7 @@ type Addition struct {
var config = driver.Config{
Name: "Cloudreve",
DefaultRoot: "/",
LocalSort: true,
}
func init() {

View File

@ -18,8 +18,10 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/setting"
streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/cookie"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
jsoniter "github.com/json-iterator/go"
)
@ -235,13 +237,16 @@ func (d *Cloudreve) upLocal(ctx context.Context, stream model.FileStreamer, u Up
}
func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
DEFAULT := int64(u.ChunkSize)
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
if err != nil {
return err
}
uploadUrl := u.UploadURLs[0]
credential := u.Credential
var finish int64 = 0
var chunk int = 0
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
@ -249,69 +254,67 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
left := stream.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[Cloudreve-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
rd, err := ss.GetSectionReader(finish, byteSize)
if err != nil {
return err
}
req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk),
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
err = retry.Do(
func() error {
rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadUrl+"?chunk="+strconv.Itoa(chunk),
driver.NewLimitedUploadStream(ctx, rd))
if err != nil {
return err
}
req.ContentLength = byteSize
req.Header.Set("Authorization", fmt.Sprint(credential))
req.Header.Set("User-Agent", d.getUA())
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != 200 {
return fmt.Errorf("server error: %d", res.StatusCode)
}
body, err := io.ReadAll(res.Body)
if err != nil {
return err
}
var up Resp
err = json.Unmarshal(body, &up)
if err != nil {
return err
}
if up.Code != 0 {
return errors.New(up.Msg)
}
return nil
},
retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.FreeSectionReader(rd)
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = byteSize
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Authorization", fmt.Sprint(credential))
req.Header.Set("User-Agent", d.getUA())
err = func() error {
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != 200 {
return errors.New(res.Status)
}
body, err := io.ReadAll(res.Body)
if err != nil {
return err
}
var up Resp
err = json.Unmarshal(body, &up)
if err != nil {
return err
}
if up.Code != 0 {
return errors.New(up.Msg)
}
return nil
}()
if err == nil {
retryCount = 0
finish += byteSize
up(float64(finish) * 100 / float64(stream.GetSize()))
chunk++
} else {
retryCount++
if retryCount > maxRetries {
return fmt.Errorf("upload failed after %d retries due to server errors, error: %s", maxRetries, err)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("[Cloudreve-Remote] server errors while uploading, retrying after %v...", backoff)
time.Sleep(backoff)
}
finish += byteSize
up(float64(finish) * 100 / float64(stream.GetSize()))
chunk++
}
return nil
}
func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
DEFAULT := int64(u.ChunkSize)
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
if err != nil {
return err
}
uploadUrl := u.UploadURLs[0]
var finish int64 = 0
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
@ -319,46 +322,45 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
left := stream.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[Cloudreve-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
rd, err := ss.GetSectionReader(finish, byteSize)
if err != nil {
return err
}
req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
err = retry.Do(
func() error {
rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, rd))
if err != nil {
return err
}
req.ContentLength = byteSize
req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()))
req.Header.Set("User-Agent", d.getUA())
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
switch {
case res.StatusCode >= 500 && res.StatusCode <= 504:
return fmt.Errorf("server error: %d", res.StatusCode)
case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200:
data, _ := io.ReadAll(res.Body)
return errors.New(string(data))
default:
return nil
}
}, retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.FreeSectionReader(rd)
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = byteSize
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()))
req.Header.Set("User-Agent", d.getUA())
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
switch {
case res.StatusCode >= 500 && res.StatusCode <= 504:
retryCount++
if retryCount > maxRetries {
res.Body.Close()
return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("[Cloudreve-OneDrive] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
time.Sleep(backoff)
case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200:
data, _ := io.ReadAll(res.Body)
res.Body.Close()
return errors.New(string(data))
default:
res.Body.Close()
retryCount = 0
finish += byteSize
up(float64(finish) * 100 / float64(stream.GetSize()))
}
finish += byteSize
up(float64(finish) * 100 / float64(stream.GetSize()))
}
// send the callback request after a successful upload
return d.request(http.MethodPost, "/callback/onedrive/finish/"+u.SessionID, func(req *resty.Request) {
@ -367,12 +369,15 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
}
func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
DEFAULT := int64(u.ChunkSize)
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
if err != nil {
return err
}
var finish int64 = 0
var chunk int = 0
var etags []string
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
@ -380,45 +385,47 @@ func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u Uploa
left := stream.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[Cloudreve-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
rd, err := ss.GetSectionReader(finish, byteSize)
if err != nil {
return err
}
req, err := http.NewRequest("PUT", u.UploadURLs[chunk],
driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
err = retry.Do(
func() error {
rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, u.UploadURLs[chunk],
driver.NewLimitedUploadStream(ctx, rd))
if err != nil {
return err
}
req.ContentLength = byteSize
req.Header.Set("User-Agent", d.getUA())
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
etag := res.Header.Get("ETag")
res.Body.Close()
switch {
case res.StatusCode != 200:
return fmt.Errorf("server error: %d", res.StatusCode)
case etag == "":
return errors.New("failed to get ETag from header")
default:
etags = append(etags, etag)
return nil
}
}, retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.FreeSectionReader(rd)
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = byteSize
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
etag := res.Header.Get("ETag")
res.Body.Close()
switch {
case res.StatusCode != 200:
retryCount++
if retryCount > maxRetries {
return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("[Cloudreve-S3] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
time.Sleep(backoff)
case etag == "":
return errors.New("failed to get ETag from header")
default:
retryCount = 0
etags = append(etags, etag)
finish += byteSize
up(float64(finish) * 100 / float64(stream.GetSize()))
chunk++
}
finish += byteSize
up(float64(finish) * 100 / float64(stream.GetSize()))
chunk++
}
// s3LikeFinishUpload
// https://github.com/cloudreve/frontend/blob/b485bf297974cbe4834d2e8e744ae7b7e5b2ad39/src/component/Uploader/core/api/index.ts#L204-L252
bodyBuilder := &strings.Builder{}
@ -431,8 +438,8 @@ func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u Uploa
))
}
bodyBuilder.WriteString("</CompleteMultipartUpload>")
req, err := http.NewRequest(
"POST",
req, err := http.NewRequestWithContext(ctx,
http.MethodPost,
u.CompleteURL,
strings.NewReader(bodyBuilder.String()),
)

View File

@ -26,15 +26,8 @@ type Addition struct {
var config = driver.Config{
Name: "Cloudreve V4",
LocalSort: false,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: false,
NeedMs: false,
DefaultRoot: "cloudreve://my",
CheckStatus: true,
Alert: "",
NoOverwriteUpload: true,
}

View File

@ -47,7 +47,13 @@ type BasicConfigResp struct {
type SiteLoginConfigResp struct {
LoginCaptcha bool `json:"login_captcha"`
Authn bool `json:"authn"`
// RegCaptcha bool `json:"reg_captcha"`
// ForgetCaptcha bool `json:"forget_captcha"`
// RegisterEnabled bool `json:"register_enabled"`
// TosURL string `json:"tos_url"`
// PrivacyPolicyURL string `json:"privacy_policy_url"`
// SsoDisplayName string `json:"sso_display_name"`
// OidcDisplayName string `json:"oidc_display_name"`
}
type PrepareLoginResp struct {

View File

@ -19,7 +19,9 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/setting"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
jsoniter "github.com/json-iterator/go"
)
@ -95,9 +97,6 @@ func (d *CloudreveV4) login() error {
if err != nil {
return err
}
if !siteConfig.Authn {
return errors.New("authn not support")
}
var prepareLogin PrepareLoginResp
err = d.request(http.MethodGet, "/session/prepare?email="+d.Addition.Username, nil, &prepareLogin)
if err != nil {
@ -253,13 +252,16 @@ func (d *CloudreveV4) upLocal(ctx context.Context, file model.FileStreamer, u Fi
}
func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
DEFAULT := int64(u.ChunkSize)
ss, err := stream.NewStreamSectionReader(file, int(DEFAULT), &up)
if err != nil {
return err
}
uploadUrl := u.UploadUrls[0]
credential := u.Credential
var finish int64 = 0
var chunk int = 0
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < file.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
@ -267,69 +269,67 @@ func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u F
left := file.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[CloudreveV4-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(file, byteData)
utils.Log.Debug(err, n)
rd, err := ss.GetSectionReader(finish, byteSize)
if err != nil {
return err
}
req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk),
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
err = retry.Do(
func() error {
rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadUrl+"?chunk="+strconv.Itoa(chunk),
driver.NewLimitedUploadStream(ctx, rd))
if err != nil {
return err
}
req.ContentLength = byteSize
req.Header.Set("Authorization", fmt.Sprint(credential))
req.Header.Set("User-Agent", d.getUA())
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != 200 {
return fmt.Errorf("server error: %d", res.StatusCode)
}
body, err := io.ReadAll(res.Body)
if err != nil {
return err
}
var up Resp
err = json.Unmarshal(body, &up)
if err != nil {
return err
}
if up.Code != 0 {
return errors.New(up.Msg)
}
return nil
}, retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.FreeSectionReader(rd)
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = byteSize
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Authorization", fmt.Sprint(credential))
req.Header.Set("User-Agent", d.getUA())
err = func() error {
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != 200 {
return errors.New(res.Status)
}
body, err := io.ReadAll(res.Body)
if err != nil {
return err
}
var up Resp
err = json.Unmarshal(body, &up)
if err != nil {
return err
}
if up.Code != 0 {
return errors.New(up.Msg)
}
return nil
}()
if err == nil {
retryCount = 0
finish += byteSize
up(float64(finish) * 100 / float64(file.GetSize()))
chunk++
} else {
retryCount++
if retryCount > maxRetries {
return fmt.Errorf("upload failed after %d retries due to server errors, error: %s", maxRetries, err)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("[Cloudreve-Remote] server errors while uploading, retrying after %v...", backoff)
time.Sleep(backoff)
}
finish += byteSize
up(float64(finish) * 100 / float64(file.GetSize()))
chunk++
}
return nil
}
func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
DEFAULT := int64(u.ChunkSize)
ss, err := stream.NewStreamSectionReader(file, int(DEFAULT), &up)
if err != nil {
return err
}
uploadUrl := u.UploadUrls[0]
var finish int64 = 0
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < file.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
@ -337,46 +337,46 @@ func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u
left := file.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[CloudreveV4-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(file, byteData)
utils.Log.Debug(err, n)
rd, err := ss.GetSectionReader(finish, byteSize)
if err != nil {
return err
}
req, err := http.NewRequest(http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
err = retry.Do(
func() error {
rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, rd))
if err != nil {
return err
}
req.ContentLength = byteSize
req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, file.GetSize()))
req.Header.Set("User-Agent", d.getUA())
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
switch {
case res.StatusCode >= 500 && res.StatusCode <= 504:
return fmt.Errorf("server error: %d", res.StatusCode)
case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200:
data, _ := io.ReadAll(res.Body)
return errors.New(string(data))
default:
return nil
}
}, retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.FreeSectionReader(rd)
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = byteSize
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, file.GetSize()))
req.Header.Set("User-Agent", d.getUA())
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
switch {
case res.StatusCode >= 500 && res.StatusCode <= 504:
retryCount++
if retryCount > maxRetries {
res.Body.Close()
return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("[CloudreveV4-OneDrive] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
time.Sleep(backoff)
case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200:
data, _ := io.ReadAll(res.Body)
res.Body.Close()
return errors.New(string(data))
default:
res.Body.Close()
retryCount = 0
finish += byteSize
up(float64(finish) * 100 / float64(file.GetSize()))
}
finish += byteSize
up(float64(finish) * 100 / float64(file.GetSize()))
}
// send the callback request after a successful upload
return d.request(http.MethodPost, "/callback/onedrive/"+u.SessionID+"/"+u.CallbackSecret, func(req *resty.Request) {
@ -385,12 +385,15 @@ func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u
}
func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
DEFAULT := int64(u.ChunkSize)
ss, err := stream.NewStreamSectionReader(file, int(DEFAULT), &up)
if err != nil {
return err
}
var finish int64 = 0
var chunk int = 0
var etags []string
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < file.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
@ -398,43 +401,47 @@ func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileU
left := file.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[CloudreveV4-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(file, byteData)
utils.Log.Debug(err, n)
rd, err := ss.GetSectionReader(finish, byteSize)
if err != nil {
return err
}
req, err := http.NewRequest(http.MethodPut, u.UploadUrls[chunk],
driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
err = retry.Do(
func() error {
rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, u.UploadUrls[chunk],
driver.NewLimitedUploadStream(ctx, rd))
if err != nil {
return err
}
req.ContentLength = byteSize
req.Header.Set("User-Agent", d.getUA())
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
etag := res.Header.Get("ETag")
res.Body.Close()
switch {
case res.StatusCode != 200:
return fmt.Errorf("server error: %d", res.StatusCode)
case etag == "":
return errors.New("failed to get ETag from header")
default:
etags = append(etags, etag)
return nil
}
},
retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.FreeSectionReader(rd)
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = byteSize
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
etag := res.Header.Get("ETag")
res.Body.Close()
switch {
case res.StatusCode != 200:
retryCount++
if retryCount > maxRetries {
return fmt.Errorf("upload failed after %d retries due to server errors", maxRetries)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("server error %d, retrying after %v...", res.StatusCode, backoff)
time.Sleep(backoff)
case etag == "":
return errors.New("failed to get ETag from header")
default:
retryCount = 0
etags = append(etags, etag)
finish += byteSize
up(float64(finish) * 100 / float64(file.GetSize()))
chunk++
}
finish += byteSize
up(float64(finish) * 100 / float64(file.GetSize()))
chunk++
}
// s3LikeFinishUpload
@ -448,8 +455,8 @@ func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileU
))
}
bodyBuilder.WriteString("</CompleteMultipartUpload>")
req, err := http.NewRequest(
"POST",
req, err := http.NewRequestWithContext(ctx,
http.MethodPost,
u.CompleteURL,
strings.NewReader(bodyBuilder.String()),
)

View File

@ -1,12 +1,14 @@
package crypt
import (
"bytes"
"context"
"fmt"
"io"
stdpath "path"
"regexp"
"strings"
"sync"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
@ -110,7 +112,7 @@ func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
//return d.list(ctx, d.RemotePath, path)
//remoteFull
objs, err := fs.List(ctx, d.getPathForRemote(path, true), &fs.ListArgs{NoLog: true})
objs, err := fs.List(ctx, d.getPathForRemote(path, true), &fs.ListArgs{NoLog: true, Refresh: args.Refresh})
// the obj must implement the model.SetPath interface
// return objs, err
if err != nil {
@ -241,6 +243,9 @@ func (d *Crypt) Get(ctx context.Context, path string) (model.Obj, error) {
//return nil, errs.ObjectNotFound
}
// https://github.com/rclone/rclone/blob/v1.67.0/backend/crypt/cipher.go#L37
const fileHeaderSize = 32
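// Per the rclone cipher layout referenced above, those 32 bytes are the
// 8-byte magic "RCLONE\x00\x00" followed by the 24-byte nonce; caching the
// header (below) lets repeated small reads at offset 0 be served without
// issuing another remote request.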
func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
dstDirActualPath, err := d.getActualPathForRemote(file.GetPath(), false)
if err != nil {
@ -251,58 +256,68 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
return nil, err
}
if remoteLink.RangeReadCloser == nil && remoteLink.MFile == nil && len(remoteLink.URL) == 0 {
remoteSize := remoteLink.ContentLength
if remoteSize <= 0 {
remoteSize = remoteFile.GetSize()
}
rrf, err := stream.GetRangeReaderFromLink(remoteSize, remoteLink)
if err != nil {
_ = remoteLink.Close()
return nil, fmt.Errorf("the remote storage driver need to be enhanced to support encrytion")
}
resultRangeReadCloser := &model.RangeReadCloser{}
resultRangeReadCloser.TryAdd(remoteLink.MFile)
if remoteLink.RangeReadCloser != nil {
resultRangeReadCloser.AddClosers(remoteLink.RangeReadCloser.GetClosers())
}
remoteFileSize := remoteFile.GetSize()
rangeReaderFunc := func(ctx context.Context, underlyingOffset, underlyingLength int64) (io.ReadCloser, error) {
length := underlyingLength
if underlyingLength >= 0 && underlyingOffset+underlyingLength >= remoteFileSize {
length = -1
}
if remoteLink.MFile != nil {
_, err := remoteLink.MFile.Seek(underlyingOffset, io.SeekStart)
if err != nil {
return nil, err
}
//keep reuse same MFile and close at last.
return io.NopCloser(remoteLink.MFile), nil
}
rrc := remoteLink.RangeReadCloser
if rrc == nil && len(remoteLink.URL) > 0 {
var err error
rrc, err = stream.GetRangeReadCloserFromLink(remoteFileSize, remoteLink)
if err != nil {
return nil, err
}
resultRangeReadCloser.AddClosers(rrc.GetClosers())
remoteLink.RangeReadCloser = rrc
}
if rrc != nil {
remoteReader, err := rrc.RangeRead(ctx, http_range.Range{Start: underlyingOffset, Length: length})
if err != nil {
return nil, err
}
return remoteReader, nil
}
return nil, errs.NotSupport
}
resultRangeReadCloser.RangeReader = func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
readSeeker, err := d.cipher.DecryptDataSeek(ctx, rangeReaderFunc, httpRange.Start, httpRange.Length)
mu := &sync.Mutex{}
var fileHeader []byte
rangeReaderFunc := func(ctx context.Context, offset, limit int64) (io.ReadCloser, error) {
length := limit
if offset == 0 && limit > 0 {
mu.Lock()
if limit <= fileHeaderSize {
defer mu.Unlock()
if fileHeader != nil {
return io.NopCloser(bytes.NewReader(fileHeader[:limit])), nil
}
length = fileHeaderSize
} else if fileHeader == nil {
defer mu.Unlock()
} else {
mu.Unlock()
}
}
remoteReader, err := rrf.RangeRead(ctx, http_range.Range{Start: offset, Length: length})
if err != nil {
return nil, err
}
return readSeeker, nil
}
if offset == 0 && limit > 0 {
fileHeader = make([]byte, fileHeaderSize)
n, err := io.ReadFull(remoteReader, fileHeader)
if n != fileHeaderSize {
fileHeader = nil
return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", fileHeaderSize, n, err)
}
if limit <= fileHeaderSize {
remoteReader.Close()
return io.NopCloser(bytes.NewReader(fileHeader[:limit])), nil
} else {
remoteReader = utils.ReadCloser{
Reader: io.MultiReader(bytes.NewReader(fileHeader), remoteReader),
Closer: remoteReader,
}
}
}
return remoteReader, nil
}
return &model.Link{
RangeReadCloser: resultRangeReadCloser,
RangeReader: stream.RangeReaderFunc(func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
readSeeker, err := d.cipher.DecryptDataSeek(ctx, rangeReaderFunc, httpRange.Start, httpRange.Length)
if err != nil {
return nil, err
}
return readSeeker, nil
}),
SyncClosers: utils.NewSyncClosers(remoteLink),
}, nil
}
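The rewritten Link caches the first fileHeaderSize bytes of the remote object: rclone-crypt files open with a fixed 32-byte header (magic plus nonce) that every decrypted range read needs, so keeping it in memory saves one remote round-trip per read. A minimal, self-contained sketch of that caching idea (hypothetical helper names, not the driver's actual plumbing):

```go
// Sketch: cache the first fileHeaderSize bytes so repeated header reads
// do not re-fetch from the remote. rangeReadFunc is a hypothetical stand-in.
package main

import (
	"bytes"
	"fmt"
	"io"
	"sync"
)

const fileHeaderSize = 32 // rclone-crypt file header (magic + nonce)

type rangeReadFunc func(offset, length int64) (io.ReadCloser, error)

// withHeaderCache serves short reads at offset 0 from a cached header copy.
func withHeaderCache(rr rangeReadFunc) rangeReadFunc {
	var (
		mu     sync.Mutex
		header []byte
	)
	return func(offset, length int64) (io.ReadCloser, error) {
		if offset != 0 || length <= 0 || length > fileHeaderSize {
			return rr(offset, length) // not a header read; pass through
		}
		mu.Lock()
		defer mu.Unlock()
		if header == nil {
			rc, err := rr(0, fileHeaderSize)
			if err != nil {
				return nil, err
			}
			defer rc.Close()
			buf := make([]byte, fileHeaderSize)
			if _, err := io.ReadFull(rc, buf); err != nil {
				return nil, err
			}
			header = buf
		}
		return io.NopCloser(bytes.NewReader(header[:length])), nil
	}
}

func main() {
	fakeRemote := bytes.Repeat([]byte{0xAB}, 64) // pretend encrypted blob
	calls := 0
	rr := withHeaderCache(func(offset, length int64) (io.ReadCloser, error) {
		calls++
		return io.NopCloser(bytes.NewReader(fakeRemote[offset : offset+length])), nil
	})
	for i := 0; i < 3; i++ {
		rc, _ := rr(0, 16)
		_, _ = io.Copy(io.Discard, rc)
		_ = rc.Close()
	}
	fmt.Println("remote calls:", calls) // 1
}
```

The driver's version goes further: for reads longer than the header it stitches the cached bytes onto the remaining stream with io.MultiReader, as in the hunk above.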
@ -386,7 +401,6 @@ func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, streamer model.FileSt
},
Reader: wrappedIn,
Mimetype: "application/octet-stream",
WebPutAsTask: streamer.NeedStore(),
ForceStreamUpload: true,
Exist: streamer.GetExist(),
}


@ -26,17 +26,12 @@ type Addition struct {
}
var config = driver.Config{
Name: "Crypt",
LocalSort: true,
OnlyLocal: true,
OnlyProxy: true,
NoCache: true,
NoUpload: false,
NeedMs: false,
DefaultRoot: "/",
CheckStatus: false,
Alert: "",
NoOverwriteUpload: false,
Name: "Crypt",
LocalSort: true,
OnlyProxy: true,
NoCache: true,
DefaultRoot: "/",
NoLinkURL: true,
}
func init() {

drivers/degoo/driver.go Normal file

@ -0,0 +1,203 @@
package degoo
import (
"context"
"fmt"
"net/http"
"strconv"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
)
type Degoo struct {
model.Storage
Addition
client *http.Client
}
func (d *Degoo) Config() driver.Config {
return config
}
func (d *Degoo) GetAddition() driver.Additional {
return &d.Addition
}
func (d *Degoo) Init(ctx context.Context) error {
d.client = base.HttpClient
// Ensure we have a valid token (logs in if needed, refreshes if expired)
if err := d.ensureValidToken(ctx); err != nil {
return fmt.Errorf("failed to initialize token: %w", err)
}
return d.getDevices(ctx)
}
func (d *Degoo) Drop(ctx context.Context) error {
return nil
}
func (d *Degoo) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
items, err := d.getAllFileChildren5(ctx, dir.GetID())
if err != nil {
return nil, err
}
return utils.MustSliceConvert(items, func(s DegooFileItem) model.Obj {
isFolder := s.Category == 2 || s.Category == 1 || s.Category == 10
createTime, modTime, _ := humanReadableTimes(s.CreationTime, s.LastModificationTime, s.LastUploadTime)
size, err := strconv.ParseInt(s.Size, 10, 64)
if err != nil {
size = 0 // Default to 0 if size parsing fails
}
return &model.Object{
ID: s.ID,
Path: s.FilePath,
Name: s.Name,
Size: size,
Modified: modTime,
Ctime: createTime,
IsFolder: isFolder,
}
}), nil
}
func (d *Degoo) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
item, err := d.getOverlay4(ctx, file.GetID())
if err != nil {
return nil, err
}
return &model.Link{URL: item.URL}, nil
}
func (d *Degoo) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
// Folder creation is done by calling the setUploadFile3 API with a special checksum and size.
const query = `mutation SetUploadFile3($Token: String!, $FileInfos: [FileInfoUpload3]!) { setUploadFile3(Token: $Token, FileInfos: $FileInfos) }`
variables := map[string]interface{}{
"Token": d.AccessToken,
"FileInfos": []map[string]interface{}{
{
"Checksum": folderChecksum,
"Name": dirName,
"CreationTime": time.Now().UnixMilli(),
"ParentID": parentDir.GetID(),
"Size": 0,
},
},
}
_, err := d.apiCall(ctx, "SetUploadFile3", query, variables)
if err != nil {
return err
}
return nil
}
func (d *Degoo) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
const query = `mutation SetMoveFile($Token: String!, $Copy: Boolean, $NewParentID: String!, $FileIDs: [String]!) { setMoveFile(Token: $Token, Copy: $Copy, NewParentID: $NewParentID, FileIDs: $FileIDs) }`
variables := map[string]interface{}{
"Token": d.AccessToken,
"Copy": false,
"NewParentID": dstDir.GetID(),
"FileIDs": []string{srcObj.GetID()},
}
_, err := d.apiCall(ctx, "SetMoveFile", query, variables)
if err != nil {
return nil, err
}
return srcObj, nil
}
func (d *Degoo) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
const query = `mutation SetRenameFile($Token: String!, $FileRenames: [FileRenameInfo]!) { setRenameFile(Token: $Token, FileRenames: $FileRenames) }`
variables := map[string]interface{}{
"Token": d.AccessToken,
"FileRenames": []DegooFileRenameInfo{
{
ID: srcObj.GetID(),
NewName: newName,
},
},
}
_, err := d.apiCall(ctx, "SetRenameFile", query, variables)
if err != nil {
return err
}
return nil
}
func (d *Degoo) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
// Copy is not implemented; the Degoo API does not support direct copy.
return nil, errs.NotImplement
}
func (d *Degoo) Remove(ctx context.Context, obj model.Obj) error {
// Remove deletes a file or folder (moves to trash).
const query = `mutation SetDeleteFile5($Token: String!, $IsInRecycleBin: Boolean!, $IDs: [IDType]!) { setDeleteFile5(Token: $Token, IsInRecycleBin: $IsInRecycleBin, IDs: $IDs) }`
variables := map[string]interface{}{
"Token": d.AccessToken,
"IsInRecycleBin": false,
"IDs": []map[string]string{{"FileID": obj.GetID()}},
}
_, err := d.apiCall(ctx, "SetDeleteFile5", query, variables)
return err
}
func (d *Degoo) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
tmpF, err := file.CacheFullAndWriter(&up, nil)
if err != nil {
return err
}
parentID := dstDir.GetID()
// Calculate the checksum for the file.
checksum, err := d.checkSum(tmpF)
if err != nil {
return err
}
// 1. Get upload authorization via getBucketWriteAuth4.
auths, err := d.getBucketWriteAuth4(ctx, file, parentID, checksum)
if err != nil {
return err
}
// 2. Upload file.
// rapid upload: skip the S3 transfer when the server already has this content
if auths.GetBucketWriteAuth4[0].Error != "Already exist!" {
err = d.uploadS3(ctx, auths, tmpF, file, checksum)
if err != nil {
return err
}
}
// 3. Register metadata with setUploadFile3.
data, err := d.SetUploadFile3(ctx, file, parentID, checksum)
if err != nil {
return err
}
if !data.SetUploadFile3 {
return fmt.Errorf("setUploadFile3 failed: %v", data)
}
return nil
}

drivers/degoo/meta.go Normal file

@ -0,0 +1,27 @@
package degoo
import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/op"
)
type Addition struct {
driver.RootID
Username string `json:"username" help:"Your Degoo account email"`
Password string `json:"password" help:"Your Degoo account password"`
RefreshToken string `json:"refresh_token" help:"Refresh token for automatic token renewal, obtained automatically"`
AccessToken string `json:"access_token" help:"Access token for Degoo API, obtained automatically"`
}
var config = driver.Config{
Name: "Degoo",
LocalSort: true,
DefaultRoot: "0",
NoOverwriteUpload: true,
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &Degoo{}
})
}

drivers/degoo/types.go Normal file

@ -0,0 +1,110 @@
package degoo
import (
"encoding/json"
)
// DegooLoginRequest represents the login request body.
type DegooLoginRequest struct {
GenerateToken bool `json:"GenerateToken"`
Username string `json:"Username"`
Password string `json:"Password"`
}
// DegooLoginResponse represents a successful login response.
type DegooLoginResponse struct {
Token string `json:"Token"`
RefreshToken string `json:"RefreshToken"`
}
// DegooAccessTokenRequest represents the token refresh request body.
type DegooAccessTokenRequest struct {
RefreshToken string `json:"RefreshToken"`
}
// DegooAccessTokenResponse represents the token refresh response.
type DegooAccessTokenResponse struct {
AccessToken string `json:"AccessToken"`
}
// DegooFileItem represents a Degoo file or folder.
type DegooFileItem struct {
ID string `json:"ID"`
ParentID string `json:"ParentID"`
Name string `json:"Name"`
Category int `json:"Category"`
Size string `json:"Size"`
URL string `json:"URL"`
CreationTime string `json:"CreationTime"`
LastModificationTime string `json:"LastModificationTime"`
LastUploadTime string `json:"LastUploadTime"`
MetadataID string `json:"MetadataID"`
DeviceID int64 `json:"DeviceID"`
FilePath string `json:"FilePath"`
IsInRecycleBin bool `json:"IsInRecycleBin"`
}
type DegooErrors struct {
Path []string `json:"path"`
Data interface{} `json:"data"`
ErrorType string `json:"errorType"`
ErrorInfo interface{} `json:"errorInfo"`
Message string `json:"message"`
}
// DegooGraphqlResponse is the common structure for GraphQL API responses.
type DegooGraphqlResponse struct {
Data json.RawMessage `json:"data"`
Errors []DegooErrors `json:"errors,omitempty"`
}
// DegooGetChildren5Data is the data field for getFileChildren5.
type DegooGetChildren5Data struct {
GetFileChildren5 struct {
Items []DegooFileItem `json:"Items"`
NextToken string `json:"NextToken"`
} `json:"getFileChildren5"`
}
// DegooGetOverlay4Data is the data field for getOverlay4.
type DegooGetOverlay4Data struct {
GetOverlay4 DegooFileItem `json:"getOverlay4"`
}
// DegooFileRenameInfo represents a file rename operation.
type DegooFileRenameInfo struct {
ID string `json:"ID"`
NewName string `json:"NewName"`
}
// DegooFileIDs represents a list of file IDs for move operations.
type DegooFileIDs struct {
FileIDs []string `json:"FileIDs"`
}
// DegooGetBucketWriteAuth4Data is the data field for GetBucketWriteAuth4.
type DegooGetBucketWriteAuth4Data struct {
GetBucketWriteAuth4 []struct {
AuthData struct {
PolicyBase64 string `json:"PolicyBase64"`
Signature string `json:"Signature"`
BaseURL string `json:"BaseURL"`
KeyPrefix string `json:"KeyPrefix"`
AccessKey struct {
Key string `json:"Key"`
Value string `json:"Value"`
} `json:"AccessKey"`
ACL string `json:"ACL"`
AdditionalBody []struct {
Key string `json:"Key"`
Value string `json:"Value"`
} `json:"AdditionalBody"`
} `json:"AuthData"`
Error interface{} `json:"Error"`
} `json:"getBucketWriteAuth4"`
}
// DegooSetUploadFile3Data is the data field for SetUploadFile3.
type DegooSetUploadFile3Data struct {
SetUploadFile3 bool `json:"setUploadFile3"`
}

drivers/degoo/upload.go Normal file

@ -0,0 +1,198 @@
package degoo
import (
"bytes"
"context"
"crypto/sha1"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"mime/multipart"
"net/http"
"strconv"
"strings"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
)
func (d *Degoo) getBucketWriteAuth4(ctx context.Context, file model.FileStreamer, parentID string, checksum string) (*DegooGetBucketWriteAuth4Data, error) {
const query = `query GetBucketWriteAuth4(
$Token: String!
$ParentID: String!
$StorageUploadInfos: [StorageUploadInfo2]
) {
getBucketWriteAuth4(
Token: $Token
ParentID: $ParentID
StorageUploadInfos: $StorageUploadInfos
) {
AuthData {
PolicyBase64
Signature
BaseURL
KeyPrefix
AccessKey {
Key
Value
}
ACL
AdditionalBody {
Key
Value
}
}
Error
}
}`
variables := map[string]interface{}{
"Token": d.AccessToken,
"ParentID": parentID,
"StorageUploadInfos": []map[string]string{{
"FileName": file.GetName(),
"Checksum": checksum,
"Size": strconv.FormatInt(file.GetSize(), 10),
}}}
data, err := d.apiCall(ctx, "GetBucketWriteAuth4", query, variables)
if err != nil {
return nil, err
}
var resp DegooGetBucketWriteAuth4Data
err = json.Unmarshal(data, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}
// checkSum calculates the SHA1-based checksum for the Degoo upload API.
func (d *Degoo) checkSum(file io.Reader) (string, error) {
seed := []byte{13, 7, 2, 2, 15, 40, 75, 117, 13, 10, 19, 16, 29, 23, 3, 36}
hasher := sha1.New()
hasher.Write(seed)
if _, err := utils.CopyWithBuffer(hasher, file); err != nil {
return "", err
}
cs := hasher.Sum(nil)
csBytes := []byte{10, byte(len(cs))}
csBytes = append(csBytes, cs...)
csBytes = append(csBytes, 16, 0)
return strings.ReplaceAll(base64.StdEncoding.EncodeToString(csBytes), "/", "_"), nil
}
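The framing around the SHA-1 looks like a tiny protobuf message: tag 0x0A introduces the length-delimited hash, tag 0x10 a small varint, and the result is base64 with '/' mapped to '_'. Read that way, the folderChecksum constant in util.go appears to be the same framing around an empty hash with the trailing varint set to 2; a quick check of that interpretation (mine, not stated in the diff):

```go
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// 0x0A, 0x00: length-delimited field 1 holding an empty hash;
	// 0x10, 0x02: varint field 2 set to 2. The constant is stored
	// without padding, hence RawStdEncoding here.
	b := []byte{0x0A, 0x00, 0x10, 0x02}
	fmt.Println(base64.RawStdEncoding.EncodeToString(b)) // CgAQAg
}
```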
func (d *Degoo) uploadS3(ctx context.Context, auths *DegooGetBucketWriteAuth4Data, tmpF model.File, file model.FileStreamer, checksum string) error {
a := auths.GetBucketWriteAuth4[0].AuthData
_, err := tmpF.Seek(0, io.SeekStart)
if err != nil {
return err
}
ext := utils.Ext(file.GetName())
key := fmt.Sprintf("%s%s/%s.%s", a.KeyPrefix, ext, checksum, ext)
var b bytes.Buffer
w := multipart.NewWriter(&b)
err = w.WriteField("key", key)
if err != nil {
return err
}
err = w.WriteField("acl", a.ACL)
if err != nil {
return err
}
err = w.WriteField("policy", a.PolicyBase64)
if err != nil {
return err
}
err = w.WriteField("signature", a.Signature)
if err != nil {
return err
}
err = w.WriteField(a.AccessKey.Key, a.AccessKey.Value)
if err != nil {
return err
}
for _, additional := range a.AdditionalBody {
err = w.WriteField(additional.Key, additional.Value)
if err != nil {
return err
}
}
err = w.WriteField("Content-Type", "")
if err != nil {
return err
}
_, err = w.CreateFormFile("file", key)
if err != nil {
return err
}
headSize := b.Len()
err = w.Close()
if err != nil {
return err
}
head := bytes.NewReader(b.Bytes()[:headSize])
tail := bytes.NewReader(b.Bytes()[headSize:])
rateLimitedRd := driver.NewLimitedUploadStream(ctx, io.MultiReader(head, tmpF, tail))
req, err := http.NewRequestWithContext(ctx, http.MethodPost, a.BaseURL, rateLimitedRd)
if err != nil {
return err
}
req.Header.Add("ngsw-bypass", "1")
req.Header.Add("Content-Type", w.FormDataContentType())
res, err := d.client.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != http.StatusNoContent {
return fmt.Errorf("upload failed with status code %d", res.StatusCode)
}
return nil
}
var _ driver.Driver = (*Degoo)(nil)
func (d *Degoo) SetUploadFile3(ctx context.Context, file model.FileStreamer, parentID string, checksum string) (*DegooSetUploadFile3Data, error) {
const query = `mutation SetUploadFile3($Token: String!, $FileInfos: [FileInfoUpload3]!) {
setUploadFile3(Token: $Token, FileInfos: $FileInfos)
}`
variables := map[string]interface{}{
"Token": d.AccessToken,
"FileInfos": []map[string]string{{
"Checksum": checksum,
"CreationTime": strconv.FormatInt(file.CreateTime().UnixMilli(), 10),
"Name": file.GetName(),
"ParentID": parentID,
"Size": strconv.FormatInt(file.GetSize(), 10),
}}}
data, err := d.apiCall(ctx, "SetUploadFile3", query, variables)
if err != nil {
return nil, err
}
var resp DegooSetUploadFile3Data
err = json.Unmarshal(data, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}

drivers/degoo/util.go Normal file

@ -0,0 +1,462 @@
package degoo
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"fmt"
"net/http"
"strconv"
"strings"
"sync"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/op"
)
// Thanks to https://github.com/bernd-wechner/Degoo for API research.
const (
// API endpoints
loginURL = "https://rest-api.degoo.com/login"
accessTokenURL = "https://rest-api.degoo.com/access-token/v2"
apiURL = "https://production-appsync.degoo.com/graphql"
// API configuration
apiKey = "da2-vs6twz5vnjdavpqndtbzg3prra"
folderChecksum = "CgAQAg"
// Token management
tokenRefreshThreshold = 5 * time.Minute
// Rate limiting
minRequestInterval = 1 * time.Second
// Error messages
errRateLimited = "rate limited (429), please try again later"
errUnauthorized = "unauthorized access"
)
var (
// Global rate limiting - protects against concurrent API calls
lastRequestTime time.Time
requestMutex sync.Mutex
)
// JWT payload structure for token expiration checking
type JWTPayload struct {
UserID string `json:"userID"`
Exp int64 `json:"exp"`
Iat int64 `json:"iat"`
}
// Rate limiting helper functions
// applyRateLimit ensures minimum interval between API requests
func applyRateLimit() {
requestMutex.Lock()
defer requestMutex.Unlock()
if !lastRequestTime.IsZero() {
if elapsed := time.Since(lastRequestTime); elapsed < minRequestInterval {
time.Sleep(minRequestInterval - elapsed)
}
}
lastRequestTime = time.Now()
}
// HTTP request helper functions
// createJSONRequest creates a new HTTP request with JSON body
func createJSONRequest(ctx context.Context, method, url string, body interface{}) (*http.Request, error) {
jsonBody, err := json.Marshal(body)
if err != nil {
return nil, fmt.Errorf("failed to marshal request body: %w", err)
}
req, err := http.NewRequestWithContext(ctx, method, url, bytes.NewBuffer(jsonBody))
if err != nil {
return nil, fmt.Errorf("failed to create request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("User-Agent", base.UserAgent)
return req, nil
}
// checkHTTPResponse checks for common HTTP error conditions
func checkHTTPResponse(resp *http.Response, operation string) error {
if resp.StatusCode == http.StatusTooManyRequests {
return fmt.Errorf("%s %s", operation, errRateLimited)
}
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("%s failed: %s", operation, resp.Status)
}
return nil
}
// isTokenExpired checks if the JWT token is expired or will expire soon
func (d *Degoo) isTokenExpired() bool {
if d.AccessToken == "" {
return true
}
payload, err := extractJWTPayload(d.AccessToken)
if err != nil {
return true // Invalid token format
}
// Check if token expires within the threshold
expireTime := time.Unix(payload.Exp, 0)
return time.Now().Add(tokenRefreshThreshold).After(expireTime)
}
// extractJWTPayload extracts and parses JWT payload
func extractJWTPayload(token string) (*JWTPayload, error) {
parts := strings.Split(token, ".")
if len(parts) != 3 {
return nil, fmt.Errorf("invalid JWT format")
}
// Decode the payload (second part)
payload, err := base64.RawURLEncoding.DecodeString(parts[1])
if err != nil {
return nil, fmt.Errorf("failed to decode JWT payload: %w", err)
}
var jwtPayload JWTPayload
if err := json.Unmarshal(payload, &jwtPayload); err != nil {
return nil, fmt.Errorf("failed to parse JWT payload: %w", err)
}
return &jwtPayload, nil
}
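extractJWTPayload leans on the JWT shape: three base64url segments, of which only the middle one matters for expiry checks. A runnable toy showing the same decode-and-compare flow (the token below is fabricated for the demo, not a Degoo credential):

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
	"time"
)

func main() {
	// Build a throwaway unsigned token that expires in an hour.
	payload, _ := json.Marshal(map[string]any{"userID": "u1", "exp": time.Now().Add(time.Hour).Unix()})
	token := strings.Join([]string{
		base64.RawURLEncoding.EncodeToString([]byte(`{"alg":"none"}`)),
		base64.RawURLEncoding.EncodeToString(payload),
		"",
	}, ".")

	parts := strings.Split(token, ".")
	raw, _ := base64.RawURLEncoding.DecodeString(parts[1])
	var claims struct {
		Exp int64 `json:"exp"`
	}
	_ = json.Unmarshal(raw, &claims)
	// Same rule as isTokenExpired: renew when within 5 minutes of expiry.
	fmt.Println("refresh needed:", time.Now().Add(5*time.Minute).After(time.Unix(claims.Exp, 0)))
}
```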
// refreshToken attempts to refresh the access token using the refresh token
func (d *Degoo) refreshToken(ctx context.Context) error {
if d.RefreshToken == "" {
return fmt.Errorf("no refresh token available")
}
// Create request
tokenReq := DegooAccessTokenRequest{RefreshToken: d.RefreshToken}
req, err := createJSONRequest(ctx, "POST", accessTokenURL, tokenReq)
if err != nil {
return fmt.Errorf("failed to create refresh token request: %w", err)
}
// Execute request
resp, err := d.client.Do(req)
if err != nil {
return fmt.Errorf("refresh token request failed: %w", err)
}
defer resp.Body.Close()
// Check response
if err := checkHTTPResponse(resp, "refresh token"); err != nil {
return err
}
var accessTokenResp DegooAccessTokenResponse
if err := json.NewDecoder(resp.Body).Decode(&accessTokenResp); err != nil {
return fmt.Errorf("failed to parse access token response: %w", err)
}
if accessTokenResp.AccessToken == "" {
return fmt.Errorf("empty access token received")
}
d.AccessToken = accessTokenResp.AccessToken
// Save the updated token to storage
op.MustSaveDriverStorage(d)
return nil
}
// ensureValidToken ensures we have a valid, non-expired token
func (d *Degoo) ensureValidToken(ctx context.Context) error {
// Check if token is expired or will expire soon
if d.isTokenExpired() {
// Try to refresh token first if we have a refresh token
if d.RefreshToken != "" {
if refreshErr := d.refreshToken(ctx); refreshErr == nil {
return nil // Successfully refreshed
} else {
// If refresh failed, fall back to full login
fmt.Printf("Token refresh failed, falling back to full login: %v\n", refreshErr)
}
}
// Perform full login
if d.Username != "" && d.Password != "" {
return d.login(ctx)
}
}
return nil
}
// login performs the login process and retrieves the access token.
func (d *Degoo) login(ctx context.Context) error {
if d.Username == "" || d.Password == "" {
return fmt.Errorf("username or password not provided")
}
creds := DegooLoginRequest{
GenerateToken: true,
Username: d.Username,
Password: d.Password,
}
jsonCreds, err := json.Marshal(creds)
if err != nil {
return fmt.Errorf("failed to serialize login credentials: %w", err)
}
req, err := http.NewRequestWithContext(ctx, "POST", loginURL, bytes.NewBuffer(jsonCreds))
if err != nil {
return fmt.Errorf("failed to create login request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("User-Agent", base.UserAgent)
req.Header.Set("Origin", "https://app.degoo.com")
resp, err := d.client.Do(req)
if err != nil {
return fmt.Errorf("login request failed: %w", err)
}
defer resp.Body.Close()
// Handle rate limiting (429 Too Many Requests)
if resp.StatusCode == http.StatusTooManyRequests {
return fmt.Errorf("login rate limited (429), please try again later")
}
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("login failed: %s", resp.Status)
}
var loginResp DegooLoginResponse
if err := json.NewDecoder(resp.Body).Decode(&loginResp); err != nil {
return fmt.Errorf("failed to parse login response: %w", err)
}
if loginResp.RefreshToken != "" {
tokenReq := DegooAccessTokenRequest{RefreshToken: loginResp.RefreshToken}
jsonTokenReq, err := json.Marshal(tokenReq)
if err != nil {
return fmt.Errorf("failed to serialize access token request: %w", err)
}
tokenReqHTTP, err := http.NewRequestWithContext(ctx, "POST", accessTokenURL, bytes.NewBuffer(jsonTokenReq))
if err != nil {
return fmt.Errorf("failed to create access token request: %w", err)
}
tokenReqHTTP.Header.Set("User-Agent", base.UserAgent)
tokenResp, err := d.client.Do(tokenReqHTTP)
if err != nil {
return fmt.Errorf("failed to get access token: %w", err)
}
defer tokenResp.Body.Close()
var accessTokenResp DegooAccessTokenResponse
if err := json.NewDecoder(tokenResp.Body).Decode(&accessTokenResp); err != nil {
return fmt.Errorf("failed to parse access token response: %w", err)
}
d.AccessToken = accessTokenResp.AccessToken
d.RefreshToken = loginResp.RefreshToken // Save refresh token
} else if loginResp.Token != "" {
d.AccessToken = loginResp.Token
d.RefreshToken = "" // Direct token, no refresh token available
} else {
return fmt.Errorf("login failed, no valid token returned")
}
// Save the updated tokens to storage
op.MustSaveDriverStorage(d)
return nil
}
// apiCall performs a Degoo GraphQL API request.
func (d *Degoo) apiCall(ctx context.Context, operationName, query string, variables map[string]interface{}) (json.RawMessage, error) {
// Apply rate limiting
applyRateLimit()
// Ensure we have a valid token before making the API call
if err := d.ensureValidToken(ctx); err != nil {
return nil, fmt.Errorf("failed to ensure valid token: %w", err)
}
// Update the Token in variables if it exists (after potential refresh)
d.updateTokenInVariables(variables)
return d.executeGraphQLRequest(ctx, operationName, query, variables)
}
// updateTokenInVariables updates the Token field in GraphQL variables
func (d *Degoo) updateTokenInVariables(variables map[string]interface{}) {
if variables != nil {
if _, hasToken := variables["Token"]; hasToken {
variables["Token"] = d.AccessToken
}
}
}
// executeGraphQLRequest executes a GraphQL request with retry logic
func (d *Degoo) executeGraphQLRequest(ctx context.Context, operationName, query string, variables map[string]interface{}) (json.RawMessage, error) {
reqBody := map[string]interface{}{
"operationName": operationName,
"query": query,
"variables": variables,
}
// Create and configure request
req, err := createJSONRequest(ctx, "POST", apiURL, reqBody)
if err != nil {
return nil, err
}
// Set Degoo-specific headers
req.Header.Set("x-api-key", apiKey)
if d.AccessToken != "" {
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", d.AccessToken))
}
// Execute request
resp, err := d.client.Do(req)
if err != nil {
return nil, fmt.Errorf("GraphQL API request failed: %w", err)
}
defer resp.Body.Close()
// Check for HTTP errors
if err := checkHTTPResponse(resp, "GraphQL API"); err != nil {
return nil, err
}
// Parse GraphQL response
var degooResp DegooGraphqlResponse
if err := json.NewDecoder(resp.Body).Decode(&degooResp); err != nil {
return nil, fmt.Errorf("failed to decode GraphQL response: %w", err)
}
// Handle GraphQL errors
if len(degooResp.Errors) > 0 {
return d.handleGraphQLError(ctx, degooResp.Errors[0], operationName, query, variables)
}
return degooResp.Data, nil
}
// handleGraphQLError handles GraphQL-level errors with retry logic
func (d *Degoo) handleGraphQLError(ctx context.Context, gqlError DegooErrors, operationName, query string, variables map[string]interface{}) (json.RawMessage, error) {
if gqlError.ErrorType == "Unauthorized" {
// Re-login and retry
if err := d.login(ctx); err != nil {
return nil, fmt.Errorf("%s, login failed: %w", errUnauthorized, err)
}
// Update token in variables and retry
d.updateTokenInVariables(variables)
return d.apiCall(ctx, operationName, query, variables)
}
return nil, fmt.Errorf("GraphQL API error: %s", gqlError.Message)
}
// humanReadableTimes converts Degoo timestamps to Go time.Time.
func humanReadableTimes(creation, modification, upload string) (cTime, mTime, uTime time.Time) {
cTime, _ = time.Parse(time.RFC3339, creation)
if modification != "" {
modMillis, _ := strconv.ParseInt(modification, 10, 64)
mTime = time.Unix(0, modMillis*int64(time.Millisecond))
}
if upload != "" {
upMillis, _ := strconv.ParseInt(upload, 10, 64)
uTime = time.Unix(0, upMillis*int64(time.Millisecond))
}
return cTime, mTime, uTime
}
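humanReadableTimes exists because Degoo mixes formats: CreationTime arrives as an RFC3339 string while LastModificationTime and LastUploadTime are millisecond-epoch strings. Both made-up inputs below decode to the same instant, 2024-01-02T03:04:05Z:

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

func main() {
	c, _ := time.Parse(time.RFC3339, "2024-01-02T03:04:05Z")
	ms, _ := strconv.ParseInt("1704164645000", 10, 64)
	m := time.Unix(0, ms*int64(time.Millisecond))
	fmt.Println(c.UTC(), m.UTC()) // identical timestamps
}
```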
// getDevices fetches and caches top-level devices and folders.
func (d *Degoo) getDevices(ctx context.Context) error {
const query = `query GetFileChildren5($Token: String! $ParentID: String $AllParentIDs: [String] $Limit: Int! $Order: Int! $NextToken: String ) { getFileChildren5(Token: $Token ParentID: $ParentID AllParentIDs: $AllParentIDs Limit: $Limit Order: $Order NextToken: $NextToken) { Items { ParentID } NextToken } }`
variables := map[string]interface{}{
"Token": d.AccessToken,
"ParentID": "0",
"Limit": 10,
"Order": 3,
}
data, err := d.apiCall(ctx, "GetFileChildren5", query, variables)
if err != nil {
return err
}
var resp DegooGetChildren5Data
if err := json.Unmarshal(data, &resp); err != nil {
return fmt.Errorf("failed to parse device list: %w", err)
}
if d.RootFolderID == "0" {
if len(resp.GetFileChildren5.Items) > 0 {
d.RootFolderID = resp.GetFileChildren5.Items[0].ParentID
}
op.MustSaveDriverStorage(d)
}
return nil
}
// getAllFileChildren5 fetches all children of a directory with pagination.
func (d *Degoo) getAllFileChildren5(ctx context.Context, parentID string) ([]DegooFileItem, error) {
const query = `query GetFileChildren5($Token: String! $ParentID: String $AllParentIDs: [String] $Limit: Int! $Order: Int! $NextToken: String ) { getFileChildren5(Token: $Token ParentID: $ParentID AllParentIDs: $AllParentIDs Limit: $Limit Order: $Order NextToken: $NextToken) { Items { ID ParentID Name Category Size CreationTime LastModificationTime LastUploadTime FilePath IsInRecycleBin DeviceID MetadataID } NextToken } }`
var allItems []DegooFileItem
nextToken := ""
for {
variables := map[string]interface{}{
"Token": d.AccessToken,
"ParentID": parentID,
"Limit": 1000,
"Order": 3,
}
if nextToken != "" {
variables["NextToken"] = nextToken
}
data, err := d.apiCall(ctx, "GetFileChildren5", query, variables)
if err != nil {
return nil, err
}
var resp DegooGetChildren5Data
if err := json.Unmarshal(data, &resp); err != nil {
return nil, err
}
allItems = append(allItems, resp.GetFileChildren5.Items...)
if resp.GetFileChildren5.NextToken == "" {
break
}
nextToken = resp.GetFileChildren5.NextToken
}
return allItems, nil
}
// getOverlay4 fetches metadata for a single item by ID.
func (d *Degoo) getOverlay4(ctx context.Context, id string) (DegooFileItem, error) {
const query = `query GetOverlay4($Token: String!, $ID: IDType!) { getOverlay4(Token: $Token, ID: $ID) { ID ParentID Name Category Size CreationTime LastModificationTime LastUploadTime URL FilePath IsInRecycleBin DeviceID MetadataID } }`
variables := map[string]interface{}{
"Token": d.AccessToken,
"ID": map[string]string{
"FileID": id,
},
}
data, err := d.apiCall(ctx, "GetOverlay4", query, variables)
if err != nil {
return DegooFileItem{}, err
}
var resp DegooGetOverlay4Data
if err := json.Unmarshal(data, &resp); err != nil {
return DegooFileItem{}, fmt.Errorf("failed to parse item metadata: %w", err)
}
return resp.GetOverlay4, nil
}


@ -236,7 +236,7 @@ func (d *Doubao) Put(ctx context.Context, dstDir model.Obj, file model.FileStrea
// choose the upload method based on file size
if file.GetSize() <= 1*utils.MB { // under 1MB, use the simple upload
return d.Upload(&uploadConfig, dstDir, file, up, dataType)
return d.Upload(ctx, &uploadConfig, dstDir, file, up, dataType)
}
// large files use multipart upload
return d.UploadByMultipart(ctx, &uploadConfig, file.GetSize(), dstDir, file, up, dataType)


@ -16,17 +16,9 @@ type Addition struct {
}
var config = driver.Config{
Name: "Doubao",
LocalSort: true,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: false,
NeedMs: false,
DefaultRoot: "0",
CheckStatus: false,
Alert: "",
NoOverwriteUpload: false,
Name: "Doubao",
LocalSort: true,
DefaultRoot: "0",
}
func init() {


@ -129,7 +129,7 @@ type BuiAuditInfo struct {
AuditInfo AuditInfo `json:"audit_info"`
IsAuditing bool `json:"is_auditing"`
AuditStatus int `json:"audit_status"`
LastUpdateTime int `json:"last_update_time"`
LastUpdateTime int64 `json:"last_update_time"`
UnpassReason string `json:"unpass_reason"`
Details Details `json:"details"`
}
@ -184,7 +184,7 @@ type UserInfo struct {
SecUserID string `json:"sec_user_id"`
SessionKey string `json:"session_key"`
UseHmRegion bool `json:"use_hm_region"`
UserCreateTime int `json:"user_create_time"`
UserCreateTime int64 `json:"user_create_time"`
UserID int64 `json:"user_id"`
UserIDStr string `json:"user_id_str"`
UserVerified bool `json:"user_verified"`


@ -14,7 +14,7 @@ import (
"math/rand"
"net/http"
"net/url"
"path/filepath"
stdpath "path"
"sort"
"strconv"
"strings"
@ -24,6 +24,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
@ -353,7 +354,7 @@ func (d *Doubao) getUploadConfig(upConfig *UploadConfig, dataType string, file m
"ServiceId": d.UploadToken.Alice[dataType].ServiceID,
"NeedFallback": "true",
"FileSize": strconv.FormatInt(file.GetSize(), 10),
"FileExtension": filepath.Ext(file.GetName()),
"FileExtension": stdpath.Ext(file.GetName()),
"s": randomString(),
}
}
@ -447,41 +448,67 @@ func (d *Doubao) uploadNode(uploadConfig *UploadConfig, dir model.Obj, file mode
}
// Upload implements the simple (non-multipart) upload
func (d *Doubao) Upload(config *UploadConfig, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, dataType string) (model.Obj, error) {
data, err := io.ReadAll(file)
func (d *Doubao) Upload(ctx context.Context, config *UploadConfig, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, dataType string) (model.Obj, error) {
ss, err := stream.NewStreamSectionReader(file, int(file.GetSize()), &up)
if err != nil {
return nil, err
}
reader, err := ss.GetSectionReader(0, file.GetSize())
if err != nil {
return nil, err
}
// calculate CRC32
crc32Hash := crc32.NewIEEE()
crc32Hash.Write(data)
w, err := utils.CopyWithBuffer(crc32Hash, reader)
if w != file.GetSize() {
return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", file.GetSize(), w, err)
}
crc32Value := hex.EncodeToString(crc32Hash.Sum(nil))
// build the request URL
uploadNode := config.InnerUploadAddress.UploadNodes[0]
storeInfo := uploadNode.StoreInfos[0]
uploadUrl := fmt.Sprintf("https://%s/upload/v1/%s", uploadNode.UploadHost, storeInfo.StoreURI)
uploadResp := UploadResp{}
if _, err = d.uploadRequest(uploadUrl, http.MethodPost, storeInfo, func(req *resty.Request) {
req.SetHeaders(map[string]string{
"Content-Type": "application/octet-stream",
"Content-Crc32": crc32Value,
"Content-Length": fmt.Sprintf("%d", len(data)),
"Content-Disposition": fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI)),
})
req.SetBody(data)
}, &uploadResp); err != nil {
rateLimitedRd := driver.NewLimitedUploadStream(ctx, reader)
err = d._retryOperation("Upload", func() error {
reader.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadUrl, rateLimitedRd)
if err != nil {
return err
}
req.Header = map[string][]string{
"Referer": {BaseURL + "/"},
"Origin": {BaseURL},
"User-Agent": {UserAgent},
"X-Storage-U": {d.UserId},
"Authorization": {storeInfo.Auth},
"Content-Type": {"application/octet-stream"},
"Content-Crc32": {crc32Value},
"Content-Length": {fmt.Sprintf("%d", file.GetSize())},
"Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))},
}
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
bytes, _ := io.ReadAll(res.Body)
resp := UploadResp{}
utils.Json.Unmarshal(bytes, &resp)
if resp.Code != 2000 {
return fmt.Errorf("upload part failed: %s", resp.Message)
} else if resp.Data.Crc32 != crc32Value {
return fmt.Errorf("upload part failed: crc32 mismatch, expected %s, got %s", crc32Value, resp.Data.Crc32)
}
return nil
})
ss.FreeSectionReader(reader)
if err != nil {
return nil, err
}
if uploadResp.Code != 2000 {
return nil, fmt.Errorf("upload failed: %s", uploadResp.Message)
}
uploadNodeResp, err := d.uploadNode(config, dstDir, file, dataType)
if err != nil {
return nil, err
@ -516,68 +543,107 @@ func (d *Doubao) UploadByMultipart(ctx context.Context, config *UploadConfig, fi
if config.InnerUploadAddress.AdvanceOption.SliceSize > 0 {
chunkSize = int64(config.InnerUploadAddress.AdvanceOption.SliceSize)
}
ss, err := stream.NewStreamSectionReader(file, int(chunkSize), &up)
if err != nil {
return nil, err
}
totalParts := (fileSize + chunkSize - 1) / chunkSize
// create the per-part result slice
parts := make([]UploadPart, totalParts)
// cache the file
tempFile, err := file.CacheFullInTempFile()
if err != nil {
return nil, fmt.Errorf("failed to cache file: %w", err)
}
up(10.0) // update progress
// set up parallel uploads
threadG, uploadCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
retry.Attempts(1),
thread := min(int(totalParts), d.uploadThread)
threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
retry.Attempts(MaxRetryAttempts),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
retry.DelayType(retry.BackOffDelay),
retry.MaxJitter(200*time.Millisecond),
)
var partsMutex sync.Mutex
// upload all parts in parallel
for partIndex := int64(0); partIndex < totalParts; partIndex++ {
hash := crc32.NewIEEE()
for partIndex := range totalParts {
if utils.IsCanceled(uploadCtx) {
break
}
partIndex := partIndex
partNumber := partIndex + 1 // part numbers start at 1
threadG.Go(func(ctx context.Context) error {
// calculate this part's size and offset
offset := partIndex * chunkSize
size := chunkSize
if partIndex == totalParts-1 {
size = fileSize - offset
}
limitedReader := driver.NewLimitedUploadStream(ctx, io.NewSectionReader(tempFile, offset, size))
// read the data into memory
data, err := io.ReadAll(limitedReader)
if err != nil {
return fmt.Errorf("failed to read part %d: %w", partNumber, err)
}
// calculate CRC32
crc32Value := calculateCRC32(data)
// upload the part using _retryOperation
var uploadPart UploadPart
if err = d._retryOperation(fmt.Sprintf("Upload part %d", partNumber), func() error {
var err error
uploadPart, err = d.uploadPart(config, uploadUrl, uploadID, partNumber, data, crc32Value)
return err
}); err != nil {
return fmt.Errorf("part %d upload failed: %w", partNumber, err)
}
// record the successfully uploaded part
partsMutex.Lock()
parts[partIndex] = UploadPart{
PartNumber: strconv.FormatInt(partNumber, 10),
Etag: uploadPart.Etag,
Crc32: crc32Value,
}
partsMutex.Unlock()
// update progress
progress := 10.0 + 90.0*float64(threadG.Success()+1)/float64(totalParts)
up(math.Min(progress, 95.0))
return nil
// calculate this part's size and offset
offset := partIndex * chunkSize
size := chunkSize
if partIndex == totalParts-1 {
size = fileSize - offset
}
var reader *stream.SectionReader
var rateLimitedRd io.Reader
crc32Value := ""
threadG.GoWithLifecycle(errgroup.Lifecycle{
Before: func(ctx context.Context) error {
if reader == nil {
var err error
reader, err = ss.GetSectionReader(offset, size)
if err != nil {
return err
}
hash.Reset()
w, err := utils.CopyWithBuffer(hash, reader)
if w != size {
return fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", size, w, err)
}
crc32Value = hex.EncodeToString(hash.Sum(nil))
rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
}
return nil
},
Do: func(ctx context.Context) error {
reader.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("%s?uploadid=%s&part_number=%d&phase=transfer", uploadUrl, uploadID, partNumber), rateLimitedRd)
if err != nil {
return err
}
req.Header = map[string][]string{
"Referer": {BaseURL + "/"},
"Origin": {BaseURL},
"User-Agent": {UserAgent},
"X-Storage-U": {d.UserId},
"Authorization": {storeInfo.Auth},
"Content-Type": {"application/octet-stream"},
"Content-Crc32": {crc32Value},
"Content-Length": {fmt.Sprintf("%d", size)},
"Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))},
}
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
bytes, _ := io.ReadAll(res.Body)
uploadResp := UploadResp{}
utils.Json.Unmarshal(bytes, &uploadResp)
if uploadResp.Code != 2000 {
return fmt.Errorf("upload part failed: %s", uploadResp.Message)
} else if uploadResp.Data.Crc32 != crc32Value {
return fmt.Errorf("upload part failed: crc32 mismatch, expected %s, got %s", crc32Value, uploadResp.Data.Crc32)
}
// record the successfully uploaded part
partsMutex.Lock()
parts[partIndex] = UploadPart{
PartNumber: strconv.FormatInt(partNumber, 10),
Etag: uploadResp.Data.Etag,
Crc32: crc32Value,
}
partsMutex.Unlock()
// update progress
progress := 10.0 + 90.0*float64(threadG.Success()+1)/float64(totalParts)
up(math.Min(progress, 95.0))
return nil
},
After: func(err error) {
ss.FreeSectionReader(reader)
},
})
}
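The part layout in UploadByMultipart is plain ceiling division, with the final part absorbing the remainder. A standalone illustration with assumed sizes (25 MiB file, 10 MiB slices; the real chunkSize comes from AdvanceOption.SliceSize):

```go
package main

import "fmt"

func main() {
	const fileSize, chunkSize = int64(25 << 20), int64(10 << 20)
	totalParts := (fileSize + chunkSize - 1) / chunkSize // ceiling division
	for part := int64(0); part < totalParts; part++ {
		offset := part * chunkSize
		size := chunkSize
		if part == totalParts-1 {
			size = fileSize - offset // last part takes the remainder
		}
		fmt.Printf("part %d: offset=%d size=%d\n", part+1, offset, size)
	}
}
```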
@ -680,42 +746,6 @@ func (d *Doubao) initMultipartUpload(config *UploadConfig, uploadUrl string, sto
return uploadResp.Data.UploadId, nil
}
// uploadPart uploads a single part
func (d *Doubao) uploadPart(config *UploadConfig, uploadUrl, uploadID string, partNumber int64, data []byte, crc32Value string) (resp UploadPart, err error) {
uploadResp := UploadResp{}
storeInfo := config.InnerUploadAddress.UploadNodes[0].StoreInfos[0]
_, err = d.uploadRequest(uploadUrl, http.MethodPost, storeInfo, func(req *resty.Request) {
req.SetHeaders(map[string]string{
"Content-Type": "application/octet-stream",
"Content-Crc32": crc32Value,
"Content-Length": fmt.Sprintf("%d", len(data)),
"Content-Disposition": fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI)),
})
req.SetQueryParams(map[string]string{
"uploadid": uploadID,
"part_number": strconv.FormatInt(partNumber, 10),
"phase": "transfer",
})
req.SetBody(data)
req.SetContentLength(true)
}, &uploadResp)
if err != nil {
return resp, err
}
if uploadResp.Code != 2000 {
return resp, fmt.Errorf("upload part failed: %s", uploadResp.Message)
} else if uploadResp.Data.Crc32 != crc32Value {
return resp, fmt.Errorf("upload part failed: crc32 mismatch, expected %s, got %s", crc32Value, uploadResp.Data.Crc32)
}
return uploadResp.Data, nil
}
// complete the multipart upload
func (d *Doubao) completeMultipartUpload(config *UploadConfig, uploadUrl, uploadID string, parts []UploadPart) error {
uploadResp := UploadResp{}
@ -784,13 +814,6 @@ func (d *Doubao) commitMultipartUpload(uploadConfig *UploadConfig) error {
return nil
}
// calculate CRC32
func calculateCRC32(data []byte) string {
hash := crc32.NewIEEE()
hash.Write(data)
return hex.EncodeToString(hash.Sum(nil))
}
// _retryOperation retries the given operation
func (d *Doubao) _retryOperation(operation string, fn func() error) error {
return retry.Do(


@ -12,17 +12,10 @@ type Addition struct {
}
var config = driver.Config{
Name: "DoubaoShare",
LocalSort: true,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: true,
NeedMs: false,
DefaultRoot: "/",
CheckStatus: false,
Alert: "",
NoOverwriteUpload: false,
Name: "DoubaoShare",
LocalSort: true,
NoUpload: true,
DefaultRoot: "/",
}
func init() {


@ -79,11 +79,11 @@ type ShareInfo struct {
RiskReviewStatus int `json:"risk_review_status"`
ConversationID string `json:"conversation_id"`
ParentID string `json:"parent_id"`
CreateTime int `json:"create_time"`
UpdateTime int `json:"update_time"`
CreateTime int64 `json:"create_time"`
UpdateTime int64 `json:"update_time"`
} `json:"first_node"`
NodeCount int `json:"node_count"`
CreateTime int `json:"create_time"`
CreateTime int64 `json:"create_time"`
Channel string `json:"channel"`
InfluencerType int `json:"influencer_type"`
}
@ -111,8 +111,8 @@ type FilePath []struct {
RiskReviewStatus int `json:"risk_review_status"`
ConversationID string `json:"conversation_id"`
ParentID string `json:"parent_id"`
CreateTime int `json:"create_time"`
UpdateTime int `json:"update_time"`
CreateTime int64 `json:"create_time"`
UpdateTime int64 `json:"update_time"`
}
type GetFileUrlResp struct {


@ -192,12 +192,11 @@ func (d *Dropbox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
url := d.contentBase + "/2/files/upload_session/append_v2"
reader := driver.NewLimitedUploadStream(ctx, io.LimitReader(stream, PartSize))
req, err := http.NewRequest(http.MethodPost, url, reader)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, reader)
if err != nil {
log.Errorf("failed to update file when append to upload session, err: %+v", err)
return err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Authorization", "Bearer "+d.AccessToken)


@ -13,18 +13,11 @@ type Addition struct {
ClientSecret string `json:"client_secret" required:"false" help:"Keep it empty if you don't have one"`
AccessToken string
RefreshToken string `json:"refresh_token" required:"true"`
RootNamespaceId string
RootNamespaceId string `json:"RootNamespaceId" required:"false"`
}
var config = driver.Config{
Name: "Dropbox",
LocalSort: false,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: false,
NeedMs: false,
DefaultRoot: "",
NoOverwriteUpload: true,
}


@ -169,13 +169,19 @@ func (d *Dropbox) getFiles(ctx context.Context, path string) ([]File, error) {
func (d *Dropbox) finishUploadSession(ctx context.Context, toPath string, offset int64, sessionId string) error {
url := d.contentBase + "/2/files/upload_session/finish"
req, err := http.NewRequest(http.MethodPost, url, nil)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil)
if err != nil {
return err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Authorization", "Bearer "+d.AccessToken)
if d.RootNamespaceId != "" {
apiPathRootJson, err := d.buildPathRootHeader()
if err != nil {
return err
}
req.Header.Set("Dropbox-API-Path-Root", apiPathRootJson)
}
uploadFinishArgs := UploadFinishArgs{
Commit: struct {
@ -214,13 +220,19 @@ func (d *Dropbox) finishUploadSession(ctx context.Context, toPath string, offset
func (d *Dropbox) startUploadSession(ctx context.Context) (string, error) {
url := d.contentBase + "/2/files/upload_session/start"
req, err := http.NewRequest(http.MethodPost, url, nil)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil)
if err != nil {
return "", err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Authorization", "Bearer "+d.AccessToken)
if d.RootNamespaceId != "" {
apiPathRootJson, err := d.buildPathRootHeader()
if err != nil {
return "", err
}
req.Header.Set("Dropbox-API-Path-Root", apiPathRootJson)
}
req.Header.Set("Dropbox-API-Arg", "{\"close\":false}")
res, err := base.HttpClient.Do(req)
@ -235,3 +247,11 @@ func (d *Dropbox) startUploadSession(ctx context.Context) (string, error) {
_ = res.Body.Close()
return sessionId, nil
}
func (d *Dropbox) buildPathRootHeader() (string, error) {
return utils.Json.MarshalToString(map[string]interface{}{
".tag": "root",
"root": d.RootNamespaceId,
})
}
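buildPathRootHeader produces the JSON value for the Dropbox-API-Path-Root header, which makes API calls resolve against the configured namespace instead of the user's home folder. What it emits, with a made-up RootNamespaceId:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	b, _ := json.Marshal(map[string]interface{}{
		".tag": "root",
		"root": "1234567890", // hypothetical RootNamespaceId
	})
	fmt.Println(string(b)) // {".tag":"root","root":"1234567890"}
}
```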


@ -16,17 +16,9 @@ type Addition struct {
}
var config = driver.Config{
Name: "FebBox",
LocalSort: false,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: true,
NeedMs: false,
DefaultRoot: "0",
CheckStatus: false,
Alert: "",
NoOverwriteUpload: false,
Name: "FebBox",
NoUpload: true,
DefaultRoot: "0",
}
func init() {


@ -31,13 +31,13 @@ func (c *customTokenSource) Token() (*oauth2.Token, error) {
v.Set("client_id", c.config.ClientID)
v.Set("client_secret", c.config.ClientSecret)
req, err := http.NewRequest("POST", c.config.TokenURL, strings.NewReader(v.Encode()))
req, err := http.NewRequestWithContext(c.ctx, http.MethodPost, c.config.TokenURL, strings.NewReader(v.Encode()))
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
resp, err := http.DefaultClient.Do(req.WithContext(c.ctx))
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}


@ -2,12 +2,16 @@ package ftp
import (
"context"
"errors"
"io"
stdpath "path"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/jlaffaye/ftp"
)
@ -15,6 +19,9 @@ type FTP struct {
model.Storage
Addition
conn *ftp.ServerConn
ctx context.Context
cancel context.CancelFunc
}
func (d *FTP) Config() driver.Config {
@ -26,12 +33,16 @@ func (d *FTP) GetAddition() driver.Additional {
}
func (d *FTP) Init(ctx context.Context) error {
return d.login()
d.ctx, d.cancel = context.WithCancel(context.Background())
var err error
d.conn, err = d._login(ctx)
return err
}
func (d *FTP) Drop(ctx context.Context) error {
if d.conn != nil {
_ = d.conn.Logout()
_ = d.conn.Quit()
d.cancel()
}
return nil
}
@ -61,19 +72,52 @@ func (d *FTP) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]m
}
func (d *FTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if err := d.login(); err != nil {
conn, err := d._login(ctx)
if err != nil {
return nil, err
}
r := NewFileReader(d.conn, encode(file.GetPath(), d.Encoding), file.GetSize())
link := &model.Link{
MFile: &stream.RateLimitFile{
File: r,
Limiter: stream.ServerDownloadLimit,
Ctx: ctx,
},
path := encode(file.GetPath(), d.Encoding)
size := file.GetSize()
resultRangeReader := func(context context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
length := httpRange.Length
if length < 0 || httpRange.Start+length > size {
length = size - httpRange.Start
}
var c *ftp.ServerConn
if ctx == context {
c = conn
} else {
var err error
c, err = d._login(context)
if err != nil {
return nil, err
}
}
resp, err := c.RetrFrom(path, uint64(httpRange.Start))
if err != nil {
return nil, err
}
var close utils.CloseFunc
if context == ctx {
close = resp.Close
} else {
close = func() error {
return errors.Join(resp.Close(), c.Quit())
}
}
return utils.ReadCloser{
Reader: io.LimitReader(resp, length),
Closer: close,
}, nil
}
return link, nil
return &model.Link{
RangeReader: &model.FileRangeReader{
RangeReaderIF: stream.RateLimitRangeReaderFunc(resultRangeReader),
},
SyncClosers: utils.NewSyncClosers(utils.CloseFunc(conn.Quit)),
}, nil
}
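The new Link answers each range request with a fresh RetrFrom, letting the FTP server seek to the start offset while the client enforces the length. A minimal sketch of that primitive (host, credentials, and path below are placeholders):

```go
package main

import (
	"context"
	"fmt"
	"io"
	"time"

	"github.com/jlaffaye/ftp"
)

func main() {
	ctx := context.Background()
	conn, err := ftp.Dial("ftp.example.com:21",
		ftp.DialWithShutTimeout(10*time.Second),
		ftp.DialWithContext(ctx))
	if err != nil {
		panic(err)
	}
	defer conn.Quit()
	if err := conn.Login("anonymous", "anonymous"); err != nil {
		panic(err)
	}
	// Read 4 KiB starting at byte 1024: the server seeks to the offset,
	// the client bounds the length with io.LimitReader.
	resp, err := conn.RetrFrom("/pub/file.bin", 1024)
	if err != nil {
		panic(err)
	}
	defer resp.Close()
	buf, _ := io.ReadAll(io.LimitReader(resp, 4096))
	fmt.Println("read", len(buf), "bytes")
}
```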
func (d *FTP) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {


@ -31,10 +31,11 @@ type Addition struct {
}
var config = driver.Config{
Name: "FTP",
LocalSort: true,
OnlyLocal: true,
DefaultRoot: "/",
Name: "FTP",
LocalSort: true,
OnlyLinkMFile: false,
DefaultRoot: "/",
NoLinkURL: true,
}
func init() {


@ -1,116 +1,43 @@
package ftp
import (
"io"
"os"
"sync"
"sync/atomic"
"context"
"fmt"
"time"
"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
"github.com/jlaffaye/ftp"
)
// helpers not defined in the Driver interface
func (d *FTP) login() error {
if d.conn != nil {
_, err := d.conn.CurrentDir()
if err == nil {
return nil
_, err, _ := singleflight.AnyGroup.Do(fmt.Sprintf("FTP.login:%p", d), func() (any, error) {
var err error
if d.conn != nil {
err = d.conn.NoOp()
if err != nil {
d.conn.Quit()
d.conn = nil
}
}
}
conn, err := ftp.Dial(d.Address, ftp.DialWithShutTimeout(10*time.Second))
if d.conn == nil {
d.conn, err = d._login(d.ctx)
}
return nil, err
})
return err
}
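The rewritten login wraps the reconnect in a singleflight group keyed by the driver pointer, so concurrent operations that all notice a dead connection trigger exactly one re-dial. A sketch of the same deduplication using golang.org/x/sync/singleflight (the repo's pkg/singleflight.AnyGroup is assumed to behave alike):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"golang.org/x/sync/singleflight"
)

func main() {
	var g singleflight.Group
	var dials int32
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// All five goroutines share one in-flight "login"; fn runs once.
			g.Do("ftp-login", func() (interface{}, error) {
				atomic.AddInt32(&dials, 1)
				time.Sleep(50 * time.Millisecond) // pretend to dial + authenticate
				return nil, nil
			})
		}()
	}
	wg.Wait()
	fmt.Println("dials:", atomic.LoadInt32(&dials)) // 1
}
```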
func (d *FTP) _login(ctx context.Context) (*ftp.ServerConn, error) {
conn, err := ftp.Dial(d.Address, ftp.DialWithShutTimeout(10*time.Second), ftp.DialWithContext(ctx))
if err != nil {
return err
return nil, err
}
err = conn.Login(d.Username, d.Password)
if err != nil {
return err
conn.Quit()
return nil, err
}
d.conn = conn
return nil
}
// FileReader is an FTP file reader that implements io.MFile for seeking.
type FileReader struct {
conn *ftp.ServerConn
resp *ftp.Response
offset atomic.Int64
readAtOffset int64
mu sync.Mutex
path string
size int64
}
func NewFileReader(conn *ftp.ServerConn, path string, size int64) *FileReader {
return &FileReader{
conn: conn,
path: path,
size: size,
}
}
func (r *FileReader) Read(buf []byte) (n int, err error) {
n, err = r.ReadAt(buf, r.offset.Load())
r.offset.Add(int64(n))
return
}
func (r *FileReader) ReadAt(buf []byte, off int64) (n int, err error) {
if off < 0 {
return -1, os.ErrInvalid
}
r.mu.Lock()
defer r.mu.Unlock()
if off != r.readAtOffset {
// have to restart the connection to correct the offset
_ = r.resp.Close()
r.resp = nil
}
if r.resp == nil {
r.resp, err = r.conn.RetrFrom(r.path, uint64(off))
r.readAtOffset = off
if err != nil {
return 0, err
}
}
n, err = r.resp.Read(buf)
r.readAtOffset += int64(n)
return
}
func (r *FileReader) Seek(offset int64, whence int) (int64, error) {
oldOffset := r.offset.Load()
var newOffset int64
switch whence {
case io.SeekStart:
newOffset = offset
case io.SeekCurrent:
newOffset = oldOffset + offset
case io.SeekEnd:
return r.size, nil
default:
return -1, os.ErrInvalid
}
if newOffset < 0 {
// offset out of range
return oldOffset, os.ErrInvalid
}
if newOffset == oldOffset {
// offset not changed, so return directly
return oldOffset, nil
}
r.offset.Store(newOffset)
return newOffset, nil
}
func (r *FileReader) Close() error {
if r.resp != nil {
return r.resp.Close()
}
return nil
return conn, nil
}


@ -9,6 +9,7 @@ import (
"text/template"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/ProtonMail/go-crypto/openpgp"
@ -96,7 +97,7 @@ func getPathCommonAncestor(a, b string) (ancestor, aChildName, bChildName, aRest
}
func getUsername(ctx context.Context) string {
user, ok := ctx.Value("user").(*model.User)
user, ok := ctx.Value(conf.UserKey).(*model.User)
if !ok {
return "<system>"
}


@ -15,17 +15,8 @@ type Addition struct {
}
var config = driver.Config{
Name: "GitHub Releases",
LocalSort: false,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: false,
NeedMs: false,
DefaultRoot: "",
CheckStatus: false,
Alert: "",
NoOverwriteUpload: false,
Name: "GitHub Releases",
NoUpload: true,
}
func init() {


@ -162,7 +162,7 @@ func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
SetBody(driver.NewLimitedUploadStream(ctx, stream))
}, nil)
} else {
err = d.chunkUpload(ctx, stream, putUrl)
err = d.chunkUpload(ctx, stream, putUrl, up)
}
return err
}

Some files were not shown because too many files have changed in this diff.