Compare commits


38 Commits
v4.1.1 ... main

Author SHA1 Message Date
3936e736e6 feat(drivers): add a driver that divides large files into multiple chunks (#1153) 2025-09-19 19:27:35 +08:00
68433d4f5b fix(local): cannot mkdir on specific platforms (#1304) 2025-09-19 15:34:58 +08:00
cc16cb35bf feat(style): add driver icons and disk usage (#1274)
* feat(style): add driver icons and disk usage

* feat(driver): add disk usage for 115_open, 123_open, aliyundrive_open and baidu_netdisk

* feat(driver): add disk usage for crypt, sftp and smb

* chore: clean unused variable

* feat(driver): add disk usage for cloudreve_v4

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(local): disk label check when getting disk usage

* feat(style): return details when accessing the manage page

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-09-19 11:59:11 +08:00
d3bc6321f4 chore(build): update Go version to 1.25.0 across workflows and build scripts (#1290)
build: update Go version to 1.25.0 across workflows and build scripts

fixes #1286
2025-09-16 18:44:29 +08:00
cbbb5ad231 fix(stream): http chunked upload issue (#1152)
* fix(stream): http chunked upload issue

* fix(stream): use MmapThreshold

* fix(stream): improve caching mechanism and handle size=0 case

* fix bug

* fix(buffer): optimize ReadAt method for improved performance

* fix(upload): handle Content-Length and File-Size headers for better size management

* fix(189pc): remove duplicate rate limiting

* fix(upload): handle negative file size during streaming uploads

* fix(upload): update header key from File-Size to X-File-Size for size retrieval

---------

Co-authored-by: j2rong4cn <j2rong@qq.com>
2025-09-15 19:36:16 +08:00
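The size-handling changes in the commit above decide the upload size from request headers before falling back to a stream of unknown length. A minimal sketch of that idea, assuming only the standard library and the Content-Length / X-File-Size header names mentioned in the bullets (not the project's actual implementation):

```go
package stream

import (
	"net/http"
	"strconv"
)

// uploadSize returns the declared upload size, preferring Content-Length and
// falling back to the X-File-Size header named in the commit above.
// A return value of -1 means the size is unknown and the body has to be
// treated as a chunked stream of unknown length.
func uploadSize(r *http.Request) int64 {
	if r.ContentLength > 0 {
		return r.ContentLength
	}
	if v := r.Header.Get("X-File-Size"); v != "" {
		if n, err := strconv.ParseInt(v, 10, 64); err == nil && n >= 0 {
			return n
		}
	}
	return -1
}
```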
c1d03c5bcc fix(security): zip slip (#1228)
* fix(security): Zip Slip

* chore: remove repeated clean

* fix: archives,iso9660 and rardecode module

---------

Co-authored-by: ILoveScratch <ilovescratch@foxmail.com>
2025-09-15 13:25:21 +08:00
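Zip Slip is path traversal through archive entry names such as `../../etc/passwd`. A minimal sketch of the usual guard, independent of the archives, iso9660 and rardecode modules touched in the commit above:

```go
package archive

import (
	"fmt"
	"path/filepath"
	"strings"
)

// safeJoin resolves an archive entry name under destDir and rejects any entry
// that would escape destDir after cleaning (the Zip Slip pattern).
func safeJoin(destDir, entryName string) (string, error) {
	dest := filepath.Join(destDir, entryName) // Join also runs filepath.Clean
	cleanDest := filepath.Clean(destDir)
	if dest != cleanDest && !strings.HasPrefix(dest, cleanDest+string(filepath.Separator)) {
		return "", fmt.Errorf("illegal entry path: %s", entryName)
	}
	return dest, nil
}
```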
61a8ed515f fix(123): add get and list hash info (#1278) 2025-09-14 21:36:54 +08:00
bbb7c06504 feat(alias): support pass through provider (#1269) 2025-09-14 21:36:38 +08:00
8bbdb272d4 docs(readme): extend driver list with newest support (#1271) 2025-09-13 20:41:17 +08:00
c15ae94307 feat(189PC,189TV): add refreshToken and qrcode login (#1205)
### Key Changes
- **189PC**: Add QR code login and refresh token support
- **189TV**: Add session refresh mechanism and fix TempUuid persistence issue
- **Both**: Implement session keep-alive with cron jobs (5min interval)

### Features
- QR code authentication for 189PC as alternative to password login
- Automatic token refresh to avoid frequent re-authentication
- Session keep-alive to maintain long-term connections
- Retry logic with max attempts to prevent infinite loops

### Fixes
- Fixed 189TV TempUuid causing storage corruption on QR code reload
- Enhanced error handling for token expiration scenarios
2025-09-13 13:59:47 +08:00
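The 5-minute keep-alive with a refresh fallback described above can be pictured with a plain ticker; the driver diffs later in this compare wire the real thing up with the project's `pkg/cron` helper (`cron.NewCron(time.Minute * 5)`). A minimal sketch, with `keepAlive` and `refreshSession` as stand-ins for the drivers' actual methods:

```go
package cloud189

import (
	"log"
	"time"
)

// startKeepAlive pings the session every 5 minutes and falls back to a session
// refresh when the ping fails, mirroring the behaviour described above.
// keepAlive and refreshSession are placeholders for the driver's real methods.
func startKeepAlive(keepAlive, refreshSession func() error, stop <-chan struct{}) {
	ticker := time.NewTicker(5 * time.Minute)
	go func() {
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				if err := keepAlive(); err != nil {
					log.Printf("keepAlive failed: %v, trying session refresh", err)
					if err := refreshSession(); err != nil {
						log.Printf("session refresh failed: %v", err)
					}
				}
			case <-stop:
				return
			}
		}
	}()
}
```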
f1a5048558 feat(drivers): add cnb_releases (#1033)
* feat(drivers): add cnb_releases

* feat(cnb_release): implement reference

* refactor(cnb_releases): get release info by ID instead of tag name

* feat(cnb_releases): add option to use tag name instead of release name

* fix(cnb_releases): set default root and improve release info retrieval

* feat(cnb_releases): implement Put

* perf(cnb_release): use io.Pipe to stream file upload

* perf(cnb_releases): add context timeout for file upload request

* feat(cnb_releases): implement Remove

* feat(cnb_releases): implement MakeDir

* feat(cnb_releases): implement Rename

* feat(cnb_releases): require repo and token in Addition

* chore(cnb_releases): remove unused code

* Revert 'perf(cnb_release): use io.Pipe to stream file upload'

* perf(cnb_releases): optimize upload with MultiReader

* feat(cnb_releases): add DefaultBranch

---------

Co-authored-by: ILoveScratch <ilovescratch@foxmail.com>
2025-09-11 18:11:32 +08:00
1fe26bff9a feat(local): auto create recycle dir if not exists (#1244) 2025-09-10 20:57:21 +08:00
433dcd156b fix(ci): add tag_name to upload assets step (#1234)
fix(release): add tag_name to upload assets step
2025-09-06 22:51:05 +08:00
e97f0a289e feat(cloudreve_v4): enhance token management (#1171)
* fix(cloudreve_v4): improve error handling in request method

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* feat(cloudreve_v4): enhance token management with expiration checks and refresh logic

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* feat(cloudreve_v4): add JWT structures for access and refresh tokens; validate access token on initialization

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(cloudreve_v4): improve error messages

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
2025-09-04 19:41:41 +08:00
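Validating the access token on initialization and refreshing it before expiry usually comes down to decoding the JWT payload and checking its `exp` claim. A minimal sketch under that assumption (payload inspection only, no signature verification; not the driver's actual code):

```go
package cloudreve

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
	"time"
)

// tokenExpired decodes the payload segment of a JWT and reports whether its
// exp claim is within leeway of expiring, so the caller can refresh early.
func tokenExpired(token string, leeway time.Duration) (bool, error) {
	parts := strings.Split(token, ".")
	if len(parts) != 3 {
		return false, fmt.Errorf("not a JWT: %d segments", len(parts))
	}
	payload, err := base64.RawURLEncoding.DecodeString(parts[1])
	if err != nil {
		return false, err
	}
	var claims struct {
		Exp int64 `json:"exp"`
	}
	if err := json.Unmarshal(payload, &claims); err != nil {
		return false, err
	}
	return time.Now().Add(leeway).Unix() >= claims.Exp, nil
}
```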
89f35170b3 fix(fs): clear cache after directory rename to ensure consistency (#1193)
Clear cache after renaming the directory.
2025-09-01 18:47:54 +08:00
8188fb2d7d fix(123open): get direct link (#1185)
* fix(123open): correct query parameter name from 'fileId' to 'fileID' in getDirectLink function

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(123open): change SpaceTempExpr type from 'string' to 'int64' in UserInfoResp struct

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(123open): comment out unused fields in UserInfoResp struct

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(123open): add getUID method and cache UID

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-31 15:47:38 +08:00
87cf95f50b fix(139): refactor part upload logic (#1184)
* fix(139): refactor part upload logic

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(139): handle upload errors

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(139): sort upload parts by PartNumber before uploading

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(139): improve error handling

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(139): add validation for upload part index to prevent out of bounds errors

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-31 15:47:12 +08:00
8ab26cb823 fix(123open): change DirectLink type from 'boolean' to 'bool' (#1180)
Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-29 19:06:37 +08:00
5880c8e1af fix(189tv): use rate-limited upload stream in OldUpload function (#1176)
* fix(189tv): use rate-limited upload stream in OldUpload function

* fix(189tv): wrap tempFile with io.NopCloser to prevent premature closure in OldUpload function

* .
2025-08-29 16:01:50 +08:00
14bf4ecb4c fix(share): support custom proxy url (#1130)
* feat(share): support custom proxy url

* fix(share): count access

* fix: maybe a path traversal vulnerability?
2025-08-28 22:11:19 +08:00
04a5e58781 fix(server): can't edit .md source files (#1159)
* fix(server): can't edit .md source files

* chore

* add ignore direct link args
2025-08-28 16:19:57 +08:00
bbd4389345 fix(wopan): use fixed timezone for parsing time (#1170)
fix(wopan): update getTime function to use fixed timezone for parsing

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-28 13:02:02 +08:00
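Parsing the API's timestamps against a fixed UTC+8 offset keeps the result independent of the host's local timezone, which is what the fix above does. A minimal sketch; the layout string is an assumption for illustration, not necessarily the driver's actual format:

```go
package wopan

import "time"

// fixedCST is UTC+8; parsing against it is independent of the host timezone.
var fixedCST = time.FixedZone("CST", 8*60*60)

// parseWopanTime parses a timestamp like "2025-08-28 13:02:02" as UTC+8.
// The layout is assumed for illustration only.
func parseWopanTime(s string) (time.Time, error) {
	return time.ParseInLocation("2006-01-02 15:04:05", s, fixedCST)
}
```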
f350ccdf95 fix(189pc): sliceSize must not be equal to fileSize (#1169)
* fix(189pc): sliceSize not equal to fileSize

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* Update comment for sliceSize parameter

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-28 11:32:40 +08:00
4f2de9395e feat(degoo): token improvement (#1149)
* Update driver.go

Signed-off-by: Caspian <app@caspian.im>

* Update meta.go

Signed-off-by: Caspian <app@caspian.im>

* Update util.go

Signed-off-by: Caspian <app@caspian.im>

* Update util.go

Signed-off-by: Caspian <app@caspian.im>

* Update util.go

Signed-off-by: Caspian <app@caspian.im>

* Update util.go

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* make account optional

* ensure username and password

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

---------

Signed-off-by: Caspian <app@caspian.im>
Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-26 01:22:59 +08:00
b0dbbebfb0 feat(drivers): add Teldrive driver (#1116)
https://github.com/tgdrive/teldrive

https://teldrive-docs.pages.dev/docs/api

Implemented:
* copy
* move
* link (302 share and local proxy)
* chunked uploads
* rename

Not implemented:
- OpenList QR code login
- refresh token

https://github.com/OpenListTeam/OpenList-Docs/pull/155


* feat(Teldrive): Add driver Teldrive

* fix(teldrive): force webproxy and memory optimized

* chore(teldrive): go fmt

* chore(teldrive): remove TODO

* chore(teldrive): organize code

* feat(teldrive): add UseShareLink option and support 302

* fix(teldrive): standardize API path construction

* fix(teldrive): trim trailing slash from Address in Init method

* chore(teldrive): update help text for UseShareLink field in Addition struct

* fix(teldrive): set 10 MiB as default chunk size

---------

Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
Co-authored-by: ILoveScratch <ilovescratch@foxmail.com>
2025-08-25 01:34:08 +08:00
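Chunked uploading with the 10 MiB default mentioned above splits the stream into fixed-size parts and sends them one at a time. A minimal sketch of the splitting loop, with a hypothetical `uploadPart` callback standing in for the Teldrive API call:

```go
package teldrive

import "io"

const defaultChunkSize = 10 << 20 // 10 MiB, the default mentioned in the commit

// uploadChunks reads src in chunks of chunkSize and hands each chunk to
// uploadPart(index, data). uploadPart is a stand-in for the real API call and
// must consume data before returning, because the buffer is reused.
func uploadChunks(src io.Reader, chunkSize int64, uploadPart func(index int, data []byte) error) error {
	if chunkSize <= 0 {
		chunkSize = defaultChunkSize
	}
	buf := make([]byte, chunkSize)
	for index := 0; ; index++ {
		n, err := io.ReadFull(src, buf)
		if n > 0 {
			if upErr := uploadPart(index, buf[:n]); upErr != nil {
				return upErr
			}
		}
		if err == io.EOF || err == io.ErrUnexpectedEOF {
			return nil // last (possibly short) chunk sent
		}
		if err != nil {
			return err
		}
	}
}
```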
0c27b4bd47 docs(contributing): update guidelines (#983)
[skip ci]

* docs(contributing): update guidelines

* docs(contributing): clarify fork

* docs(contributing): sync translation

Co-authored-by: Yinan Qin <39023210+elysia-best@users.noreply.github.com>
Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* docs(contributing): add label and cc reminder

* docs(contributing): remove ensure new branch from checklist

* docs(contributing): replace generic GitHub URLs with user-specific ones

* docs(contributing): make branch deletion after PR merge optional

* docs(contributing): keep --recurse-submodules

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
Co-authored-by: Yinan Qin <39023210+elysia-best@users.noreply.github.com>
2025-08-24 20:13:11 +08:00
736cd9e5f2 fix(quark): fix getTranscodingLink (#1136)
The first video info entry may not contain a URL

* fix(quark): fix getTranscodingLink

* fix(quark_tv): fix getTranscodingLink
2025-08-24 19:55:10 +08:00
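The fix above notes that the first video info entry may not carry a URL, so the lookup has to scan for the first non-empty one. A minimal sketch with a hypothetical `VideoInfo` type:

```go
package quark

import "errors"

// VideoInfo is a hypothetical stand-in for the transcoding entries returned by
// the API; only the URL field matters for this sketch.
type VideoInfo struct {
	Resolution string
	URL        string
}

// pickTranscodingLink returns the first entry that actually has a URL instead
// of blindly using the first element of the list.
func pickTranscodingLink(infos []VideoInfo) (string, error) {
	for _, info := range infos {
		if info.URL != "" {
			return info.URL, nil
		}
	}
	return "", errors.New("no playable transcoding link found")
}
```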
c7a603c926 fix(115): fix get 115 app version (#1137) 2025-08-24 19:50:21 +08:00
a28d6d5693 fix(123_open): fix token refresh (#1121) 2025-08-23 23:01:41 +08:00
e59d2233e2 feat(drivers): add Degoo driver (#1097)
* Create driver.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Create util.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Create types.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Create meta.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Update drivers/degoo/driver.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: CaspianGUAN <app@caspian.im>

* Update drivers/degoo/driver.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: CaspianGUAN <app@caspian.im>

* Update driver.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Update meta.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Update types.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Update util.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Update driver.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Update util.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Update drivers/degoo/util.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: CaspianGUAN <app@caspian.im>

* Update util.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* refactor(degoo): add Degoo driver integration and update API handling

* fix(degoo): apply suggestions

---------

Signed-off-by: CaspianGUAN <app@caspian.im>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-23 22:47:02 +08:00
01914a06ef refactor(ci): add permissions check at docker's entrypoint (#1128)
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-22 19:35:48 +08:00
6499374d1c fix(deps): update 115driver to v1.1.1 (close SheltonZhu/115driver#57) (#1115) 2025-08-20 21:33:21 +08:00
b054919d5c feat(ilanzou): add support for rapid upload and fix duplication handling (#1065)
* feat(ilanzou): add support for rapid upload token handling

* feat(ilanzou): add NoOverwriteUpload option
2025-08-19 19:19:44 +08:00
048ee9c2e5 feat(server): adapting #1099 to #991 (#1102) 2025-08-19 15:48:59 +08:00
23394548ca feat(123_open): add DirectLink option (#1045)
* feat(123_open): add `UseDirectLink` option

* feat(123_open): update rate limit rules

* fix(123_open): update api

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* feat(123_open): enhance direct link functionality with private key and expiration

* refactor(123_open): use UUID for random generation

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-19 15:23:10 +08:00
b04677b806 feat(server): add error page and status code (#1099) 2025-08-19 15:18:12 +08:00
e4c902dd93 feat(share): support more secure file sharing (#991)
Provides a file-sharing flow similar to most cloud drives: a share can hide the source file path by forcing the Web proxy, supports a share code, a maximum access count and an expiration time, and does not require enabling the guest user.

The following can be adjusted in global settings:
- whether to force the Web proxy
- whether to allow preview
- whether to allow previewing archive files
- what the "Copy link" button copies after a file is shared

Frontend: OpenListTeam/OpenList-Frontend#156
Docs: OpenListTeam/OpenList-Docs#130

Close #183
Close #526
Close #860
Close #892
Close #1079


* feat(share): support more secure file sharing

* feat(share): add archive preview

* fix(share): fix some bugs

* feat(openlist_share): add openlist share driver

* fix(share): lack unwrap when get virtual path

* fix: use unwrapPath instead of path for virtual file name comparison

* fix(share): change request method of /api/share/list from GET to Any

* fix(share): path traversal vulnerability in sharing path check

* fix(share): files from the alias driver could not get a URL when the Web proxy was disabled

* fix(sharing): update error message for sharing root link extraction

---------

Co-authored-by: Suyunmeng <69945917+Suyunmeng@users.noreply.github.com>
Co-authored-by: j2rong4cn <j2rong@qq.com>
2025-08-19 15:10:02 +08:00
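The sharing options described above (share code, maximum access count, expiration, forced Web proxy) can be pictured as a per-share record plus a validity check. A purely hypothetical sketch for illustration; the real model lives in the linked frontend and backend changes:

```go
package sharing

import "time"

// Share is a hypothetical shape of a share record implied by the description:
// the source path stays hidden behind the Web proxy, and access can be gated
// by a code, a maximum access count and an expiration time.
type Share struct {
	ID          string
	SourcePath  string     // never exposed when the Web proxy is forced
	Code        string     // optional share code
	MaxAccesses int        // 0 means unlimited
	Accessed    int        // incremented on each visit
	ExpiresAt   *time.Time // nil means no expiration
}

// Usable reports whether the share can still be served.
func (s *Share) Usable(now time.Time) bool {
	if s.ExpiresAt != nil && now.After(*s.ExpiresAt) {
		return false
	}
	if s.MaxAccesses > 0 && s.Accessed >= s.MaxAccesses {
		return false
	}
	return true
}
```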
5d8bd258c0 refactor(docker): reduce docker image size (#1091)
* fix(docker): reduce image size

* refactor(docker): update user and group creation

* Update Dockerfile

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-19 10:27:33 +08:00
133 changed files with 6591 additions and 633 deletions

.github/PULL_REQUEST_TEMPLATE.md (new file, 56 lines)
View File

@ -0,0 +1,56 @@
<!--
Provide a general summary of your changes in the Title above.
The PR title must start with `feat(): `, `docs(): `, `fix(): `, `style(): `, or `refactor(): `, `chore(): `. For example: `feat(component): add new feature`.
If it spans multiple components, use the main component as the prefix and enumerate in the title, describe in the body.
-->
<!--
在上方标题中提供您更改的总体摘要。
PR 标题需以 `feat(): `, `docs(): `, `fix(): `, `style(): `, `refactor(): `, `chore(): ` 其中之一开头,例如:`feat(component): 新增功能`
如果跨多个组件,请使用主要组件作为前缀,并在标题中枚举、描述中说明。
-->
## Description / 描述
<!-- Describe your changes in detail -->
<!-- 详细描述您的更改 -->
## Motivation and Context / 背景
<!-- Why is this change required? What problem does it solve? -->
<!-- 为什么需要此更改?它解决了什么问题? -->
<!-- If it fixes an open issue, please link to the issue here. -->
<!-- 如果修复了一个打开的issue请在此处链接到该issue -->
Closes #XXXX
<!-- or -->
<!-- 或者 -->
Relates to #XXXX
## How Has This Been Tested? / 测试
<!-- Please describe in detail how you tested your changes. -->
<!-- 请详细描述您如何测试更改 -->
## Checklist / 检查清单
<!-- Go over all the following points, and put an `x` in all the boxes that apply. -->
<!-- 检查以下所有要点,并在所有适用的框中打`x` -->
<!-- If you're unsure about any of these, don't hesitate to ask. We're here to help! -->
<!-- 如果您对其中任何一项不确定,请不要犹豫提问。我们会帮助您! -->
- [ ] I have read the [CONTRIBUTING](https://github.com/OpenListTeam/OpenList/blob/main/CONTRIBUTING.md) document.
我已阅读 [CONTRIBUTING](https://github.com/OpenListTeam/OpenList/blob/main/CONTRIBUTING.md) 文档。
- [ ] I have formatted my code with `go fmt` or [prettier](https://prettier.io/).
我已使用 `go fmt` 或 [prettier](https://prettier.io/) 格式化提交的代码。
- [ ] I have added appropriate labels to this PR (or mentioned needed labels in the description if lacking permissions).
我已为此 PR 添加了适当的标签(如无权限或需要的标签不存在,请在描述中说明,管理员将后续处理)。
- [ ] I have requested review from relevant code authors using the "Request review" feature when applicable.
我已在适当情况下使用"Request review"功能请求相关代码作者进行审查。
- [ ] I have updated the repository accordingly (If its needed).
我已相应更新了相关仓库(若适用)。
- [ ] [OpenList-Frontend](https://github.com/OpenListTeam/OpenList-Frontend) #XXXX
- [ ] [OpenList-Docs](https://github.com/OpenListTeam/OpenList-Docs) #XXXX

View File

@ -87,7 +87,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: "1.24.5"
go-version: "1.25.0"
- name: Setup web
run: bash build.sh dev web

View File

@ -33,7 +33,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: "1.24.5"
go-version: "1.25.0"
- name: Setup web
run: bash build.sh dev web

View File

@ -46,7 +46,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: '1.24'
go-version: '1.25.0'
- name: Checkout
uses: actions/checkout@v4
@ -73,4 +73,5 @@ jobs:
with:
files: build/compress/*
prerelease: false
tag_name: ${{ github.event.release.tag_name }}

View File

@ -47,7 +47,7 @@ jobs:
- uses: actions/setup-go@v5
with:
go-version: 'stable'
go-version: '1.25.0'
- name: Cache Musl
id: cache-musl
@ -87,7 +87,7 @@ jobs:
- uses: actions/setup-go@v5
with:
go-version: 'stable'
go-version: '1.25.0'
- name: Cache Musl
id: cache-musl

View File

@ -36,7 +36,7 @@ jobs:
- uses: actions/setup-go@v5
with:
go-version: 'stable'
go-version: '1.25.0'
- name: Cache Musl
id: cache-musl

View File

@ -2,106 +2,76 @@
## Setup your machine
`OpenList` is written in [Go](https://golang.org/) and [React](https://reactjs.org/).
`OpenList` is written in [Go](https://golang.org/) and [SolidJS](https://www.solidjs.com/).
Prerequisites:
- [git](https://git-scm.com)
- [Go 1.20+](https://golang.org/doc/install)
- [Go 1.24+](https://golang.org/doc/install)
- [gcc](https://gcc.gnu.org/)
- [nodejs](https://nodejs.org/)
Clone `OpenList` and `OpenList-Frontend` anywhere:
## Cloning a fork
Fork and clone `OpenList` and `OpenList-Frontend` anywhere:
```shell
$ git clone https://github.com/OpenListTeam/OpenList.git
$ git clone --recurse-submodules https://github.com/OpenListTeam/OpenList-Frontend.git
$ git clone https://github.com/<your-username>/OpenList.git
$ git clone --recurse-submodules https://github.com/<your-username>/OpenList-Frontend.git
```
## Creating a branch
Create a new branch from the `main` branch, with an appropriate name.
```shell
$ git checkout -b <branch-name>
```
You should switch to the `main` branch for development.
## Preview your change
### backend
```shell
$ go run main.go
```
### frontend
```shell
$ pnpm dev
```
## Add a new driver
Copy `drivers/template` folder and rename it, and follow the comments in it.
## Create a commit
Commit messages should be well formatted, and to make that "standardized".
### Commit Message Format
Each commit message consists of a **header**, a **body** and a **footer**. The header has a special
format that includes a **type**, a **scope** and a **subject**:
Submit your pull request. For PR titles, follow [Conventional Commits](https://www.conventionalcommits.org).
```
<type>(<scope>): <subject>
<BLANK LINE>
<body>
<BLANK LINE>
<footer>
```
https://github.com/OpenListTeam/OpenList/issues/376
The **header** is mandatory and the **scope** of the header is optional.
Any line of the commit message cannot be longer than 100 characters! This allows the message to be easier
to read on GitHub as well as in various git tools.
### Revert
If the commit reverts a previous commit, it should begin with `revert: `, followed by the header
of the reverted commit.
In the body it should say: `This reverts commit <hash>.`, where the hash is the SHA of the commit
being reverted.
### Type
Must be one of the following:
* **feat**: A new feature
* **fix**: A bug fix
* **docs**: Documentation only changes
* **style**: Changes that do not affect the meaning of the code (white-space, formatting, missing
semi-colons, etc)
* **refactor**: A code change that neither fixes a bug nor adds a feature
* **perf**: A code change that improves performance
* **test**: Adding missing or correcting existing tests
* **build**: Affects project builds or dependency modifications
* **revert**: Restore the previous commit
* **ci**: Continuous integration of related file modifications
* **chore**: Changes to the build process or auxiliary tools and libraries such as documentation
generation
* **release**: Release a new version
### Scope
The scope could be anything specifying place of the commit change. For example `$location`,
`$browser`, `$compile`, `$rootScope`, `ngHref`, `ngClick`, `ngView`, etc...
You can use `*` when the change affects more than a single scope.
### Subject
The subject contains succinct description of the change:
* use the imperative, present tense: "change" not "changed" nor "changes"
* don't capitalize first letter
* no dot (.) at the end
### Body
Just as in the **subject**, use the imperative, present tense: "change" not "changed" nor "changes".
The body should include the motivation for the change and contrast this with previous behavior.
### Footer
The footer should contain any information about **Breaking Changes** and is also the place to
[reference GitHub issues that this commit closes](https://help.github.com/articles/closing-issues-via-commit-messages/).
**Breaking Changes** should start with the word `BREAKING CHANGE:` with a space or two newlines.
The rest of the commit message is then used for this.
It's suggested to sign your commits. See: [How to sign commits](https://docs.github.com/en/authentication/managing-commit-signature-verification/signing-commits)
## Submit a pull request
Push your branch to your `openlist` fork and open a pull request against the
`main` branch.
Please make sure your code has been formatted with `go fmt` or [prettier](https://prettier.io/) before submitting.
Push your branch to your `openlist` fork and open a pull request against the `main` branch.
## Merge your pull request
Your pull request will be merged after review. Please wait for the maintainer to merge your pull request after review.
At least 1 approving review is required by reviewers with write access. You can also request a review from maintainers.
## Delete your branch
(Optional) After your pull request is merged, you can delete your branch.
---
Thank you for your contribution! Let's make OpenList better together!

View File

@ -20,11 +20,12 @@ ARG GID=1001
WORKDIR /opt/openlist/
COPY --chmod=755 --from=builder /app/bin/openlist ./
COPY --chmod=755 entrypoint.sh /entrypoint.sh
RUN adduser -u ${UID} -g ${GID} -h /opt/openlist/data -D -s /bin/sh ${USER} \
&& chown -R ${UID}:${GID} /opt \
&& chown -R ${UID}:${GID} /entrypoint.sh
RUN addgroup -g ${GID} ${USER} && \
adduser -D -u ${UID} -G ${USER} ${USER} && \
mkdir -p /opt/openlist/data
COPY --from=builder --chmod=755 --chown=${UID}:${GID} /app/bin/openlist ./
COPY --chmod=755 --chown=${UID}:${GID} entrypoint.sh /entrypoint.sh
USER ${USER}
RUN /entrypoint.sh version

View File

@ -10,12 +10,12 @@ ARG GID=1001
WORKDIR /opt/openlist/
COPY --chmod=755 /build/${TARGETPLATFORM}/openlist ./
COPY --chmod=755 entrypoint.sh /entrypoint.sh
RUN addgroup -g ${GID} ${USER} && \
adduser -D -u ${UID} -G ${USER} ${USER} && \
mkdir -p /opt/openlist/data
RUN adduser -u ${UID} -g ${GID} -h /opt/openlist/data -D -s /bin/sh ${USER} \
&& chown -R ${UID}:${GID} /opt \
&& chown -R ${UID}:${GID} /entrypoint.sh
COPY --chmod=755 --chown=${UID}:${GID} /build/${TARGETPLATFORM}/openlist ./
COPY --chmod=755 --chown=${UID}:${GID} entrypoint.sh /entrypoint.sh
USER ${USER}
RUN /entrypoint.sh version

View File

@ -74,7 +74,6 @@ Thank you for your support and understanding of the OpenList project.
- [x] [Thunder](https://pan.xunlei.com)
- [x] [Lanzou](https://www.lanzou.com)
- [x] [ILanzou](https://www.ilanzou.com)
- [x] [Aliyundrive share](https://www.alipan.com)
- [x] [Google photo](https://photos.google.com)
- [x] [Mega.nz](https://mega.nz)
- [x] [Baidu photo](https://photo.baidu.com)
@ -85,6 +84,16 @@ Thank you for your support and understanding of the OpenList project.
- [x] [FeijiPan](https://www.feijipan.com)
- [x] [dogecloud](https://www.dogecloud.com/product/oss)
- [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
- [x] [Chaoxing](https://www.chaoxing.com)
- [x] [CNB](https://cnb.cool/)
- [x] [Degoo](https://degoo.com)
- [x] [Doubao](https://www.doubao.com)
- [x] [Febbox](https://www.febbox.com)
- [x] [GitHub](https://github.com)
- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
- [x] [Teldrive](https://github.com/tgdrive/teldrive)
- [x] [Weiyun](https://www.weiyun.com)
- [x] Easy to deploy and out-of-the-box
- [x] File preview (PDF, markdown, code, plain text, ...)
- [x] Image preview in gallery mode

View File

@ -74,7 +74,6 @@ OpenList 是一个由 OpenList 团队独立维护的开源项目,遵循 AGPL-3
- [x] [迅雷网盘](https://pan.xunlei.com)
- [x] [蓝奏云](https://www.lanzou.com)
- [x] [蓝奏云优享版](https://www.ilanzou.com)
- [x] [阿里云盘分享](https://www.alipan.com)
- [x] [Google 相册](https://photos.google.com)
- [x] [Mega.nz](https://mega.nz)
- [x] [百度相册](https://photo.baidu.com)
@ -85,6 +84,15 @@ OpenList 是一个由 OpenList 团队独立维护的开源项目,遵循 AGPL-3
- [x] [飞机盘](https://www.feijipan.com)
- [x] [多吉云](https://www.dogecloud.com/product/oss)
- [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
- [x] [超星](https://www.chaoxing.com)
- [x] [CNB](https://cnb.cool/)
- [x] [Degoo](https://degoo.com)
- [x] [豆包](https://www.doubao.com)
- [x] [Febbox](https://www.febbox.com)
- [x] [GitHub](https://github.com)
- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
- [x] [Teldrive](https://github.com/tgdrive/teldrive)
- [x] [微云](https://www.weiyun.com)
- [x] 部署方便,开箱即用
- [x] 文件预览PDF、markdown、代码、纯文本等
- [x] 画廊模式下的图片预览

View File

@ -74,7 +74,6 @@ OpenListプロジェクトへのご支援とご理解をありがとうござい
- [x] [Thunder](https://pan.xunlei.com)
- [x] [Lanzou](https://www.lanzou.com)
- [x] [ILanzou](https://www.ilanzou.com)
- [x] [Aliyundrive share](https://www.alipan.com)
- [x] [Google photo](https://photos.google.com)
- [x] [Mega.nz](https://mega.nz)
- [x] [Baidu photo](https://photo.baidu.com)
@ -85,6 +84,15 @@ OpenListプロジェクトへのご支援とご理解をありがとうござい
- [x] [FeijiPan](https://www.feijipan.com)
- [x] [dogecloud](https://www.dogecloud.com/product/oss)
- [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
- [x] [Chaoxing](https://www.chaoxing.com)
- [x] [CNB](https://cnb.cool/)
- [x] [Degoo](https://degoo.com)
- [x] [Doubao](https://www.doubao.com)
- [x] [Febbox](https://www.febbox.com)
- [x] [GitHub](https://github.com)
- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
- [x] [Teldrive](https://github.com/tgdrive/teldrive)
- [x] [Weiyun](https://www.weiyun.com)
- [x] 簡単にデプロイでき、すぐに使える
- [x] ファイルプレビューPDF、markdown、コード、テキストなど
- [x] ギャラリーモードでの画像プレビュー

View File

@ -74,7 +74,6 @@ Dank u voor uw ondersteuning en begrip
- [x] [Thunder](https://pan.xunlei.com)
- [x] [Lanzou](https://www.lanzou.com)
- [x] [ILanzou](https://www.ilanzou.com)
- [x] [Aliyundrive share](https://www.alipan.com)
- [x] [Google photo](https://photos.google.com)
- [x] [Mega.nz](https://mega.nz)
- [x] [Baidu photo](https://photo.baidu.com)
@ -85,6 +84,15 @@ Dank u voor uw ondersteuning en begrip
- [x] [FeijiPan](https://www.feijipan.com)
- [x] [dogecloud](https://www.dogecloud.com/product/oss)
- [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
- [x] [Chaoxing](https://www.chaoxing.com)
- [x] [CNB](https://cnb.cool/)
- [x] [Degoo](https://degoo.com)
- [x] [Doubao](https://www.doubao.com)
- [x] [Febbox](https://www.febbox.com)
- [x] [GitHub](https://github.com)
- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
- [x] [Teldrive](https://github.com/tgdrive/teldrive)
- [x] [Weiyun](https://www.weiyun.com)
- [x] Eenvoudig te implementeren en direct te gebruiken
- [x] Bestandsvoorbeeld (PDF, markdown, code, platte tekst, ...)
- [x] Afbeeldingsvoorbeeld in galerijweergave

View File

@ -236,7 +236,7 @@ BuildRelease() {
BuildLoongGLIBC() {
local target_abi="$2"
local output_file="$1"
local oldWorldGoVersion="1.24.3"
local oldWorldGoVersion="1.25.0"
if [ "$target_abi" = "abi1.0" ]; then
echo building for linux-loong64-abi1.0
@ -254,13 +254,13 @@ BuildLoongGLIBC() {
# Download and setup patched Go compiler for old-world
if ! curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/go${oldWorldGoVersion}.linux-amd64.tar.gz" \
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250821/go${oldWorldGoVersion}.linux-amd64.tar.gz" \
-o go-loong64-abi1.0.tar.gz; then
echo "Error: Failed to download patched Go compiler for old-world ABI1.0"
if [ -n "$GITHUB_TOKEN" ]; then
echo "Error output from curl:"
curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/go${oldWorldGoVersion}.linux-amd64.tar.gz" \
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250821/go${oldWorldGoVersion}.linux-amd64.tar.gz" \
-o go-loong64-abi1.0.tar.gz || true
fi
return 1

View File

@ -1,43 +1,60 @@
package _115
import (
"errors"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
driver115 "github.com/SheltonZhu/115driver/pkg/driver"
log "github.com/sirupsen/logrus"
)
var (
md5Salt = "Qclm8MGWUv59TnrR0XPg"
appVer = "27.0.5.7"
appVer = "35.6.0.3"
)
func (d *Pan115) getAppVersion() ([]driver115.AppVersion, error) {
result := driver115.VersionResp{}
resp, err := base.RestyClient.R().Get(driver115.ApiGetVersion)
err = driver115.CheckErr(err, &result, resp)
func (d *Pan115) getAppVersion() (string, error) {
result := VersionResp{}
res, err := base.RestyClient.R().Get(driver115.ApiGetVersion)
if err != nil {
return nil, err
return "", err
}
return result.Data.GetAppVersions(), nil
err = utils.Json.Unmarshal(res.Body(), &result)
if err != nil {
return "", err
}
if len(result.Error) > 0 {
return "", errors.New(result.Error)
}
return result.Data.Win.Version, nil
}
func (d *Pan115) getAppVer() string {
// todo add some cache
vers, err := d.getAppVersion()
ver, err := d.getAppVersion()
if err != nil {
log.Warnf("[115] get app version failed: %v", err)
return appVer
}
for _, ver := range vers {
if ver.AppName == "win" {
return ver.Version
}
if len(ver) > 0 {
return ver
}
return appVer
}
func (d *Pan115) initAppVer() {
appVer = d.getAppVer()
log.Debugf("use app version: %v", appVer)
}
type VersionResp struct {
Error string `json:"error,omitempty"`
Data Versions `json:"data"`
}
type Versions struct {
Win Version `json:"win"`
}
type Version struct {
Version string `json:"version_code"`
}

View File

@ -337,6 +337,27 @@ func (d *Open115) OfflineList(ctx context.Context) (*sdk.OfflineTaskListResp, er
return resp, nil
}
func (d *Open115) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
userInfo, err := d.client.UserInfo(ctx)
if err != nil {
return nil, err
}
total, err := userInfo.RtSpaceInfo.AllTotal.Size.Int64()
if err != nil {
return nil, err
}
free, err := userInfo.RtSpaceInfo.AllRemain.Size.Int64()
if err != nil {
return nil, err
}
return &model.StorageDetails{
DiskUsage: model.DiskUsage{
TotalSpace: uint64(total),
FreeSpace: uint64(free),
},
}, nil
}
// func (d *Open115) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
// // TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
// return nil, errs.NotImplement

View File

@ -28,7 +28,7 @@ func (f File) CreateTime() time.Time {
}
func (f File) GetHash() utils.HashInfo {
return utils.HashInfo{}
return utils.NewHashInfo(utils.MD5, f.Etag)
}
func (f File) GetPath() string {

View File

@ -17,6 +17,7 @@ import (
type Open123 struct {
model.Storage
Addition
UID uint64
}
func (d *Open123) Config() driver.Config {
@ -69,13 +70,45 @@ func (d *Open123) List(ctx context.Context, dir model.Obj, args model.ListArgs)
func (d *Open123) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
fileId, _ := strconv.ParseInt(file.GetID(), 10, 64)
if d.DirectLink {
res, err := d.getDirectLink(fileId)
if err != nil {
return nil, err
}
if d.DirectLinkPrivateKey == "" {
duration := 365 * 24 * time.Hour // 缓存1年
return &model.Link{
URL: res.Data.URL,
Expiration: &duration,
}, nil
}
uid, err := d.getUID()
if err != nil {
return nil, err
}
duration := time.Duration(d.DirectLinkValidDuration) * time.Minute
newURL, err := d.SignURL(res.Data.URL, d.DirectLinkPrivateKey,
uid, duration)
if err != nil {
return nil, err
}
return &model.Link{
URL: newURL,
Expiration: &duration,
}, nil
}
res, err := d.getDownloadInfo(fileId)
if err != nil {
return nil, err
}
link := model.Link{URL: res.Data.DownloadUrl}
return &link, nil
return &model.Link{URL: res.Data.DownloadUrl}, nil
}
func (d *Open123) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
@ -181,5 +214,20 @@ func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
return nil, fmt.Errorf("upload complete timeout")
}
func (d *Open123) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
userInfo, err := d.getUserInfo()
if err != nil {
return nil, err
}
total := userInfo.Data.SpacePermanent + userInfo.Data.SpaceTemp
free := total - userInfo.Data.SpaceUsed
return &model.StorageDetails{
DiskUsage: model.DiskUsage{
TotalSpace: total,
FreeSpace: free,
},
}, nil
}
var _ driver.Driver = (*Open123)(nil)
var _ driver.PutResult = (*Open123)(nil)

View File

@ -23,6 +23,11 @@ type Addition struct {
// 上传线程数
UploadThread int `json:"UploadThread" type:"number" default:"3" help:"the threads of upload"`
// 使用直链
DirectLink bool `json:"DirectLink" type:"bool" default:"false" required:"false" help:"use direct link when download file"`
DirectLinkPrivateKey string `json:"DirectLinkPrivateKey" required:"false" help:"private key for direct link, if URL authentication is enabled"`
DirectLinkValidDuration int64 `json:"DirectLinkValidDuration" type:"number" default:"30" required:"false" help:"minutes, if URL authentication is enabled"`
driver.RootID
}

View File

@ -127,19 +127,19 @@ type RefreshTokenResp struct {
type UserInfoResp struct {
BaseResp
Data struct {
UID int64 `json:"uid"`
Username string `json:"username"`
DisplayName string `json:"displayName"`
HeadImage string `json:"headImage"`
Passport string `json:"passport"`
Mail string `json:"mail"`
SpaceUsed int64 `json:"spaceUsed"`
SpacePermanent int64 `json:"spacePermanent"`
SpaceTemp int64 `json:"spaceTemp"`
SpaceTempExpr string `json:"spaceTempExpr"`
Vip bool `json:"vip"`
DirectTraffic int64 `json:"directTraffic"`
IsHideUID bool `json:"isHideUID"`
UID uint64 `json:"uid"`
// Username string `json:"username"`
// DisplayName string `json:"displayName"`
// HeadImage string `json:"headImage"`
// Passport string `json:"passport"`
// Mail string `json:"mail"`
SpaceUsed uint64 `json:"spaceUsed"`
SpacePermanent uint64 `json:"spacePermanent"`
SpaceTemp uint64 `json:"spaceTemp"`
// SpaceTempExpr int64 `json:"spaceTempExpr"`
// Vip bool `json:"vip"`
// DirectTraffic int64 `json:"directTraffic"`
// IsHideUID bool `json:"isHideUID"`
} `json:"data"`
}
@ -158,6 +158,13 @@ type DownloadInfoResp struct {
} `json:"data"`
}
type DirectLinkResp struct {
BaseResp
Data struct {
URL string `json:"url"`
} `json:"data"`
}
// 创建文件V2返回
type UploadCreateResp struct {
BaseResp

View File

@ -1,15 +1,20 @@
package _123_open
import (
"crypto/md5"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/go-resty/resty/v2"
"github.com/google/uuid"
log "github.com/sirupsen/logrus"
)
@ -20,7 +25,8 @@ var ( //不同情况下获取的AccessTokenQPS限制不同 如下模块化易于
RefreshToken = InitApiInfo(Api+"/api/v1/oauth2/access_token", 1)
UserInfo = InitApiInfo(Api+"/api/v1/user/info", 1)
FileList = InitApiInfo(Api+"/api/v2/file/list", 3)
DownloadInfo = InitApiInfo(Api+"/api/v1/file/download_info", 0)
DownloadInfo = InitApiInfo(Api+"/api/v1/file/download_info", 5)
DirectLink = InitApiInfo(Api+"/api/v1/direct-link/url", 5)
Mkdir = InitApiInfo(Api+"/upload/v1/file/mkdir", 2)
Move = InitApiInfo(Api+"/api/v1/file/move", 1)
Rename = InitApiInfo(Api+"/api/v1/file/name", 1)
@ -80,8 +86,24 @@ func (d *Open123) Request(apiInfo *ApiInfo, method string, callback base.ReqCall
}
func (d *Open123) flushAccessToken() error {
if d.Addition.ClientID != "" {
if d.Addition.ClientSecret != "" {
if d.ClientID != "" {
if d.RefreshToken != "" {
var resp RefreshTokenResp
_, err := d.Request(RefreshToken, http.MethodPost, func(req *resty.Request) {
req.SetQueryParam("client_id", d.ClientID)
if d.ClientSecret != "" {
req.SetQueryParam("client_secret", d.ClientSecret)
}
req.SetQueryParam("grant_type", "refresh_token")
req.SetQueryParam("refresh_token", d.RefreshToken)
}, &resp)
if err != nil {
return err
}
d.AccessToken = resp.AccessToken
d.RefreshToken = resp.RefreshToken
op.MustSaveDriverStorage(d)
} else if d.ClientSecret != "" {
var resp AccessTokenResp
_, err := d.Request(AccessToken, http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
@ -94,24 +116,38 @@ func (d *Open123) flushAccessToken() error {
}
d.AccessToken = resp.Data.AccessToken
op.MustSaveDriverStorage(d)
} else if d.Addition.RefreshToken != "" {
var resp RefreshTokenResp
_, err := d.Request(RefreshToken, http.MethodPost, func(req *resty.Request) {
req.SetQueryParam("client_id", d.ClientID)
req.SetQueryParam("grant_type", "refresh_token")
req.SetQueryParam("refresh_token", d.Addition.RefreshToken)
}, &resp)
if err != nil {
return err
}
d.AccessToken = resp.AccessToken
d.RefreshToken = resp.RefreshToken
op.MustSaveDriverStorage(d)
}
}
return nil
}
func (d *Open123) SignURL(originURL, privateKey string, uid uint64, validDuration time.Duration) (newURL string, err error) {
// 生成Unix时间戳
ts := time.Now().Add(validDuration).Unix()
// 生成随机数建议使用UUID不能包含中划线-
rand := strings.ReplaceAll(uuid.New().String(), "-", "")
// 解析URL
objURL, err := url.Parse(originURL)
if err != nil {
return "", err
}
// 待签名字符串格式path-timestamp-rand-uid-privateKey
unsignedStr := fmt.Sprintf("%s-%d-%s-%d-%s", objURL.Path, ts, rand, uid, privateKey)
md5Hash := md5.Sum([]byte(unsignedStr))
// 生成鉴权参数格式timestamp-rand-uid-md5hash
authKey := fmt.Sprintf("%d-%s-%d-%x", ts, rand, uid, md5Hash)
// 添加鉴权参数到URL查询参数
v := objURL.Query()
v.Add("auth_key", authKey)
objURL.RawQuery = v.Encode()
return objURL.String(), nil
}
func (d *Open123) getUserInfo() (*UserInfoResp, error) {
var resp UserInfoResp
@ -122,6 +158,18 @@ func (d *Open123) getUserInfo() (*UserInfoResp, error) {
return &resp, nil
}
func (d *Open123) getUID() (uint64, error) {
if d.UID != 0 {
return d.UID, nil
}
resp, err := d.getUserInfo()
if err != nil {
return 0, err
}
d.UID = resp.Data.UID
return resp.Data.UID, nil
}
func (d *Open123) getFiles(parentFileId int64, limit int, lastFileId int64) (*FileListResp, error) {
var resp FileListResp
@ -159,6 +207,21 @@ func (d *Open123) getDownloadInfo(fileId int64) (*DownloadInfoResp, error) {
return &resp, nil
}
func (d *Open123) getDirectLink(fileId int64) (*DirectLinkResp, error) {
var resp DirectLinkResp
_, err := d.Request(DirectLink, http.MethodGet, func(req *resty.Request) {
req.SetQueryParams(map[string]string{
"fileID": strconv.FormatInt(fileId, 10),
})
}, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}
func (d *Open123) mkdir(parentID int64, name string) error {
_, err := d.Request(Mkdir, http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{

View File

@ -24,7 +24,7 @@ type File struct {
}
func (f File) GetHash() utils.HashInfo {
return utils.HashInfo{}
return utils.NewHashInfo(utils.MD5, f.Etag)
}
func (f File) GetPath() string {

View File

@ -534,16 +534,15 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
if size > partSize {
part = (size + partSize - 1) / partSize
}
// 生成所有 partInfos
partInfos := make([]PartInfo, 0, part)
for i := int64(0); i < part; i++ {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
start := i * partSize
byteSize := size - start
if byteSize > partSize {
byteSize = partSize
}
byteSize := min(size-start, partSize)
partNumber := i + 1
partInfo := PartInfo{
PartNumber: partNumber,
@ -591,17 +590,20 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
// resp.Data.RapidUpload: true 支持快传,但此处直接检测是否返回分片的上传地址
// 快传的情况下同样需要手动处理冲突
if resp.Data.PartInfos != nil {
// 读取前100个分片的上传地址
uploadPartInfos := resp.Data.PartInfos
// Progress
p := driver.NewProgress(size, up)
rateLimited := driver.NewLimitedUploadStream(ctx, stream)
// 获取后续分片的上传地址
for i := 101; i < len(partInfos); i += 100 {
end := i + 100
if end > len(partInfos) {
end = len(partInfos)
}
// 先上传前100个分片
err = d.uploadPersonalParts(ctx, partInfos, resp.Data.PartInfos, rateLimited, p)
if err != nil {
return err
}
// 如果还有剩余分片,分批获取上传地址并上传
for i := 100; i < len(partInfos); i += 100 {
end := min(i+100, len(partInfos))
batchPartInfos := partInfos[i:end]
moredata := base.Json{
"fileId": resp.Data.FileId,
"uploadId": resp.Data.UploadId,
@ -617,44 +619,13 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
if err != nil {
return err
}
uploadPartInfos = append(uploadPartInfos, moreresp.Data.PartInfos...)
}
// Progress
p := driver.NewProgress(size, up)
rateLimited := driver.NewLimitedUploadStream(ctx, stream)
// 上传所有分片
for _, uploadPartInfo := range uploadPartInfos {
index := uploadPartInfo.PartNumber - 1
partSize := partInfos[index].PartSize
log.Debugf("[139] uploading part %+v/%+v", index, len(uploadPartInfos))
limitReader := io.LimitReader(rateLimited, partSize)
// Update Progress
r := io.TeeReader(limitReader, p)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadPartInfo.UploadUrl, r)
err = d.uploadPersonalParts(ctx, partInfos, moreresp.Data.PartInfos, rateLimited, p)
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Content-Length", fmt.Sprint(partSize))
req.Header.Set("Origin", "https://yun.139.com")
req.Header.Set("Referer", "https://yun.139.com/")
req.ContentLength = partSize
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
_ = res.Body.Close()
log.Debugf("[139] uploaded: %+v", res)
if res.StatusCode != http.StatusOK {
return fmt.Errorf("unexpected status code: %d", res.StatusCode)
}
}
// 全部分片上传完毕后complete
data = base.Json{
"contentHash": fullHash,
"contentHashAlgorithm": "SHA256",

View File

@ -1,9 +1,11 @@
package _139
import (
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"path"
@ -13,6 +15,7 @@ import (
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
@ -623,3 +626,47 @@ func (d *Yun139) getPersonalCloudHost() string {
}
return d.PersonalCloudHost
}
func (d *Yun139) uploadPersonalParts(ctx context.Context, partInfos []PartInfo, uploadPartInfos []PersonalPartInfo, rateLimited *driver.RateLimitReader, p *driver.Progress) error {
// 确保数组以 PartNumber 从小到大排序
sort.Slice(uploadPartInfos, func(i, j int) bool {
return uploadPartInfos[i].PartNumber < uploadPartInfos[j].PartNumber
})
for _, uploadPartInfo := range uploadPartInfos {
index := uploadPartInfo.PartNumber - 1
if index < 0 || index >= len(partInfos) {
return fmt.Errorf("invalid PartNumber %d: index out of bounds (partInfos length: %d)", uploadPartInfo.PartNumber, len(partInfos))
}
partSize := partInfos[index].PartSize
log.Debugf("[139] uploading part %+v/%+v", index, len(partInfos))
limitReader := io.LimitReader(rateLimited, partSize)
r := io.TeeReader(limitReader, p)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadPartInfo.UploadUrl, r)
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Content-Length", fmt.Sprint(partSize))
req.Header.Set("Origin", "https://yun.139.com")
req.Header.Set("Referer", "https://yun.139.com/")
req.ContentLength = partSize
err = func() error {
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
log.Debugf("[139] uploaded: %+v", res)
if res.StatusCode != http.StatusOK {
body, _ := io.ReadAll(res.Body)
return fmt.Errorf("unexpected status code: %d, body: %s", res.StatusCode, string(body))
}
return nil
}()
if err != nil {
return err
}
}
return nil
}

View File

@ -1,7 +1,6 @@
package _189_tv
import (
"container/ring"
"context"
"net/http"
"strconv"
@ -12,18 +11,20 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/cron"
"github.com/go-resty/resty/v2"
)
type Cloud189TV struct {
model.Storage
Addition
client *resty.Client
tokenInfo *AppSessionResp
uploadThread int
familyTransferFolder *ring.Ring
cleanFamilyTransferFile func()
storageConfig driver.Config
client *resty.Client
tokenInfo *AppSessionResp
uploadThread int
storageConfig driver.Config
TempUuid string
cron *cron.Cron // 新增 cron 字段
}
func (y *Cloud189TV) Config() driver.Config {
@ -79,10 +80,17 @@ func (y *Cloud189TV) Init(ctx context.Context) (err error) {
}
}
y.cron = cron.NewCron(time.Minute * 5)
y.cron.Do(y.keepAlive)
return
}
func (y *Cloud189TV) Drop(ctx context.Context) error {
if y.cron != nil {
y.cron.Stop()
y.cron = nil
}
return nil
}

View File

@ -8,7 +8,6 @@ import (
type Addition struct {
driver.RootID
AccessToken string `json:"access_token"`
TempUuid string
OrderBy string `json:"order_by" type:"select" options:"filename,filesize,lastOpTime" default:"filename"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
Type string `json:"type" type:"select" options:"personal,family" default:"personal"`

View File

@ -66,6 +66,10 @@ func (y *Cloud189TV) AppKeySignatureHeader(url, method string) map[string]string
}
func (y *Cloud189TV) request(url, method string, callback base.ReqCallback, params map[string]string, resp interface{}, isFamily ...bool) ([]byte, error) {
return y.requestWithRetry(url, method, callback, params, resp, 0, isFamily...)
}
func (y *Cloud189TV) requestWithRetry(url, method string, callback base.ReqCallback, params map[string]string, resp interface{}, retryCount int, isFamily ...bool) ([]byte, error) {
req := y.client.R().SetQueryParams(clientSuffix())
if params != nil {
@ -91,7 +95,22 @@ func (y *Cloud189TV) request(url, method string, callback base.ReqCallback, para
if strings.Contains(res.String(), "userSessionBO is null") ||
strings.Contains(res.String(), "InvalidSessionKey") {
return nil, errors.New("session expired")
// 限制重试次数,避免无限递归
if retryCount >= 3 {
y.Addition.AccessToken = ""
op.MustSaveDriverStorage(y)
return nil, errors.New("session expired after retry")
}
// 尝试刷新会话
if err := y.refreshSession(); err != nil {
// 如果刷新失败说明AccessToken也已过期需要重新登录
y.Addition.AccessToken = ""
op.MustSaveDriverStorage(y)
return nil, errors.New("session expired")
}
// 如果刷新成功,则重试原始请求(增加重试计数)
return y.requestWithRetry(url, method, callback, params, resp, retryCount+1, isFamily...)
}
// 处理错误
@ -131,6 +150,7 @@ func (y *Cloud189TV) put(ctx context.Context, url string, headers map[string]str
}
}
// 请求完成后http.Client会Close Request.Body
resp, err := base.HttpClient.Do(req)
if err != nil {
return nil, err
@ -210,7 +230,7 @@ func (y *Cloud189TV) login() (err error) {
var erron RespErr
var tokenInfo AppSessionResp
if y.Addition.AccessToken == "" {
if y.Addition.TempUuid == "" {
if y.TempUuid == "" {
// 获取登录参数
var uuidInfo UuidInfoResp
req.SetResult(&uuidInfo).SetError(&erron)
@ -229,7 +249,7 @@ func (y *Cloud189TV) login() (err error) {
if uuidInfo.Uuid == "" {
return errors.New("uuidInfo is empty")
}
y.Addition.TempUuid = uuidInfo.Uuid
y.TempUuid = uuidInfo.Uuid
op.MustSaveDriverStorage(y)
// 展示二维码
@ -257,7 +277,7 @@ func (y *Cloud189TV) login() (err error) {
// Signature
req.SetHeaders(y.AppKeySignatureHeader(ApiUrl+"/family/manage/qrcodeLoginResult.action",
http.MethodGet))
req.SetQueryParam("uuid", y.Addition.TempUuid)
req.SetQueryParam("uuid", y.TempUuid)
_, err = req.Execute(http.MethodGet, ApiUrl+"/family/manage/qrcodeLoginResult.action")
if err != nil {
return
@ -269,7 +289,6 @@ func (y *Cloud189TV) login() (err error) {
return errors.New("E189AccessToken is empty")
}
y.Addition.AccessToken = accessTokenResp.E189AccessToken
y.Addition.TempUuid = ""
}
}
// 获取SessionKey 和 SessionSecret
@ -293,6 +312,44 @@ func (y *Cloud189TV) login() (err error) {
return
}
// refreshSession 尝试使用现有的 AccessToken 刷新会话
func (y *Cloud189TV) refreshSession() (err error) {
var erron RespErr
var tokenInfo AppSessionResp
reqb := y.client.R().SetQueryParams(clientSuffix())
reqb.SetResult(&tokenInfo).SetError(&erron)
// Signature
reqb.SetHeaders(y.AppKeySignatureHeader(ApiUrl+"/family/manage/loginFamilyMerge.action",
http.MethodGet))
reqb.SetQueryParam("e189AccessToken", y.Addition.AccessToken)
_, err = reqb.Execute(http.MethodGet, ApiUrl+"/family/manage/loginFamilyMerge.action")
if err != nil {
return
}
if erron.HasError() {
return &erron
}
y.tokenInfo = &tokenInfo
return nil
}
func (y *Cloud189TV) keepAlive() {
_, err := y.get(ApiUrl+"/keepUserSession.action", func(r *resty.Request) {
r.SetQueryParams(clientSuffix())
}, nil)
if err != nil {
utils.Log.Warnf("189tv: Failed to keep user session alive: %v", err)
// 如果keepAlive失败尝试刷新session
if refreshErr := y.refreshSession(); refreshErr != nil {
utils.Log.Errorf("189tv: Failed to refresh session after keepAlive error: %v", refreshErr)
}
} else {
utils.Log.Debugf("189tv: User session kept alive successfully.")
}
}
func (y *Cloud189TV) RapidUpload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, isFamily bool, overwrite bool) (model.Obj, error) {
fileMd5 := stream.GetHash().GetHash(utils.MD5)
if len(fileMd5) < utils.MD5.Width {
@ -333,6 +390,10 @@ func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model
// 网盘中不存在该文件,开始上传
status := GetUploadFileStatusResp{CreateUploadFileResp: *uploadInfo}
// driver.RateLimitReader会尝试Close底层的reader
// 但这里的tempFile是一个*os.FileClose后就没法继续读了
// 所以这里用io.NopCloser包一层
rateLimitedRd := driver.NewLimitedUploadStream(ctx, io.NopCloser(tempFile))
for status.GetSize() < file.GetSize() && status.FileDataExists != 1 {
if utils.IsCanceled(ctx) {
return nil, ctx.Err()
@ -350,7 +411,7 @@ func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model
header["Edrive-UploadFileId"] = fmt.Sprint(status.UploadFileId)
}
_, err := y.put(ctx, status.FileUploadUrl, header, true, tempFile, isFamily)
_, err := y.put(ctx, status.FileUploadUrl, header, true, rateLimitedRd, isFamily)
if err, ok := err.(*RespErr); ok && err.Code != "InputStreamReadError" {
return nil, err
}

View File

@ -12,6 +12,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/cron"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/go-resty/resty/v2"
"github.com/google/uuid"
@ -21,12 +22,12 @@ type Cloud189PC struct {
model.Storage
Addition
identity string
client *resty.Client
loginParam *LoginParam
tokenInfo *AppSessionResp
loginParam *LoginParam
qrcodeParam *QRLoginParam
tokenInfo *AppSessionResp
uploadThread int
@ -35,6 +36,7 @@ type Cloud189PC struct {
storageConfig driver.Config
ref *Cloud189PC
cron *cron.Cron
}
func (y *Cloud189PC) Config() driver.Config {
@ -84,14 +86,22 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) {
})
}
// 避免重复登陆
identity := utils.GetMD5EncodeStr(y.Username + y.Password)
if !y.isLogin() || y.identity != identity {
y.identity = identity
// 先尝试用Token刷新之后尝试登陆
if y.Addition.RefreshToken != "" {
y.tokenInfo = &AppSessionResp{RefreshToken: y.Addition.RefreshToken}
if err = y.refreshToken(); err != nil {
return
}
} else {
if err = y.login(); err != nil {
return
}
}
// 初始化并启动 cron 任务
y.cron = cron.NewCron(time.Duration(time.Minute * 5))
// 每5分钟执行一次 keepAlive
y.cron.Do(y.keepAlive)
}
// 处理家庭云ID
@ -128,6 +138,10 @@ func (d *Cloud189PC) InitReference(storage driver.Driver) error {
func (y *Cloud189PC) Drop(ctx context.Context) error {
y.ref = nil
if y.cron != nil {
y.cron.Stop()
y.cron = nil
}
return nil
}

View File

@ -80,6 +80,20 @@ func timestamp() int64 {
return time.Now().UTC().UnixNano() / 1e6
}
// formatDate formats a time.Time object into the "YYYY-MM-DDHH:mm:ssSSS" format.
func formatDate(t time.Time) string {
// The layout string "2006-01-0215:04:05.000" corresponds to:
// 2006 -> Year (YYYY)
// 01 -> Month (MM)
// 02 -> Day (DD)
// 15 -> Hour (HH)
// 04 -> Minute (mm)
// 05 -> Second (ss)
// 000 -> Millisecond (SSS) with leading zeros
// Note the lack of a separator between the date and hour, matching the desired output.
return t.Format("2006-01-0215:04:05.000")
}
func MustParseTime(str string) *time.Time {
lastOpTime, _ := time.ParseInLocation("2006-01-02 15:04:05 -07", str+" +08", time.Local)
return &lastOpTime

View File

@ -6,9 +6,11 @@ import (
)
type Addition struct {
Username string `json:"username" required:"true"`
Password string `json:"password" required:"true"`
VCode string `json:"validate_code"`
LoginType string `json:"login_type" type:"select" options:"password,qrcode" default:"password" required:"true"`
Username string `json:"username" required:"true"`
Password string `json:"password" required:"true"`
VCode string `json:"validate_code"`
RefreshToken string `json:"refresh_token" help:"To switch accounts, please clear this field"`
driver.RootID
OrderBy string `json:"order_by" type:"select" options:"filename,filesize,lastOpTime" default:"filename"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`

View File

@ -68,15 +68,7 @@ func (e *RespErr) Error() string {
return ""
}
// 登陆需要的参数
type LoginParam struct {
// 加密后的用户名和密码
RsaUsername string
RsaPassword string
// rsa密钥
jRsaKey string
type BaseLoginParam struct {
// 请求头参数
Lt string
ReqId string
@ -88,6 +80,27 @@ type LoginParam struct {
CaptchaToken string
}
// QRLoginParam 用于暂存二维码登录过程中的参数
type QRLoginParam struct {
BaseLoginParam
UUID string `json:"uuid"`
EncodeUUID string `json:"encodeuuid"`
EncryUUID string `json:"encryuuid"`
}
// 登陆需要的参数
type LoginParam struct {
// 加密后的用户名和密码
RsaUsername string
RsaPassword string
// rsa密钥
jRsaKey string
BaseLoginParam
}
// 登陆加密相关
type EncryptConfResp struct {
Result int `json:"result"`

View File

@ -29,6 +29,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/skip2/go-qrcode"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
@ -54,6 +55,9 @@ const (
MAC = "TELEMAC"
CHANNEL_ID = "web_cloud.189.cn"
// Error codes
UserInvalidOpenTokenError = "UserInvalidOpenToken"
)
func (y *Cloud189PC) SignatureHeader(url, method, params string, isFamily bool) map[string]string {
@ -264,7 +268,14 @@ func (y *Cloud189PC) findFileByName(ctx context.Context, searchName string, fold
}
}
func (y *Cloud189PC) login() (err error) {
func (y *Cloud189PC) login() error {
if y.LoginType == "qrcode" {
return y.loginByQRCode()
}
return y.loginByPassword()
}
func (y *Cloud189PC) loginByPassword() (err error) {
// 初始化登陆所需参数
if y.loginParam == nil {
if err = y.initLoginParam(); err != nil {
@ -278,10 +289,15 @@ func (y *Cloud189PC) login() (err error) {
// 销毁登陆参数
y.loginParam = nil
// 遇到错误,重新加载登陆参数(刷新验证码)
if err != nil && y.NoUseOcr {
if err1 := y.initLoginParam(); err1 != nil {
err = fmt.Errorf("err1: %s \nerr2: %s", err, err1)
if err != nil {
if y.NoUseOcr {
if err1 := y.initLoginParam(); err1 != nil {
err = fmt.Errorf("err1: %s \nerr2: %s", err, err1)
}
}
y.Status = err.Error()
op.MustSaveDriverStorage(y)
}
}()
@ -336,14 +352,105 @@ func (y *Cloud189PC) login() (err error) {
err = fmt.Errorf(tokenInfo.ResMessage)
return
}
y.Addition.RefreshToken = tokenInfo.RefreshToken
y.tokenInfo = &tokenInfo
op.MustSaveDriverStorage(y)
return
}
/* 初始化登陆需要的参数
* 如果遇到验证码返回错误
*/
func (y *Cloud189PC) initLoginParam() error {
func (y *Cloud189PC) loginByQRCode() error {
if y.qrcodeParam == nil {
if err := y.initQRCodeParam(); err != nil {
// The QR code is also returned via the error
return err
}
}
var state struct {
Status int `json:"status"`
RedirectUrl string `json:"redirectUrl"`
Msg string `json:"msg"`
}
now := time.Now()
_, err := y.client.R().
SetHeaders(map[string]string{
"Referer": AUTH_URL,
"Reqid": y.qrcodeParam.ReqId,
"lt": y.qrcodeParam.Lt,
}).
SetFormData(map[string]string{
"appId": APP_ID,
"clientType": CLIENT_TYPE,
"returnUrl": RETURN_URL,
"paramId": y.qrcodeParam.ParamId,
"uuid": y.qrcodeParam.UUID,
"encryuuid": y.qrcodeParam.EncryUUID,
"date": formatDate(now),
"timeStamp": fmt.Sprint(now.UTC().UnixNano() / 1e6),
}).
ForceContentType("application/json;charset=UTF-8").
SetResult(&state).
Post(AUTH_URL + "/api/logbox/oauth2/qrcodeLoginState.do")
if err != nil {
return fmt.Errorf("failed to check QR code state: %w", err)
}
switch state.Status {
case 0: // login succeeded
var tokenInfo AppSessionResp
_, err = y.client.R().
SetResult(&tokenInfo).
SetQueryParams(clientSuffix()).
SetQueryParam("redirectURL", state.RedirectUrl).
Post(API_URL + "/getSessionForPC.action")
if err != nil {
return err
}
if tokenInfo.ResCode != 0 {
return fmt.Errorf(tokenInfo.ResMessage)
}
y.Addition.RefreshToken = tokenInfo.RefreshToken
y.tokenInfo = &tokenInfo
op.MustSaveDriverStorage(y)
return nil
case -11001: // QR code expired
y.qrcodeParam = nil
return errors.New("QR code expired, please try again")
case -106: // waiting to be scanned
return y.genQRCode("QR code has not been scanned yet, please scan and save again")
case -11002: // waiting for confirmation
return y.genQRCode("QR code has been scanned, please confirm the login on your phone and save again")
default: // other errors
y.qrcodeParam = nil
return fmt.Errorf("QR code login failed with status %d: %s", state.Status, state.Msg)
}
}
func (y *Cloud189PC) genQRCode(text string) error {
// Display the QR code
qrTemplate := `<body>
state: %s
<br><img src="data:image/jpeg;base64,%s"/>
<br>Or Click here: <a href="%s">Login</a>
</body>`
// Generate QR code
qrCode, err := qrcode.Encode(y.qrcodeParam.UUID, qrcode.Medium, 256)
if err != nil {
return fmt.Errorf("failed to generate QR code: %v", err)
}
// Encode QR code to base64
qrCodeBase64 := base64.StdEncoding.EncodeToString(qrCode)
// Create the HTML page
qrPage := fmt.Sprintf(qrTemplate, text, qrCodeBase64, y.qrcodeParam.UUID)
return fmt.Errorf("need verify: \n%s", qrPage)
}
func (y *Cloud189PC) initBaseParams() (*BaseLoginParam, error) {
// Clear cookies
jar, _ := cookiejar.New(nil)
y.client.SetCookieJar(jar)
@ -357,17 +464,30 @@ func (y *Cloud189PC) initLoginParam() error {
}).
Get(WEB_URL + "/api/portal/unifyLoginForPC.action")
if err != nil {
return err
return nil, err
}
param := LoginParam{
return &BaseLoginParam{
CaptchaToken: regexp.MustCompile(`'captchaToken' value='(.+?)'`).FindStringSubmatch(res.String())[1],
Lt: regexp.MustCompile(`lt = "(.+?)"`).FindStringSubmatch(res.String())[1],
ParamId: regexp.MustCompile(`paramId = "(.+?)"`).FindStringSubmatch(res.String())[1],
ReqId: regexp.MustCompile(`reqId = "(.+?)"`).FindStringSubmatch(res.String())[1],
// jRsaKey: regexp.MustCompile(`"j_rsaKey" value="(.+?)"`).FindStringSubmatch(res.String())[1],
}, nil
}
/* Initialize the parameters required for login
* Returns an error if a captcha is encountered
*/
func (y *Cloud189PC) initLoginParam() error {
y.loginParam = nil
baseParam, err := y.initBaseParams()
if err != nil {
return err
}
y.loginParam = &LoginParam{BaseLoginParam: *baseParam}
// Get the RSA public key
var encryptConf EncryptConfResp
_, err = y.client.R().
@ -378,18 +498,17 @@ func (y *Cloud189PC) initLoginParam() error {
return err
}
param.jRsaKey = fmt.Sprintf("-----BEGIN PUBLIC KEY-----\n%s\n-----END PUBLIC KEY-----", encryptConf.Data.PubKey)
param.RsaUsername = encryptConf.Data.Pre + RsaEncrypt(param.jRsaKey, y.Username)
param.RsaPassword = encryptConf.Data.Pre + RsaEncrypt(param.jRsaKey, y.Password)
y.loginParam = &param
y.loginParam.jRsaKey = fmt.Sprintf("-----BEGIN PUBLIC KEY-----\n%s\n-----END PUBLIC KEY-----", encryptConf.Data.PubKey)
y.loginParam.RsaUsername = encryptConf.Data.Pre + RsaEncrypt(y.loginParam.jRsaKey, y.Username)
y.loginParam.RsaPassword = encryptConf.Data.Pre + RsaEncrypt(y.loginParam.jRsaKey, y.Password)
// Check whether a captcha is required
resp, err := y.client.R().
SetHeader("REQID", param.ReqId).
SetHeader("REQID", y.loginParam.ReqId).
SetFormData(map[string]string{
"appKey": APP_ID,
"accountType": ACCOUNT_TYPE,
"userName": param.RsaUsername,
"userName": y.loginParam.RsaUsername,
}).Post(AUTH_URL + "/api/logbox/oauth2/needcaptcha.do")
if err != nil {
return err
@ -401,8 +520,8 @@ func (y *Cloud189PC) initLoginParam() error {
// Fetch the captcha image
imgRes, err := y.client.R().
SetQueryParams(map[string]string{
"token": param.CaptchaToken,
"REQID": param.ReqId,
"token": y.loginParam.CaptchaToken,
"REQID": y.loginParam.ReqId,
"rnd": fmt.Sprint(timestamp()),
}).
Get(AUTH_URL + "/api/logbox/oauth2/picCaptcha.do")
@ -429,10 +548,38 @@ func (y *Cloud189PC) initLoginParam() error {
return nil
}
// initQRCodeParam initializes the parameters for QR code login and returns the QR code via an error
func (y *Cloud189PC) initQRCodeParam() (err error) {
y.qrcodeParam = nil
baseParam, err := y.initBaseParams()
if err != nil {
return err
}
var qrcodeParam QRLoginParam
_, err = y.client.R().
SetFormData(map[string]string{"appId": APP_ID}).
ForceContentType("application/json;charset=UTF-8").
SetResult(&qrcodeParam).
Post(AUTH_URL + "/api/logbox/oauth2/getUUID.do")
if err != nil {
return err
}
qrcodeParam.BaseLoginParam = *baseParam
y.qrcodeParam = &qrcodeParam
return y.genQRCode("please scan the QR code with the 189 Cloud app, then save the settings again.")
}
// Refresh the session
func (y *Cloud189PC) refreshSession() (err error) {
return y.refreshSessionWithRetry(0)
}
func (y *Cloud189PC) refreshSessionWithRetry(retryCount int) (err error) {
if y.ref != nil {
return y.ref.refreshSession()
return y.ref.refreshSessionWithRetry(retryCount)
}
var erron RespErr
var userSessionResp UserSessionResp
@ -449,37 +596,102 @@ func (y *Cloud189PC) refreshSession() (err error) {
return err
}
// The error affects normal access; take this storage offline
defer func() {
if err != nil {
y.GetStorage().SetStatus(fmt.Sprintf("%+v", err.Error()))
op.MustSaveDriverStorage(y)
}
}()
// Refresh the token when it has become invalid
if erron.HasError() {
if erron.ResCode == "UserInvalidOpenToken" {
if err = y.login(); err != nil {
return err
}
if erron.ResCode == UserInvalidOpenTokenError {
return y.refreshTokenWithRetry(retryCount)
}
return &erron
}
y.tokenInfo.UserSessionResp = userSessionResp
return
return nil
}
// refreshToken returns an error when refreshing the token fails instead of calling login directly
func (y *Cloud189PC) refreshToken() (err error) {
return y.refreshTokenWithRetry(0)
}
func (y *Cloud189PC) refreshTokenWithRetry(retryCount int) (err error) {
if y.ref != nil {
return y.ref.refreshTokenWithRetry(retryCount)
}
// Limit the retry count to avoid infinite recursion
if retryCount >= 3 {
if y.Addition.RefreshToken != "" {
y.Addition.RefreshToken = ""
op.MustSaveDriverStorage(y)
}
return errors.New("refresh token failed after maximum retries")
}
var erron RespErr
var tokenInfo AppSessionResp
_, err = y.client.R().
SetResult(&tokenInfo).
ForceContentType("application/json;charset=UTF-8").
SetError(&erron).
SetFormData(map[string]string{
"clientId": APP_ID,
"refreshToken": y.tokenInfo.RefreshToken,
"grantType": "refresh_token",
"format": "json",
}).
Post(AUTH_URL + "/api/oauth2/refreshToken.do")
if err != nil {
return err
}
// If the refresh fails, return the error to the caller
if erron.HasError() {
if y.Addition.RefreshToken != "" {
y.Addition.RefreshToken = ""
op.MustSaveDriverStorage(y)
}
// Decide the next step based on the login type
if y.LoginType == "qrcode" {
return errors.New("QR code session has expired, please re-scan the code to log in")
}
// In password login mode, fall back to a full login
return y.login()
}
y.Addition.RefreshToken = tokenInfo.RefreshToken
y.tokenInfo = &tokenInfo
op.MustSaveDriverStorage(y)
return y.refreshSessionWithRetry(retryCount + 1)
}
func (y *Cloud189PC) keepAlive() {
_, err := y.get(API_URL+"/keepUserSession.action", func(r *resty.Request) {
r.SetQueryParams(clientSuffix())
}, nil)
if err != nil {
utils.Log.Warnf("189pc: Failed to keep user session alive: %v", err)
// If keepAlive fails, try to refresh the session
if refreshErr := y.refreshSession(); refreshErr != nil {
utils.Log.Errorf("189pc: Failed to refresh session after keepAlive error: %v", refreshErr)
}
} else {
utils.Log.Debugf("189pc: User session kept alive successfully.")
}
}
// Normal upload
// Files with size 0 cannot be uploaded
func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
size := file.GetSize()
sliceSize := min(size, partSize(size))
// File size
fileSize := file.GetSize()
// Slice size; must not be set to the file size
sliceSize := partSize(fileSize)
params := Params{
"parentFolderId": dstDir.GetID(),
"fileName": url.QueryEscape(file.GetName()),
"fileSize": fmt.Sprint(file.GetSize()),
"sliceSize": fmt.Sprint(sliceSize),
"fileSize": fmt.Sprint(fileSize),
"sliceSize": fmt.Sprint(sliceSize), // 必须为特定分片大小
"lazyCheck": "1",
}
@ -512,10 +724,10 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
retry.DelayType(retry.BackOffDelay))
count := 1
if size > sliceSize {
count = int((size + sliceSize - 1) / sliceSize)
if fileSize > sliceSize {
count = int((fileSize + sliceSize - 1) / sliceSize)
}
lastPartSize := size % sliceSize
lastPartSize := fileSize % sliceSize
if lastPartSize == 0 {
lastPartSize = sliceSize
}
@ -535,9 +747,9 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
break
}
offset := int64((i)-1) * sliceSize
size := sliceSize
partSize := sliceSize
if i == count {
size = lastPartSize
partSize = lastPartSize
}
partInfo := ""
var reader *stream.SectionReader
@ -546,14 +758,14 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
Before: func(ctx context.Context) error {
if reader == nil {
var err error
reader, err = ss.GetSectionReader(offset, size)
reader, err = ss.GetSectionReader(offset, partSize)
if err != nil {
return err
}
silceMd5.Reset()
w, err := utils.CopyWithBuffer(writers, reader)
if w != size {
return fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", size, w, err)
if w != partSize {
return fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", partSize, w, err)
}
// Compute the slice MD5 and encode it as hex and base64
md5Bytes := silceMd5.Sum(nil)
@ -573,8 +785,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
// step 4: upload the slice
uploadUrl := uploadUrls[0]
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false,
driver.NewLimitedUploadStream(ctx, rateLimitedRd), isFamily)
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, rateLimitedRd, isFamily)
if err != nil {
return err
}
@ -595,7 +806,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
fileMd5Hex = strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
}
sliceMd5Hex := fileMd5Hex
if file.GetSize() > sliceSize {
if fileSize > sliceSize {
sliceMd5Hex = strings.ToUpper(utils.GetMD5EncodeStr(strings.Join(silceMd5Hexs, "\n")))
}

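The slice-count arithmetic above, i.e. ceil division plus a remainder-sized last slice, can be sanity-checked in isolation; the sketch below uses hypothetical sizes and is not driver code:

package main

import "fmt"

// partCount mirrors StreamUpload: the number of slices is ceil(fileSize/sliceSize),
// and the last slice carries the remainder (a full slice when the size divides evenly).
func partCount(fileSize, sliceSize int64) (count, lastPartSize int64) {
    count = 1
    if fileSize > sliceSize {
        count = (fileSize + sliceSize - 1) / sliceSize
    }
    lastPartSize = fileSize % sliceSize
    if lastPartSize == 0 {
        lastPartSize = sliceSize
    }
    return count, lastPartSize
}

func main() {
    fmt.Println(partCount(25, 10)) // 3 5  -> two full slices plus a 5-byte tail
    fmt.Println(partCount(20, 10)) // 2 10 -> even split, the tail is a full slice
    fmt.Println(partCount(4, 10))  // 1 4  -> a single slice smaller than sliceSize
}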
View File

@ -79,21 +79,45 @@ func (d *Alias) Get(ctx context.Context, path string) (model.Obj, error) {
if !ok {
return nil, errs.ObjectNotFound
}
var ret *model.Object
provider := ""
for _, dst := range dsts {
obj, err := fs.Get(ctx, stdpath.Join(dst, sub), &fs.GetArgs{NoLog: true})
rawPath := stdpath.Join(dst, sub)
obj, err := fs.Get(ctx, rawPath, &fs.GetArgs{NoLog: true})
if err != nil {
continue
}
return &model.Object{
Path: path,
Name: obj.GetName(),
Size: obj.GetSize(),
Modified: obj.ModTime(),
IsFolder: obj.IsDir(),
HashInfo: obj.GetHash(),
storage, err := fs.GetStorage(rawPath, &fs.GetStoragesArgs{})
if ret == nil {
ret = &model.Object{
Path: path,
Name: obj.GetName(),
Size: obj.GetSize(),
Modified: obj.ModTime(),
IsFolder: obj.IsDir(),
HashInfo: obj.GetHash(),
}
if !d.ProviderPassThrough || err != nil {
break
}
provider = storage.Config().Name
} else if err != nil || provider != storage.GetStorage().Driver {
provider = ""
break
}
}
if ret == nil {
return nil, errs.ObjectNotFound
}
if provider != "" {
return &model.ObjectProvider{
Object: *ret,
Provider: model.Provider{
Provider: provider,
},
}, nil
}
return nil, errs.ObjectNotFound
return ret, nil
}
func (d *Alias) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
@ -186,6 +210,35 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
return nil, errs.ObjectNotFound
}
func (d *Alias) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
root, sub := d.getRootAndPath(args.Obj.GetPath())
dsts, ok := d.pathMap[root]
if !ok {
return nil, errs.ObjectNotFound
}
for _, dst := range dsts {
rawPath := stdpath.Join(dst, sub)
storage, actualPath, err := op.GetStorageAndActualPath(rawPath)
if err != nil {
continue
}
other, ok := storage.(driver.Other)
if !ok {
continue
}
obj, err := op.GetUnwrap(ctx, storage, actualPath)
if err != nil {
continue
}
return other.Other(ctx, model.OtherArgs{
Obj: obj,
Method: args.Method,
Data: args.Data,
})
}
return nil, errs.NotImplement
}
func (d *Alias) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
if !d.Writable {
return errs.PermissionDenied

View File

@ -15,6 +15,7 @@ type Addition struct {
DownloadConcurrency int `json:"download_concurrency" default:"0" required:"false" type:"number" help:"Need to enable proxy"`
DownloadPartSize int `json:"download_part_size" default:"0" type:"number" required:"false" help:"Need to enable proxy. Unit: KB"`
Writable bool `json:"writable" type:"bool" default:"false"`
ProviderPassThrough bool `json:"provider_pass_through" type:"bool" default:"false"`
}
var config = driver.Config{

View File

@ -291,6 +291,21 @@ func (d *AliyundriveOpen) Other(ctx context.Context, args model.OtherArgs) (inte
return resp, nil
}
func (d *AliyundriveOpen) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
res, err := d.request(ctx, limiterOther, "/adrive/v1.0/user/getSpaceInfo", http.MethodPost, nil)
if err != nil {
return nil, err
}
total := utils.Json.Get(res, "personal_space_info", "total_size").ToUint64()
used := utils.Json.Get(res, "personal_space_info", "used_size").ToUint64()
return &model.StorageDetails{
DiskUsage: model.DiskUsage{
TotalSpace: total,
FreeSpace: total - used,
},
}, nil
}
var _ driver.Driver = (*AliyundriveOpen)(nil)
var _ driver.MkdirResult = (*AliyundriveOpen)(nil)
var _ driver.MoveResult = (*AliyundriveOpen)(nil)

View File

@ -20,9 +20,12 @@ import (
_ "github.com/OpenListTeam/OpenList/v4/drivers/baidu_netdisk"
_ "github.com/OpenListTeam/OpenList/v4/drivers/baidu_photo"
_ "github.com/OpenListTeam/OpenList/v4/drivers/chaoxing"
_ "github.com/OpenListTeam/OpenList/v4/drivers/chunk"
_ "github.com/OpenListTeam/OpenList/v4/drivers/cloudreve"
_ "github.com/OpenListTeam/OpenList/v4/drivers/cloudreve_v4"
_ "github.com/OpenListTeam/OpenList/v4/drivers/cnb_releases"
_ "github.com/OpenListTeam/OpenList/v4/drivers/crypt"
_ "github.com/OpenListTeam/OpenList/v4/drivers/degoo"
_ "github.com/OpenListTeam/OpenList/v4/drivers/doubao"
_ "github.com/OpenListTeam/OpenList/v4/drivers/doubao_share"
_ "github.com/OpenListTeam/OpenList/v4/drivers/dropbox"
@ -48,6 +51,7 @@ import (
_ "github.com/OpenListTeam/OpenList/v4/drivers/onedrive_app"
_ "github.com/OpenListTeam/OpenList/v4/drivers/onedrive_sharelink"
_ "github.com/OpenListTeam/OpenList/v4/drivers/openlist"
_ "github.com/OpenListTeam/OpenList/v4/drivers/openlist_share"
_ "github.com/OpenListTeam/OpenList/v4/drivers/pikpak"
_ "github.com/OpenListTeam/OpenList/v4/drivers/pikpak_share"
_ "github.com/OpenListTeam/OpenList/v4/drivers/quark_open"
@ -59,6 +63,7 @@ import (
_ "github.com/OpenListTeam/OpenList/v4/drivers/smb"
_ "github.com/OpenListTeam/OpenList/v4/drivers/strm"
_ "github.com/OpenListTeam/OpenList/v4/drivers/teambition"
_ "github.com/OpenListTeam/OpenList/v4/drivers/teldrive"
_ "github.com/OpenListTeam/OpenList/v4/drivers/terabox"
_ "github.com/OpenListTeam/OpenList/v4/drivers/thunder"
_ "github.com/OpenListTeam/OpenList/v4/drivers/thunder_browser"

View File

@ -364,4 +364,12 @@ func (d *BaiduNetdisk) uploadSlice(ctx context.Context, params map[string]string
return nil
}
func (d *BaiduNetdisk) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
du, err := d.quota()
if err != nil {
return nil, err
}
return &model.StorageDetails{DiskUsage: *du}, nil
}
var _ driver.Driver = (*BaiduNetdisk)(nil)

View File

@ -189,3 +189,12 @@ type PrecreateResp struct {
// return_type=2
File File `json:"info"`
}
type QuotaResp struct {
Errno int `json:"errno"`
RequestId int64 `json:"request_id"`
Total uint64 `json:"total"`
Used uint64 `json:"used"`
//Free uint64 `json:"free"`
//Expire bool `json:"expire"`
}

View File

@ -381,6 +381,18 @@ func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 {
return maxSliceSize
}
func (d *BaiduNetdisk) quota() (*model.DiskUsage, error) {
var resp QuotaResp
_, err := d.request("https://pan.baidu.com/api/quota", http.MethodGet, nil, &resp)
if err != nil {
return nil, err
}
return &model.DiskUsage{
TotalSpace: resp.Total,
FreeSpace: resp.Total - resp.Used,
}, nil
}
// func encodeURIComponent(str string) string {
// r := url.QueryEscape(str)
// r = strings.ReplaceAll(r, "+", "%20")

488
drivers/chunk/driver.go Normal file
View File

@ -0,0 +1,488 @@
package chunk
import (
"bytes"
"context"
"errors"
"fmt"
"io"
stdpath "path"
"strconv"
"strings"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/fs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/sign"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/OpenList/v4/server/common"
)
type Chunk struct {
model.Storage
Addition
}
func (d *Chunk) Config() driver.Config {
return config
}
func (d *Chunk) GetAddition() driver.Additional {
return &d.Addition
}
func (d *Chunk) Init(ctx context.Context) error {
if d.PartSize <= 0 {
return errors.New("part size must be positive")
}
d.RemotePath = utils.FixAndCleanPath(d.RemotePath)
return nil
}
func (d *Chunk) Drop(ctx context.Context) error {
return nil
}
func (d *Chunk) Get(ctx context.Context, path string) (model.Obj, error) {
if utils.PathEqual(path, "/") {
return &model.Object{
Name: "Root",
IsFolder: true,
Path: "/",
}, nil
}
remoteStorage, remoteActualPath, err := op.GetStorageAndActualPath(d.RemotePath)
if err != nil {
return nil, err
}
remoteActualPath = stdpath.Join(remoteActualPath, path)
if remoteObj, err := op.Get(ctx, remoteStorage, remoteActualPath); err == nil {
return &model.Object{
Path: path,
Name: remoteObj.GetName(),
Size: remoteObj.GetSize(),
Modified: remoteObj.ModTime(),
IsFolder: remoteObj.IsDir(),
HashInfo: remoteObj.GetHash(),
}, nil
}
remoteActualDir, name := stdpath.Split(remoteActualPath)
chunkName := "[openlist_chunk]" + name
chunkObjs, err := op.List(ctx, remoteStorage, stdpath.Join(remoteActualDir, chunkName), model.ListArgs{})
if err != nil {
return nil, err
}
var totalSize int64 = 0
// Chunk 0 must exist
chunkSizes := []int64{-1}
h := make(map[*utils.HashType]string)
var first model.Obj
for _, o := range chunkObjs {
if o.IsDir() {
continue
}
if after, ok := strings.CutPrefix(o.GetName(), "hash_"); ok {
hn, value, ok := strings.Cut(strings.TrimSuffix(after, d.CustomExt), "_")
if ok {
ht, ok := utils.GetHashByName(hn)
if ok {
h[ht] = value
}
}
continue
}
idx, err := strconv.Atoi(strings.TrimSuffix(o.GetName(), d.CustomExt))
if err != nil {
continue
}
totalSize += o.GetSize()
if len(chunkSizes) > idx {
if idx == 0 {
first = o
}
chunkSizes[idx] = o.GetSize()
} else if len(chunkSizes) == idx {
chunkSizes = append(chunkSizes, o.GetSize())
} else {
newChunkSizes := make([]int64, idx+1)
copy(newChunkSizes, chunkSizes)
chunkSizes = newChunkSizes
chunkSizes[idx] = o.GetSize()
}
}
// Check that chunk 0 is not -1 so that empty files are supported
// If there is more than one chunk, the last chunk cannot be 0
// Only check whether any middle chunk is 0
for i, l := 0, len(chunkSizes)-2; ; i++ {
if i == 0 {
if chunkSizes[i] == -1 {
return nil, fmt.Errorf("chunk part[%d] are missing", i)
}
} else if chunkSizes[i] == 0 {
return nil, fmt.Errorf("chunk part[%d] are missing", i)
}
if i >= l {
break
}
}
reqDir, _ := stdpath.Split(path)
objRes := chunkObject{
Object: model.Object{
Path: stdpath.Join(reqDir, chunkName),
Name: name,
Size: totalSize,
Modified: first.ModTime(),
Ctime: first.CreateTime(),
},
chunkSizes: chunkSizes,
}
if len(h) > 0 {
objRes.HashInfo = utils.NewHashInfoByMap(h)
}
return &objRes, nil
}
func (d *Chunk) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
remoteStorage, remoteActualPath, err := op.GetStorageAndActualPath(d.RemotePath)
if err != nil {
return nil, err
}
remoteActualDir := stdpath.Join(remoteActualPath, dir.GetPath())
remoteObjs, err := op.List(ctx, remoteStorage, remoteActualDir, model.ListArgs{
ReqPath: args.ReqPath,
Refresh: args.Refresh,
})
if err != nil {
return nil, err
}
result := make([]model.Obj, 0, len(remoteObjs))
for _, obj := range remoteObjs {
rawName := obj.GetName()
if obj.IsDir() {
if name, ok := strings.CutPrefix(rawName, "[openlist_chunk]"); ok {
chunkObjs, err := op.List(ctx, remoteStorage, stdpath.Join(remoteActualDir, rawName), model.ListArgs{
ReqPath: stdpath.Join(args.ReqPath, rawName),
Refresh: args.Refresh,
})
if err != nil {
return nil, err
}
totalSize := int64(0)
h := make(map[*utils.HashType]string)
first := obj
for _, o := range chunkObjs {
if o.IsDir() {
continue
}
if after, ok := strings.CutPrefix(strings.TrimSuffix(o.GetName(), d.CustomExt), "hash_"); ok {
hn, value, ok := strings.Cut(after, "_")
if ok {
ht, ok := utils.GetHashByName(hn)
if ok {
h[ht] = value
}
continue
}
}
idx, err := strconv.Atoi(strings.TrimSuffix(o.GetName(), d.CustomExt))
if err != nil {
continue
}
if idx == 0 {
first = o
}
totalSize += o.GetSize()
}
objRes := model.Object{
Name: name,
Size: totalSize,
Modified: first.ModTime(),
Ctime: first.CreateTime(),
}
if len(h) > 0 {
objRes.HashInfo = utils.NewHashInfoByMap(h)
}
if !d.Thumbnail {
result = append(result, &objRes)
} else {
thumbPath := stdpath.Join(args.ReqPath, ".thumbnails", name+".webp")
thumb := fmt.Sprintf("%s/d%s?sign=%s",
common.GetApiUrl(ctx),
utils.EncodePath(thumbPath, true),
sign.Sign(thumbPath))
result = append(result, &model.ObjThumb{
Object: objRes,
Thumbnail: model.Thumbnail{
Thumbnail: thumb,
},
})
}
continue
}
}
if !d.ShowHidden && strings.HasPrefix(rawName, ".") {
continue
}
thumb, ok := model.GetThumb(obj)
objRes := model.Object{
Name: rawName,
Size: obj.GetSize(),
Modified: obj.ModTime(),
IsFolder: obj.IsDir(),
HashInfo: obj.GetHash(),
}
if !ok {
result = append(result, &objRes)
} else {
result = append(result, &model.ObjThumb{
Object: objRes,
Thumbnail: model.Thumbnail{
Thumbnail: thumb,
},
})
}
}
return result, nil
}
func (d *Chunk) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
remoteStorage, remoteActualPath, err := op.GetStorageAndActualPath(d.RemotePath)
if err != nil {
return nil, err
}
chunkFile, ok := file.(*chunkObject)
remoteActualPath = stdpath.Join(remoteActualPath, file.GetPath())
if !ok {
l, _, err := op.Link(ctx, remoteStorage, remoteActualPath, args)
if err != nil {
return nil, err
}
resultLink := *l
resultLink.SyncClosers = utils.NewSyncClosers(l)
return &resultLink, nil
}
fileSize := chunkFile.GetSize()
mergedRrf := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
start := httpRange.Start
length := httpRange.Length
if length < 0 || start+length > fileSize {
length = fileSize - start
}
if length == 0 {
return io.NopCloser(strings.NewReader("")), nil
}
rs := make([]io.Reader, 0)
cs := make(utils.Closers, 0)
var (
rc io.ReadCloser
readFrom bool
)
for idx, chunkSize := range chunkFile.chunkSizes {
if readFrom {
l, o, err := op.Link(ctx, remoteStorage, stdpath.Join(remoteActualPath, d.getPartName(idx)), args)
if err != nil {
_ = cs.Close()
return nil, err
}
cs = append(cs, l)
chunkSize2 := l.ContentLength
if chunkSize2 <= 0 {
chunkSize2 = o.GetSize()
}
if chunkSize2 != chunkSize {
_ = cs.Close()
return nil, fmt.Errorf("chunk part[%d] size not match", idx)
}
rrf, err := stream.GetRangeReaderFromLink(chunkSize2, l)
if err != nil {
_ = cs.Close()
return nil, err
}
newLength := length - chunkSize2
if newLength >= 0 {
length = newLength
rc, err = rrf.RangeRead(ctx, http_range.Range{Length: -1})
} else {
rc, err = rrf.RangeRead(ctx, http_range.Range{Length: length})
}
if err != nil {
_ = cs.Close()
return nil, err
}
rs = append(rs, rc)
cs = append(cs, rc)
if newLength <= 0 {
return utils.ReadCloser{
Reader: io.MultiReader(rs...),
Closer: &cs,
}, nil
}
} else if newStart := start - chunkSize; newStart >= 0 {
start = newStart
} else {
l, o, err := op.Link(ctx, remoteStorage, stdpath.Join(remoteActualPath, d.getPartName(idx)), args)
if err != nil {
_ = cs.Close()
return nil, err
}
cs = append(cs, l)
chunkSize2 := l.ContentLength
if chunkSize2 <= 0 {
chunkSize2 = o.GetSize()
}
if chunkSize2 != chunkSize {
_ = cs.Close()
return nil, fmt.Errorf("chunk part[%d] size not match", idx)
}
rrf, err := stream.GetRangeReaderFromLink(chunkSize2, l)
if err != nil {
_ = cs.Close()
return nil, err
}
rc, err = rrf.RangeRead(ctx, http_range.Range{Start: start, Length: -1})
if err != nil {
_ = cs.Close()
return nil, err
}
length -= chunkSize2 - start
cs = append(cs, rc)
if length <= 0 {
return utils.ReadCloser{
Reader: rc,
Closer: &cs,
}, nil
}
rs = append(rs, rc)
readFrom = true
}
}
return nil, fmt.Errorf("invalid range: start=%d,length=%d,fileSize=%d", httpRange.Start, httpRange.Length, fileSize)
}
return &model.Link{
RangeReader: stream.RangeReaderFunc(mergedRrf),
}, nil
}
func (d *Chunk) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
path := stdpath.Join(d.RemotePath, parentDir.GetPath(), dirName)
return fs.MakeDir(ctx, path)
}
func (d *Chunk) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
src := stdpath.Join(d.RemotePath, srcObj.GetPath())
dst := stdpath.Join(d.RemotePath, dstDir.GetPath())
_, err := fs.Move(ctx, src, dst)
return err
}
func (d *Chunk) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
if _, ok := srcObj.(*chunkObject); ok {
newName = "[openlist_chunk]" + newName
}
return fs.Rename(ctx, stdpath.Join(d.RemotePath, srcObj.GetPath()), newName)
}
func (d *Chunk) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
dst := stdpath.Join(d.RemotePath, dstDir.GetPath())
src := stdpath.Join(d.RemotePath, srcObj.GetPath())
_, err := fs.Copy(ctx, src, dst)
return err
}
func (d *Chunk) Remove(ctx context.Context, obj model.Obj) error {
return fs.Remove(ctx, stdpath.Join(d.RemotePath, obj.GetPath()))
}
func (d *Chunk) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
remoteStorage, remoteActualPath, err := op.GetStorageAndActualPath(d.RemotePath)
if err != nil {
return err
}
if d.Thumbnail && dstDir.GetName() == ".thumbnails" {
return op.Put(ctx, remoteStorage, stdpath.Join(remoteActualPath, dstDir.GetPath()), file, up)
}
upReader := &driver.ReaderUpdatingProgress{
Reader: file,
UpdateProgress: up,
}
dst := stdpath.Join(remoteActualPath, dstDir.GetPath(), "[openlist_chunk]"+file.GetName())
if d.StoreHash {
for ht, value := range file.GetHash().All() {
_ = op.Put(ctx, remoteStorage, dst, &stream.FileStream{
Obj: &model.Object{
Name: fmt.Sprintf("hash_%s_%s%s", ht.Name, value, d.CustomExt),
Size: 1,
Modified: file.ModTime(),
},
Mimetype: "application/octet-stream",
Reader: bytes.NewReader([]byte{0}), // compatibility with drivers that do not support empty files
}, nil, true)
}
}
fullPartCount := int(file.GetSize() / d.PartSize)
tailSize := file.GetSize() % d.PartSize
if tailSize == 0 && fullPartCount > 0 {
fullPartCount--
tailSize = d.PartSize
}
partIndex := 0
for partIndex < fullPartCount {
err = op.Put(ctx, remoteStorage, dst, &stream.FileStream{
Obj: &model.Object{
Name: d.getPartName(partIndex),
Size: d.PartSize,
Modified: file.ModTime(),
},
Mimetype: file.GetMimetype(),
Reader: io.LimitReader(upReader, d.PartSize),
}, nil, true)
if err != nil {
_ = op.Remove(ctx, remoteStorage, dst)
return err
}
partIndex++
}
err = op.Put(ctx, remoteStorage, dst, &stream.FileStream{
Obj: &model.Object{
Name: d.getPartName(fullPartCount),
Size: tailSize,
Modified: file.ModTime(),
},
Mimetype: file.GetMimetype(),
Reader: upReader,
}, nil)
if err != nil {
_ = op.Remove(ctx, remoteStorage, dst)
}
return err
}
func (d *Chunk) getPartName(part int) string {
return fmt.Sprintf("%d%s", part, d.CustomExt)
}
func (d *Chunk) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
remoteStorage, err := fs.GetStorage(d.RemotePath, &fs.GetStoragesArgs{})
if err != nil {
return nil, errs.NotImplement
}
wd, ok := remoteStorage.(driver.WithDetails)
if !ok {
return nil, errs.NotImplement
}
remoteDetails, err := wd.GetDetails(ctx)
if err != nil {
return nil, err
}
return &model.StorageDetails{
DiskUsage: remoteDetails.DiskUsage,
}, nil
}
var _ driver.Driver = (*Chunk)(nil)

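The way Chunk.Put splits a stream can be summarized by the standalone sketch below (hypothetical 10-byte part size, not driver code); it mirrors the fullPartCount/tailSize arithmetic, including the adjustment that avoids writing a zero-byte tail when the size divides evenly:

package main

import "fmt"

// splitSizes mirrors Chunk.Put: the file becomes fullPartCount parts of exactly
// partSize bytes plus one tail part. When the size divides evenly, the last full
// part doubles as the tail, so a zero-byte part is only written for an empty file.
func splitSizes(fileSize, partSize int64) []int64 {
    fullPartCount := fileSize / partSize
    tailSize := fileSize % partSize
    if tailSize == 0 && fullPartCount > 0 {
        fullPartCount--
        tailSize = partSize
    }
    sizes := make([]int64, 0, fullPartCount+1)
    for i := int64(0); i < fullPartCount; i++ {
        sizes = append(sizes, partSize)
    }
    return append(sizes, tailSize)
}

func main() {
    fmt.Println(splitSizes(25, 10)) // [10 10 5]
    fmt.Println(splitSizes(20, 10)) // [10 10]
    fmt.Println(splitSizes(0, 10))  // [0] -> a single empty part, e.g. "0" + CustomExt
}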
31
drivers/chunk/meta.go Normal file
View File

@ -0,0 +1,31 @@
package chunk
import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/op"
)
type Addition struct {
RemotePath string `json:"remote_path" required:"true"`
PartSize int64 `json:"part_size" required:"true" type:"number" help:"bytes"`
CustomExt string `json:"custom_ext" type:"string"`
StoreHash bool `json:"store_hash" type:"bool" default:"true"`
Thumbnail bool `json:"thumbnail" required:"true" default:"false" help:"enable thumbnails pre-generated under the .thumbnails folder"`
ShowHidden bool `json:"show_hidden" default:"true" required:"false" help:"show hidden directories and files"`
}
var config = driver.Config{
Name: "Chunk",
LocalSort: true,
OnlyProxy: true,
NoCache: true,
DefaultRoot: "/",
NoLinkURL: true,
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &Chunk{}
})
}

8
drivers/chunk/obj.go Normal file
View File

@ -0,0 +1,8 @@
package chunk
import "github.com/OpenListTeam/OpenList/v4/internal/model"
type chunkObject struct {
model.Object
chunkSizes []int64
}

View File

@ -20,7 +20,9 @@ import (
type CloudreveV4 struct {
model.Storage
Addition
ref *CloudreveV4
ref *CloudreveV4
AccessExpires string
RefreshExpires string
}
func (d *CloudreveV4) Config() driver.Config {
@ -44,13 +46,17 @@ func (d *CloudreveV4) Init(ctx context.Context) error {
if d.ref != nil {
return nil
}
if d.AccessToken == "" && d.RefreshToken != "" {
return d.refreshToken()
}
if d.Username != "" {
if d.canLogin() {
return d.login()
}
return nil
if d.RefreshToken != "" {
return d.refreshToken()
}
if d.AccessToken == "" {
return errors.New("no way to authenticate. At least AccessToken is required")
}
// ensure AccessToken is valid
return d.parseJWT(d.AccessToken, &AccessJWT{})
}
func (d *CloudreveV4) InitReference(storage driver.Driver) error {
@ -333,6 +339,21 @@ func (d *CloudreveV4) ArchiveDecompress(ctx context.Context, srcObj, dstDir mode
return nil, errs.NotImplement
}
func (d *CloudreveV4) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
// TODO return storage details (total space, free space, etc.)
var r CapacityResp
err := d.request(http.MethodGet, "/user/capacity", nil, &r)
if err != nil {
return nil, err
}
return &model.StorageDetails{
DiskUsage: model.DiskUsage{
TotalSpace: r.Total,
FreeSpace: r.Total - r.Used,
},
}, nil
}
//func (d *CloudreveV4) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
// return nil, errs.NotSupport
//}

View File

@ -66,11 +66,27 @@ type CaptchaResp struct {
Ticket string `json:"ticket"`
}
type AccessJWT struct {
TokenType string `json:"token_type"`
Sub string `json:"sub"`
Exp int64 `json:"exp"`
Nbf int64 `json:"nbf"`
}
type RefreshJWT struct {
TokenType string `json:"token_type"`
Sub string `json:"sub"`
Exp int `json:"exp"`
Nbf int `json:"nbf"`
StateHash string `json:"state_hash"`
RootTokenID string `json:"root_token_id"`
}
type Token struct {
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
AccessExpires time.Time `json:"access_expires"`
RefreshExpires time.Time `json:"refresh_expires"`
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
AccessExpires string `json:"access_expires"`
RefreshExpires string `json:"refresh_expires"`
}
type TokenResponse struct {
@ -188,3 +204,9 @@ type FolderSummaryResp struct {
CalculatedAt time.Time `json:"calculated_at"`
} `json:"folder_summary"`
}
type CapacityResp struct {
Total uint64 `json:"total"`
Used uint64 `json:"used"`
// StoragePackTotal uint64 `json:"storage_pack_total"`
}

View File

@ -28,6 +28,15 @@ import (
// do others that are not defined in the Driver interface
const (
CodeLoginRequired = http.StatusUnauthorized
CodeCredentialInvalid = 40020 // Failed to issue token
)
var (
ErrorIssueToken = errors.New("failed to issue token")
)
func (d *CloudreveV4) getUA() string {
if d.CustomUA != "" {
return d.CustomUA
@ -39,6 +48,23 @@ func (d *CloudreveV4) request(method string, path string, callback base.ReqCallb
if d.ref != nil {
return d.ref.request(method, path, callback, out)
}
// ensure token
if d.isTokenExpired() {
err := d.refreshToken()
if err != nil {
return err
}
}
return d._request(method, path, callback, out)
}
func (d *CloudreveV4) _request(method string, path string, callback base.ReqCallback, out any) error {
if d.ref != nil {
return d.ref._request(method, path, callback, out)
}
u := d.Address + "/api/v4" + path
req := base.RestyClient.R()
req.SetHeaders(map[string]string{
@ -65,15 +91,17 @@ func (d *CloudreveV4) request(method string, path string, callback base.ReqCallb
}
if r.Code != 0 {
if r.Code == 401 && d.RefreshToken != "" && path != "/session/token/refresh" {
// try to refresh token
err = d.refreshToken()
if r.Code == CodeLoginRequired && d.canLogin() && path != "/session/token/refresh" {
err = d.login()
if err != nil {
return err
}
return d.request(method, path, callback, out)
}
return errors.New(r.Msg)
if r.Code == CodeCredentialInvalid {
return ErrorIssueToken
}
return fmt.Errorf("%d: %s", r.Code, r.Msg)
}
if out != nil && r.Data != nil {
@ -91,14 +119,18 @@ func (d *CloudreveV4) request(method string, path string, callback base.ReqCallb
return nil
}
func (d *CloudreveV4) canLogin() bool {
return d.Username != "" && d.Password != ""
}
func (d *CloudreveV4) login() error {
var siteConfig SiteLoginConfigResp
err := d.request(http.MethodGet, "/site/config/login", nil, &siteConfig)
err := d._request(http.MethodGet, "/site/config/login", nil, &siteConfig)
if err != nil {
return err
}
var prepareLogin PrepareLoginResp
err = d.request(http.MethodGet, "/session/prepare?email="+d.Addition.Username, nil, &prepareLogin)
err = d._request(http.MethodGet, "/session/prepare?email="+d.Addition.Username, nil, &prepareLogin)
if err != nil {
return err
}
@ -128,7 +160,7 @@ func (d *CloudreveV4) doLogin(needCaptcha bool) error {
}
if needCaptcha {
var config BasicConfigResp
err = d.request(http.MethodGet, "/site/config/basic", nil, &config)
err = d._request(http.MethodGet, "/site/config/basic", nil, &config)
if err != nil {
return err
}
@ -136,7 +168,7 @@ func (d *CloudreveV4) doLogin(needCaptcha bool) error {
return fmt.Errorf("captcha type %s not support", config.CaptchaType)
}
var captcha CaptchaResp
err = d.request(http.MethodGet, "/site/captcha", nil, &captcha)
err = d._request(http.MethodGet, "/site/captcha", nil, &captcha)
if err != nil {
return err
}
@ -162,20 +194,22 @@ func (d *CloudreveV4) doLogin(needCaptcha bool) error {
loginBody["captcha"] = captchaCode
}
var token TokenResponse
err = d.request(http.MethodPost, "/session/token", func(req *resty.Request) {
err = d._request(http.MethodPost, "/session/token", func(req *resty.Request) {
req.SetBody(loginBody)
}, &token)
if err != nil {
return err
}
d.AccessToken, d.RefreshToken = token.Token.AccessToken, token.Token.RefreshToken
d.AccessExpires, d.RefreshExpires = token.Token.AccessExpires, token.Token.RefreshExpires
op.MustSaveDriverStorage(d)
return nil
}
func (d *CloudreveV4) refreshToken() error {
// if no refresh token, try to login if possible
if d.RefreshToken == "" {
if d.Username != "" {
if d.canLogin() {
err := d.login()
if err != nil {
return fmt.Errorf("cannot login to get refresh token, error: %s", err)
@ -183,20 +217,127 @@ func (d *CloudreveV4) refreshToken() error {
}
return nil
}
// parse jwt to check if refresh token is valid
var jwt RefreshJWT
err := d.parseJWT(d.RefreshToken, &jwt)
if err != nil {
// if refresh token is invalid, try to login if possible
if d.canLogin() {
return d.login()
}
d.GetStorage().SetStatus(fmt.Sprintf("Invalid RefreshToken: %s", err.Error()))
op.MustSaveDriverStorage(d)
return fmt.Errorf("invalid refresh token: %w", err)
}
// do refresh token
var token Token
err := d.request(http.MethodPost, "/session/token/refresh", func(req *resty.Request) {
err = d._request(http.MethodPost, "/session/token/refresh", func(req *resty.Request) {
req.SetBody(base.Json{
"refresh_token": d.RefreshToken,
})
}, &token)
if err != nil {
if errors.Is(err, ErrorIssueToken) {
if d.canLogin() {
// try to login again
return d.login()
}
d.GetStorage().SetStatus("This session is no longer valid")
op.MustSaveDriverStorage(d)
return ErrorIssueToken
}
return err
}
d.AccessToken, d.RefreshToken = token.AccessToken, token.RefreshToken
d.AccessExpires, d.RefreshExpires = token.AccessExpires, token.RefreshExpires
op.MustSaveDriverStorage(d)
return nil
}
func (d *CloudreveV4) parseJWT(token string, jwt any) error {
split := strings.Split(token, ".")
if len(split) != 3 {
return fmt.Errorf("invalid token length: %d, ensure the token is a valid JWT", len(split))
}
data, err := base64.RawURLEncoding.DecodeString(split[1])
if err != nil {
return fmt.Errorf("invalid token encoding: %w, ensure the token is a valid JWT", err)
}
err = json.Unmarshal(data, &jwt)
if err != nil {
return fmt.Errorf("invalid token content: %w, ensure the token is a valid JWT", err)
}
return nil
}
// check if token is expired
// https://github.com/cloudreve/frontend/blob/ddfacc1c31c49be03beb71de4cc114c8811038d6/src/session/index.ts#L177-L200
func (d *CloudreveV4) isTokenExpired() bool {
if d.RefreshToken == "" {
// login again if username and password are set
if d.canLogin() {
return true
}
// no refresh token, cannot refresh
return false
}
if d.AccessToken == "" {
return true
}
var (
err error
expires time.Time
)
// check if token is expired
if d.AccessExpires != "" {
// use expires field if possible to prevent timezone issue
// only available after login or refresh token
// 2025-08-28T02:43:07.645109985+08:00
expires, err = time.Parse(time.RFC3339Nano, d.AccessExpires)
if err != nil {
return false
}
} else {
// fallback to parse jwt
// if failed, disable the storage
var jwt AccessJWT
err = d.parseJWT(d.AccessToken, &jwt)
if err != nil {
d.GetStorage().SetStatus(fmt.Sprintf("Invalid AccessToken: %s", err.Error()))
op.MustSaveDriverStorage(d)
return false
}
// may have timezone issues
expires = time.Unix(jwt.Exp, 0)
}
// add a 10 minutes safe margin
ddl := time.Now().Add(10 * time.Minute)
if expires.Before(ddl) {
// current access token expired, check if refresh token is expired
// warning: cannot parse refresh token from jwt, because the exp field is not standard
if d.RefreshExpires != "" {
refreshExpires, err := time.Parse(time.RFC3339Nano, d.RefreshExpires)
if err != nil {
return false
}
if refreshExpires.Before(time.Now()) {
// This session is no longer valid
if d.canLogin() {
// try to login again
return true
}
d.GetStorage().SetStatus("This session is no longer valid")
op.MustSaveDriverStorage(d)
return false
}
}
return true
}
return false
}
func (d *CloudreveV4) upLocal(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
var finish int64 = 0
var chunk int = 0

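The expiry check above only needs the unverified exp claim from the access token's payload; a minimal standalone sketch of that idea follows (hypothetical, unsigned token; not the driver's parseJWT):

package main

import (
    "encoding/base64"
    "encoding/json"
    "fmt"
    "strings"
    "time"
)

type claims struct {
    Exp int64 `json:"exp"`
}

// expiresSoon reports whether the token's exp claim falls within the given safety
// margin. The payload is decoded without verifying the signature, which is all
// that is needed to decide when to refresh.
func expiresSoon(token string, margin time.Duration) (bool, error) {
    parts := strings.Split(token, ".")
    if len(parts) != 3 {
        return false, fmt.Errorf("not a JWT: got %d segments", len(parts))
    }
    payload, err := base64.RawURLEncoding.DecodeString(parts[1])
    if err != nil {
        return false, err
    }
    var c claims
    if err := json.Unmarshal(payload, &c); err != nil {
        return false, err
    }
    return time.Unix(c.Exp, 0).Before(time.Now().Add(margin)), nil
}

func main() {
    // A made-up, unsigned token whose payload expires in 5 minutes.
    payload, _ := json.Marshal(claims{Exp: time.Now().Add(5 * time.Minute).Unix()})
    token := "eyJhbGciOiJub25lIn0." + base64.RawURLEncoding.EncodeToString(payload) + ".sig"

    soon, err := expiresSoon(token, 10*time.Minute)
    fmt.Println(soon, err) // true <nil> -> a 10-minute margin would trigger a refresh
}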
View File

@ -0,0 +1,230 @@
package cnb_releases
import (
"bytes"
"context"
"fmt"
"io"
"mime/multipart"
"net/http"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/go-resty/resty/v2"
)
type CnbReleases struct {
model.Storage
Addition
ref *CnbReleases
}
func (d *CnbReleases) Config() driver.Config {
return config
}
func (d *CnbReleases) GetAddition() driver.Additional {
return &d.Addition
}
func (d *CnbReleases) Init(ctx context.Context) error {
return nil
}
func (d *CnbReleases) InitReference(storage driver.Driver) error {
refStorage, ok := storage.(*CnbReleases)
if ok {
d.ref = refStorage
return nil
}
return fmt.Errorf("ref: storage is not CnbReleases")
}
func (d *CnbReleases) Drop(ctx context.Context) error {
d.ref = nil
return nil
}
func (d *CnbReleases) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
if dir.GetPath() == "/" {
// get all releases for root dir
var resp ReleaseList
err := d.Request(http.MethodGet, "/{repo}/-/releases", func(req *resty.Request) {
req.SetPathParam("repo", d.Repo)
}, &resp)
if err != nil {
return nil, err
}
return utils.SliceConvert(resp, func(src Release) (model.Obj, error) {
name := src.Name
if d.UseTagName {
name = src.TagName
}
return &model.Object{
ID: src.ID,
Name: name,
Size: d.sumAssetsSize(src.Assets),
Ctime: src.CreatedAt,
Modified: src.UpdatedAt,
IsFolder: true,
}, nil
})
} else {
// get release info by release id
releaseID := dir.GetID()
if releaseID == "" {
return nil, errs.ObjectNotFound
}
var resp Release
err := d.Request(http.MethodGet, "/{repo}/-/releases/{release_id}", func(req *resty.Request) {
req.SetPathParam("repo", d.Repo)
req.SetPathParam("release_id", releaseID)
}, &resp)
if err != nil {
return nil, err
}
return utils.SliceConvert(resp.Assets, func(src ReleaseAsset) (model.Obj, error) {
return &Object{
Object: model.Object{
ID: src.ID,
Path: src.Path,
Name: src.Name,
Size: src.Size,
Ctime: src.CreatedAt,
Modified: src.UpdatedAt,
IsFolder: false,
},
ParentID: dir.GetID(),
}, nil
})
}
}
func (d *CnbReleases) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
return &model.Link{
URL: "https://cnb.cool" + file.GetPath(),
}, nil
}
func (d *CnbReleases) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
if parentDir.GetPath() == "/" {
// create a new release
branch := d.DefaultBranch
if branch == "" {
branch = "main" // fallback to "main" if not set
}
return d.Request(http.MethodPost, "/{repo}/-/releases", func(req *resty.Request) {
req.SetPathParam("repo", d.Repo)
req.SetBody(base.Json{
"name": dirName,
"tag_name": dirName,
"target_commitish": branch,
})
}, nil)
}
return errs.NotImplement
}
func (d *CnbReleases) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
return nil, errs.NotImplement
}
func (d *CnbReleases) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
if srcObj.IsDir() && !d.UseTagName {
return d.Request(http.MethodPatch, "/{repo}/-/releases/{release_id}", func(req *resty.Request) {
req.SetPathParam("repo", d.Repo)
req.SetPathParam("release_id", srcObj.GetID())
req.SetFormData(map[string]string{
"name": newName,
})
}, nil)
}
return errs.NotImplement
}
func (d *CnbReleases) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
return nil, errs.NotImplement
}
func (d *CnbReleases) Remove(ctx context.Context, obj model.Obj) error {
if obj.IsDir() {
return d.Request(http.MethodDelete, "/{repo}/-/releases/{release_id}", func(req *resty.Request) {
req.SetPathParam("repo", d.Repo)
req.SetPathParam("release_id", obj.GetID())
}, nil)
}
if o, ok := obj.(*Object); ok {
return d.Request(http.MethodDelete, "/{repo}/-/releases/{release_id}/assets/{asset_id}", func(req *resty.Request) {
req.SetPathParam("repo", d.Repo)
req.SetPathParam("release_id", o.ParentID)
req.SetPathParam("asset_id", obj.GetID())
}, nil)
} else {
return fmt.Errorf("unable to get release ID")
}
}
func (d *CnbReleases) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
// 1. get upload info
var resp ReleaseAssetUploadURL
err := d.Request(http.MethodPost, "/{repo}/-/releases/{release_id}/asset-upload-url", func(req *resty.Request) {
req.SetPathParam("repo", d.Repo)
req.SetPathParam("release_id", dstDir.GetID())
req.SetBody(base.Json{
"asset_name": file.GetName(),
"overwrite": true,
"size": file.GetSize(),
})
}, &resp)
if err != nil {
return err
}
// 2. upload file
// use multipart to create form file
var b bytes.Buffer
w := multipart.NewWriter(&b)
_, err = w.CreateFormFile("file", file.GetName())
if err != nil {
return err
}
headSize := b.Len()
err = w.Close()
if err != nil {
return err
}
head := bytes.NewReader(b.Bytes()[:headSize])
tail := bytes.NewReader(b.Bytes()[headSize:])
rateLimitedRd := driver.NewLimitedUploadStream(ctx, io.MultiReader(head, file, tail))
// use net/http to upload file
ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Duration(resp.ExpiresInSec+1)*time.Second)
defer cancel()
req, err := http.NewRequestWithContext(ctxWithTimeout, http.MethodPost, resp.UploadURL, rateLimitedRd)
if err != nil {
return err
}
req.Header.Set("Content-Type", w.FormDataContentType())
req.Header.Set("User-Agent", base.UserAgent)
httpResp, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer httpResp.Body.Close()
if httpResp.StatusCode != http.StatusNoContent {
return fmt.Errorf("upload file failed: %s", httpResp.Status)
}
// 3. verify upload
return d.Request(http.MethodPost, resp.VerifyURL, nil, nil)
}
var _ driver.Driver = (*CnbReleases)(nil)

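The Put method above streams the upload without buffering the file: the multipart prologue and closing boundary are rendered into memory, split around the point where the form-file body belongs, and the file is piped in between with io.MultiReader. A minimal standalone sketch of that pattern (hypothetical endpoint and field names):

package main

import (
    "bytes"
    "fmt"
    "io"
    "mime/multipart"
    "net/http"
    "strings"
)

// streamMultipartRequest builds a multipart upload request whose file body is
// streamed: only the part header (head) and the closing boundary (tail) live in
// memory, and the payload flows through io.MultiReader.
func streamMultipartRequest(url, field, filename string, body io.Reader) (*http.Request, error) {
    var buf bytes.Buffer
    w := multipart.NewWriter(&buf)
    if _, err := w.CreateFormFile(field, filename); err != nil {
        return nil, err
    }
    headLen := buf.Len()              // boundary line plus part headers written so far
    if err := w.Close(); err != nil { // appends the closing boundary after headLen
        return nil, err
    }
    head := bytes.NewReader(buf.Bytes()[:headLen])
    tail := bytes.NewReader(buf.Bytes()[headLen:])

    req, err := http.NewRequest(http.MethodPost, url, io.MultiReader(head, body, tail))
    if err != nil {
        return nil, err
    }
    req.Header.Set("Content-Type", w.FormDataContentType())
    return req, nil
}

func main() {
    req, err := streamMultipartRequest("https://example.invalid/upload", "file", "demo.txt",
        strings.NewReader("hello"))
    fmt.Println(req.Method, req.Header.Get("Content-Type"), err)
}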
View File

@ -0,0 +1,26 @@
package cnb_releases
import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/op"
)
type Addition struct {
driver.RootPath
Repo string `json:"repo" type:"string" required:"true"`
Token string `json:"token" type:"string" required:"true"`
UseTagName bool `json:"use_tag_name" type:"bool" default:"false" help:"Use tag name instead of release name"`
DefaultBranch string `json:"default_branch" type:"string" default:"main" help:"Default branch for new releases"`
}
var config = driver.Config{
Name: "CNB Releases",
LocalSort: true,
DefaultRoot: "/",
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &CnbReleases{}
})
}

View File

@ -0,0 +1,100 @@
package cnb_releases
import (
"time"
"github.com/OpenListTeam/OpenList/v4/internal/model"
)
type Object struct {
model.Object
ParentID string
}
type TagList []Tag
type Tag struct {
Commit struct {
Author UserInfo `json:"author"`
Commit CommitObject `json:"commit"`
Committer UserInfo `json:"committer"`
Parents []CommitParent `json:"parents"`
Sha string `json:"sha"`
} `json:"commit"`
Name string `json:"name"`
Target string `json:"target"`
TargetType string `json:"target_type"`
Verification TagObjectVerification `json:"verification"`
}
type UserInfo struct {
Freeze bool `json:"freeze"`
Nickname string `json:"nickname"`
Username string `json:"username"`
}
type CommitObject struct {
Author Signature `json:"author"`
CommentCount int `json:"comment_count"`
Committer Signature `json:"committer"`
Message string `json:"message"`
Tree CommitObjectTree `json:"tree"`
Verification CommitObjectVerification `json:"verification"`
}
type Signature struct {
Date time.Time `json:"date"`
Email string `json:"email"`
Name string `json:"name"`
}
type CommitObjectTree struct {
Sha string `json:"sha"`
}
type CommitObjectVerification struct {
Payload string `json:"payload"`
Reason string `json:"reason"`
Signature string `json:"signature"`
Verified bool `json:"verified"`
VerifiedAt string `json:"verified_at"`
}
type CommitParent = CommitObjectTree
type TagObjectVerification = CommitObjectVerification
type ReleaseList []Release
type Release struct {
Assets []ReleaseAsset `json:"assets"`
Author UserInfo `json:"author"`
Body string `json:"body"`
CreatedAt time.Time `json:"created_at"`
Draft bool `json:"draft"`
ID string `json:"id"`
IsLatest bool `json:"is_latest"`
Name string `json:"name"`
Prerelease bool `json:"prerelease"`
PublishedAt time.Time `json:"published_at"`
TagCommitish string `json:"tag_commitish"`
TagName string `json:"tag_name"`
UpdatedAt time.Time `json:"updated_at"`
}
type ReleaseAsset struct {
ContentType string `json:"content_type"`
CreatedAt time.Time `json:"created_at"`
ID string `json:"id"`
Name string `json:"name"`
Path string `json:"path"`
Size int64 `json:"size"`
UpdatedAt time.Time `json:"updated_at"`
Uploader UserInfo `json:"uploader"`
}
type ReleaseAssetUploadURL struct {
UploadURL string `json:"upload_url"`
ExpiresInSec int `json:"expires_in_sec"`
VerifyURL string `json:"verify_url"`
}

View File

@ -0,0 +1,58 @@
package cnb_releases
import (
"encoding/json"
"fmt"
"net/http"
"strings"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
log "github.com/sirupsen/logrus"
)
// do others that are not defined in the Driver interface
func (d *CnbReleases) Request(method string, path string, callback base.ReqCallback, resp any) error {
if d.ref != nil {
return d.ref.Request(method, path, callback, resp)
}
var url string
if strings.HasPrefix(path, "http") {
url = path
} else {
url = "https://api.cnb.cool" + path
}
req := base.RestyClient.R()
req.SetHeader("Accept", "application/json")
req.SetAuthScheme("Bearer")
req.SetAuthToken(d.Token)
if callback != nil {
callback(req)
}
res, err := req.Execute(method, url)
log.Debugln(res.String())
if err != nil {
return err
}
if res.StatusCode() != http.StatusOK && res.StatusCode() != http.StatusCreated && res.StatusCode() != http.StatusNoContent {
return fmt.Errorf("failed to request %s, status code: %d, message: %s", url, res.StatusCode(), res.String())
}
if resp != nil {
err = json.Unmarshal(res.Body(), resp)
if err != nil {
return err
}
}
return nil
}
func (d *CnbReleases) sumAssetsSize(assets []ReleaseAsset) int64 {
var size int64
for _, asset := range assets {
size += asset.Size
}
return size
}

View File

@ -411,6 +411,20 @@ func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, streamer model.FileSt
return nil
}
func (d *Crypt) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
wd, ok := d.remoteStorage.(driver.WithDetails)
if !ok {
return nil, errs.NotImplement
}
remoteDetails, err := wd.GetDetails(ctx)
if err != nil {
return nil, err
}
return &model.StorageDetails{
DiskUsage: remoteDetails.DiskUsage,
}, nil
}
//func (d *Safe) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
// return nil, errs.NotSupport
//}

203
drivers/degoo/driver.go Normal file
View File

@ -0,0 +1,203 @@
package degoo
import (
"context"
"fmt"
"net/http"
"strconv"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
)
type Degoo struct {
model.Storage
Addition
client *http.Client
}
func (d *Degoo) Config() driver.Config {
return config
}
func (d *Degoo) GetAddition() driver.Additional {
return &d.Addition
}
func (d *Degoo) Init(ctx context.Context) error {
d.client = base.HttpClient
// Ensure we have a valid token (will login if needed or refresh if expired)
if err := d.ensureValidToken(ctx); err != nil {
return fmt.Errorf("failed to initialize token: %w", err)
}
return d.getDevices(ctx)
}
func (d *Degoo) Drop(ctx context.Context) error {
return nil
}
func (d *Degoo) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
items, err := d.getAllFileChildren5(ctx, dir.GetID())
if err != nil {
return nil, err
}
return utils.MustSliceConvert(items, func(s DegooFileItem) model.Obj {
isFolder := s.Category == 2 || s.Category == 1 || s.Category == 10
createTime, modTime, _ := humanReadableTimes(s.CreationTime, s.LastModificationTime, s.LastUploadTime)
size, err := strconv.ParseInt(s.Size, 10, 64)
if err != nil {
size = 0 // Default to 0 if size parsing fails
}
return &model.Object{
ID: s.ID,
Path: s.FilePath,
Name: s.Name,
Size: size,
Modified: modTime,
Ctime: createTime,
IsFolder: isFolder,
}
}), nil
}
func (d *Degoo) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
item, err := d.getOverlay4(ctx, file.GetID())
if err != nil {
return nil, err
}
return &model.Link{URL: item.URL}, nil
}
func (d *Degoo) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
// Creating a folder is done by calling the setUploadFile3 API with a special checksum and size.
const query = `mutation SetUploadFile3($Token: String!, $FileInfos: [FileInfoUpload3]!) { setUploadFile3(Token: $Token, FileInfos: $FileInfos) }`
variables := map[string]interface{}{
"Token": d.AccessToken,
"FileInfos": []map[string]interface{}{
{
"Checksum": folderChecksum,
"Name": dirName,
"CreationTime": time.Now().UnixMilli(),
"ParentID": parentDir.GetID(),
"Size": 0,
},
},
}
_, err := d.apiCall(ctx, "SetUploadFile3", query, variables)
if err != nil {
return err
}
return nil
}
func (d *Degoo) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
const query = `mutation SetMoveFile($Token: String!, $Copy: Boolean, $NewParentID: String!, $FileIDs: [String]!) { setMoveFile(Token: $Token, Copy: $Copy, NewParentID: $NewParentID, FileIDs: $FileIDs) }`
variables := map[string]interface{}{
"Token": d.AccessToken,
"Copy": false,
"NewParentID": dstDir.GetID(),
"FileIDs": []string{srcObj.GetID()},
}
_, err := d.apiCall(ctx, "SetMoveFile", query, variables)
if err != nil {
return nil, err
}
return srcObj, nil
}
func (d *Degoo) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
const query = `mutation SetRenameFile($Token: String!, $FileRenames: [FileRenameInfo]!) { setRenameFile(Token: $Token, FileRenames: $FileRenames) }`
variables := map[string]interface{}{
"Token": d.AccessToken,
"FileRenames": []DegooFileRenameInfo{
{
ID: srcObj.GetID(),
NewName: newName,
},
},
}
_, err := d.apiCall(ctx, "SetRenameFile", query, variables)
if err != nil {
return err
}
return nil
}
func (d *Degoo) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
// Copy is not implemented; the Degoo API does not support direct copy.
return nil, errs.NotImplement
}
func (d *Degoo) Remove(ctx context.Context, obj model.Obj) error {
// Remove deletes a file or folder (moves to trash).
const query = `mutation SetDeleteFile5($Token: String!, $IsInRecycleBin: Boolean!, $IDs: [IDType]!) { setDeleteFile5(Token: $Token, IsInRecycleBin: $IsInRecycleBin, IDs: $IDs) }`
variables := map[string]interface{}{
"Token": d.AccessToken,
"IsInRecycleBin": false,
"IDs": []map[string]string{{"FileID": obj.GetID()}},
}
_, err := d.apiCall(ctx, "SetDeleteFile5", query, variables)
return err
}
func (d *Degoo) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
tmpF, err := file.CacheFullAndWriter(&up, nil)
if err != nil {
return err
}
parentID := dstDir.GetID()
// Calculate the checksum for the file.
checksum, err := d.checkSum(tmpF)
if err != nil {
return err
}
// 1. Get upload authorization via getBucketWriteAuth4.
auths, err := d.getBucketWriteAuth4(ctx, file, parentID, checksum)
if err != nil {
return err
}
// 2. Upload file.
// support rapid upload
if auths.GetBucketWriteAuth4[0].Error != "Already exist!" {
err = d.uploadS3(ctx, auths, tmpF, file, checksum)
if err != nil {
return err
}
}
// 3. Register metadata with setUploadFile3.
data, err := d.SetUploadFile3(ctx, file, parentID, checksum)
if err != nil {
return err
}
if !data.SetUploadFile3 {
return fmt.Errorf("setUploadFile3 failed: %v", data)
}
return nil
}

27
drivers/degoo/meta.go Normal file
View File

@ -0,0 +1,27 @@
package degoo
import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/op"
)
type Addition struct {
driver.RootID
Username string `json:"username" help:"Your Degoo account email"`
Password string `json:"password" help:"Your Degoo account password"`
RefreshToken string `json:"refresh_token" help:"Refresh token for automatic token renewal, obtained automatically"`
AccessToken string `json:"access_token" help:"Access token for Degoo API, obtained automatically"`
}
var config = driver.Config{
Name: "Degoo",
LocalSort: true,
DefaultRoot: "0",
NoOverwriteUpload: true,
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &Degoo{}
})
}

110
drivers/degoo/types.go Normal file
View File

@ -0,0 +1,110 @@
package degoo
import (
"encoding/json"
)
// DegooLoginRequest represents the login request body.
type DegooLoginRequest struct {
GenerateToken bool `json:"GenerateToken"`
Username string `json:"Username"`
Password string `json:"Password"`
}
// DegooLoginResponse represents a successful login response.
type DegooLoginResponse struct {
Token string `json:"Token"`
RefreshToken string `json:"RefreshToken"`
}
// DegooAccessTokenRequest represents the token refresh request body.
type DegooAccessTokenRequest struct {
RefreshToken string `json:"RefreshToken"`
}
// DegooAccessTokenResponse represents the token refresh response.
type DegooAccessTokenResponse struct {
AccessToken string `json:"AccessToken"`
}
// DegooFileItem represents a Degoo file or folder.
type DegooFileItem struct {
ID string `json:"ID"`
ParentID string `json:"ParentID"`
Name string `json:"Name"`
Category int `json:"Category"`
Size string `json:"Size"`
URL string `json:"URL"`
CreationTime string `json:"CreationTime"`
LastModificationTime string `json:"LastModificationTime"`
LastUploadTime string `json:"LastUploadTime"`
MetadataID string `json:"MetadataID"`
DeviceID int64 `json:"DeviceID"`
FilePath string `json:"FilePath"`
IsInRecycleBin bool `json:"IsInRecycleBin"`
}
type DegooErrors struct {
Path []string `json:"path"`
Data interface{} `json:"data"`
ErrorType string `json:"errorType"`
ErrorInfo interface{} `json:"errorInfo"`
Message string `json:"message"`
}
// DegooGraphqlResponse is the common structure for GraphQL API responses.
type DegooGraphqlResponse struct {
Data json.RawMessage `json:"data"`
Errors []DegooErrors `json:"errors,omitempty"`
}
// DegooGetChildren5Data is the data field for getFileChildren5.
type DegooGetChildren5Data struct {
GetFileChildren5 struct {
Items []DegooFileItem `json:"Items"`
NextToken string `json:"NextToken"`
} `json:"getFileChildren5"`
}
// DegooGetOverlay4Data is the data field for getOverlay4.
type DegooGetOverlay4Data struct {
GetOverlay4 DegooFileItem `json:"getOverlay4"`
}
// DegooFileRenameInfo represents a file rename operation.
type DegooFileRenameInfo struct {
ID string `json:"ID"`
NewName string `json:"NewName"`
}
// DegooFileIDs represents a list of file IDs for move operations.
type DegooFileIDs struct {
FileIDs []string `json:"FileIDs"`
}
// DegooGetBucketWriteAuth4Data is the data field for GetBucketWriteAuth4.
type DegooGetBucketWriteAuth4Data struct {
GetBucketWriteAuth4 []struct {
AuthData struct {
PolicyBase64 string `json:"PolicyBase64"`
Signature string `json:"Signature"`
BaseURL string `json:"BaseURL"`
KeyPrefix string `json:"KeyPrefix"`
AccessKey struct {
Key string `json:"Key"`
Value string `json:"Value"`
} `json:"AccessKey"`
ACL string `json:"ACL"`
AdditionalBody []struct {
Key string `json:"Key"`
Value string `json:"Value"`
} `json:"AdditionalBody"`
} `json:"AuthData"`
Error interface{} `json:"Error"`
} `json:"getBucketWriteAuth4"`
}
// DegooSetUploadFile3Data is the data field for SetUploadFile3.
type DegooSetUploadFile3Data struct {
SetUploadFile3 bool `json:"setUploadFile3"`
}
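A rough illustration of how these envelope types fit together (not part of the driver; the helper name is made up and the encoding/json and fmt imports are assumed): a GetFileChildren5 response is decoded in two stages, the GraphQL envelope first, then the operation-specific payload from its raw Data field.
func decodeChildren(body []byte) (*DegooGetChildren5Data, error) {
var envelope DegooGraphqlResponse
if err := json.Unmarshal(body, &envelope); err != nil {
return nil, err
}
if len(envelope.Errors) > 0 {
// Surface the first GraphQL-level error, mirroring what apiCall does in util.go.
return nil, fmt.Errorf("graphql error: %s", envelope.Errors[0].Message)
}
var children DegooGetChildren5Data
if err := json.Unmarshal(envelope.Data, &children); err != nil {
return nil, err
}
return &children, nil
}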

drivers/degoo/upload.go Normal file

@ -0,0 +1,198 @@
package degoo
import (
"bytes"
"context"
"crypto/sha1"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"mime/multipart"
"net/http"
"strconv"
"strings"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
)
func (d *Degoo) getBucketWriteAuth4(ctx context.Context, file model.FileStreamer, parentID string, checksum string) (*DegooGetBucketWriteAuth4Data, error) {
const query = `query GetBucketWriteAuth4(
$Token: String!
$ParentID: String!
$StorageUploadInfos: [StorageUploadInfo2]
) {
getBucketWriteAuth4(
Token: $Token
ParentID: $ParentID
StorageUploadInfos: $StorageUploadInfos
) {
AuthData {
PolicyBase64
Signature
BaseURL
KeyPrefix
AccessKey {
Key
Value
}
ACL
AdditionalBody {
Key
Value
}
}
Error
}
}`
variables := map[string]interface{}{
"Token": d.AccessToken,
"ParentID": parentID,
"StorageUploadInfos": []map[string]string{{
"FileName": file.GetName(),
"Checksum": checksum,
"Size": strconv.FormatInt(file.GetSize(), 10),
}}}
data, err := d.apiCall(ctx, "GetBucketWriteAuth4", query, variables)
if err != nil {
return nil, err
}
var resp DegooGetBucketWriteAuth4Data
err = json.Unmarshal(data, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}
// checkSum calculates the SHA1-based checksum for Degoo upload API.
func (d *Degoo) checkSum(file io.Reader) (string, error) {
seed := []byte{13, 7, 2, 2, 15, 40, 75, 117, 13, 10, 19, 16, 29, 23, 3, 36}
hasher := sha1.New()
hasher.Write(seed)
if _, err := utils.CopyWithBuffer(hasher, file); err != nil {
return "", err
}
cs := hasher.Sum(nil)
csBytes := []byte{10, byte(len(cs))}
csBytes = append(csBytes, cs...)
csBytes = append(csBytes, 16, 0)
return strings.ReplaceAll(base64.StdEncoding.EncodeToString(csBytes), "/", "_"), nil
}
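// Byte layout produced above, for a 20-byte SHA-1 digest: 0x0A, 0x14, the 20 digest bytes,
// then 0x10, 0x00. This resembles a protobuf message (a length-delimited field 1 holding the
// digest, then a varint field 2 set to 0), base64-encoded with '/' replaced by '_' to stay URL-safe.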
func (d *Degoo) uploadS3(ctx context.Context, auths *DegooGetBucketWriteAuth4Data, tmpF model.File, file model.FileStreamer, checksum string) error {
a := auths.GetBucketWriteAuth4[0].AuthData
_, err := tmpF.Seek(0, io.SeekStart)
if err != nil {
return err
}
ext := utils.Ext(file.GetName())
key := fmt.Sprintf("%s%s/%s.%s", a.KeyPrefix, ext, checksum, ext)
var b bytes.Buffer
w := multipart.NewWriter(&b)
err = w.WriteField("key", key)
if err != nil {
return err
}
err = w.WriteField("acl", a.ACL)
if err != nil {
return err
}
err = w.WriteField("policy", a.PolicyBase64)
if err != nil {
return err
}
err = w.WriteField("signature", a.Signature)
if err != nil {
return err
}
err = w.WriteField(a.AccessKey.Key, a.AccessKey.Value)
if err != nil {
return err
}
for _, additional := range a.AdditionalBody {
err = w.WriteField(additional.Key, additional.Value)
if err != nil {
return err
}
}
err = w.WriteField("Content-Type", "")
if err != nil {
return err
}
_, err = w.CreateFormFile("file", key)
if err != nil {
return err
}
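// Everything buffered so far (the form fields plus the empty "file" part header) becomes the
// head of the request body; w.Close() below appends the closing boundary, which becomes the
// tail. The file bytes are then streamed between head and tail, so the multipart body never
// holds the whole file in memory.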
headSize := b.Len()
err = w.Close()
if err != nil {
return err
}
head := bytes.NewReader(b.Bytes()[:headSize])
tail := bytes.NewReader(b.Bytes()[headSize:])
rateLimitedRd := driver.NewLimitedUploadStream(ctx, io.MultiReader(head, tmpF, tail))
req, err := http.NewRequestWithContext(ctx, http.MethodPost, a.BaseURL, rateLimitedRd)
if err != nil {
return err
}
req.Header.Add("ngsw-bypass", "1")
req.Header.Add("Content-Type", w.FormDataContentType())
res, err := d.client.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != http.StatusNoContent {
return fmt.Errorf("upload failed with status code %d", res.StatusCode)
}
return nil
}
var _ driver.Driver = (*Degoo)(nil)
func (d *Degoo) SetUploadFile3(ctx context.Context, file model.FileStreamer, parentID string, checksum string) (*DegooSetUploadFile3Data, error) {
const query = `mutation SetUploadFile3($Token: String!, $FileInfos: [FileInfoUpload3]!) {
setUploadFile3(Token: $Token, FileInfos: $FileInfos)
}`
variables := map[string]interface{}{
"Token": d.AccessToken,
"FileInfos": []map[string]string{{
"Checksum": checksum,
"CreationTime": strconv.FormatInt(file.CreateTime().UnixMilli(), 10),
"Name": file.GetName(),
"ParentID": parentID,
"Size": strconv.FormatInt(file.GetSize(), 10),
}}}
data, err := d.apiCall(ctx, "SetUploadFile3", query, variables)
if err != nil {
return nil, err
}
var resp DegooSetUploadFile3Data
err = json.Unmarshal(data, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}

drivers/degoo/util.go Normal file

@ -0,0 +1,462 @@
package degoo
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"fmt"
"net/http"
"strconv"
"strings"
"sync"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/op"
)
// Thanks to https://github.com/bernd-wechner/Degoo for API research.
const (
// API endpoints
loginURL = "https://rest-api.degoo.com/login"
accessTokenURL = "https://rest-api.degoo.com/access-token/v2"
apiURL = "https://production-appsync.degoo.com/graphql"
// API configuration
apiKey = "da2-vs6twz5vnjdavpqndtbzg3prra"
folderChecksum = "CgAQAg"
// Token management
tokenRefreshThreshold = 5 * time.Minute
// Rate limiting
minRequestInterval = 1 * time.Second
// Error messages
errRateLimited = "rate limited (429), please try again later"
errUnauthorized = "unauthorized access"
)
var (
// Global rate limiting - protects against concurrent API calls
lastRequestTime time.Time
requestMutex sync.Mutex
)
// JWT payload structure for token expiration checking
type JWTPayload struct {
UserID string `json:"userID"`
Exp int64 `json:"exp"`
Iat int64 `json:"iat"`
}
// Rate limiting helper functions
// applyRateLimit ensures minimum interval between API requests
func applyRateLimit() {
requestMutex.Lock()
defer requestMutex.Unlock()
if !lastRequestTime.IsZero() {
if elapsed := time.Since(lastRequestTime); elapsed < minRequestInterval {
time.Sleep(minRequestInterval - elapsed)
}
}
lastRequestTime = time.Now()
}
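// Note that the mutex stays held while sleeping, so Degoo API calls are serialized to at most
// one per second process-wide (the limiter state is package-level and shared by all Degoo storages).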
// HTTP request helper functions
// createJSONRequest creates a new HTTP request with JSON body
func createJSONRequest(ctx context.Context, method, url string, body interface{}) (*http.Request, error) {
jsonBody, err := json.Marshal(body)
if err != nil {
return nil, fmt.Errorf("failed to marshal request body: %w", err)
}
req, err := http.NewRequestWithContext(ctx, method, url, bytes.NewBuffer(jsonBody))
if err != nil {
return nil, fmt.Errorf("failed to create request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("User-Agent", base.UserAgent)
return req, nil
}
// checkHTTPResponse checks for common HTTP error conditions
func checkHTTPResponse(resp *http.Response, operation string) error {
if resp.StatusCode == http.StatusTooManyRequests {
return fmt.Errorf("%s %s", operation, errRateLimited)
}
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("%s failed: %s", operation, resp.Status)
}
return nil
}
// isTokenExpired checks if the JWT token is expired or will expire soon
func (d *Degoo) isTokenExpired() bool {
if d.AccessToken == "" {
return true
}
payload, err := extractJWTPayload(d.AccessToken)
if err != nil {
return true // Invalid token format
}
// Check if token expires within the threshold
expireTime := time.Unix(payload.Exp, 0)
return time.Now().Add(tokenRefreshThreshold).After(expireTime)
}
// extractJWTPayload extracts and parses JWT payload
func extractJWTPayload(token string) (*JWTPayload, error) {
parts := strings.Split(token, ".")
if len(parts) != 3 {
return nil, fmt.Errorf("invalid JWT format")
}
// Decode the payload (second part)
payload, err := base64.RawURLEncoding.DecodeString(parts[1])
if err != nil {
return nil, fmt.Errorf("failed to decode JWT payload: %w", err)
}
var jwtPayload JWTPayload
if err := json.Unmarshal(payload, &jwtPayload); err != nil {
return nil, fmt.Errorf("failed to parse JWT payload: %w", err)
}
return &jwtPayload, nil
}
// refreshToken attempts to refresh the access token using the refresh token
func (d *Degoo) refreshToken(ctx context.Context) error {
if d.RefreshToken == "" {
return fmt.Errorf("no refresh token available")
}
// Create request
tokenReq := DegooAccessTokenRequest{RefreshToken: d.RefreshToken}
req, err := createJSONRequest(ctx, "POST", accessTokenURL, tokenReq)
if err != nil {
return fmt.Errorf("failed to create refresh token request: %w", err)
}
// Execute request
resp, err := d.client.Do(req)
if err != nil {
return fmt.Errorf("refresh token request failed: %w", err)
}
defer resp.Body.Close()
// Check response
if err := checkHTTPResponse(resp, "refresh token"); err != nil {
return err
}
var accessTokenResp DegooAccessTokenResponse
if err := json.NewDecoder(resp.Body).Decode(&accessTokenResp); err != nil {
return fmt.Errorf("failed to parse access token response: %w", err)
}
if accessTokenResp.AccessToken == "" {
return fmt.Errorf("empty access token received")
}
d.AccessToken = accessTokenResp.AccessToken
// Save the updated token to storage
op.MustSaveDriverStorage(d)
return nil
}
// ensureValidToken ensures we have a valid, non-expired token
func (d *Degoo) ensureValidToken(ctx context.Context) error {
// Check if token is expired or will expire soon
if d.isTokenExpired() {
// Try to refresh token first if we have a refresh token
if d.RefreshToken != "" {
if refreshErr := d.refreshToken(ctx); refreshErr == nil {
return nil // Successfully refreshed
} else {
// If refresh failed, fall back to full login
fmt.Printf("Token refresh failed, falling back to full login: %v\n", refreshErr)
}
}
// Perform full login
if d.Username != "" && d.Password != "" {
return d.login(ctx)
}
}
return nil
}
// login performs the login process and retrieves the access token.
func (d *Degoo) login(ctx context.Context) error {
if d.Username == "" || d.Password == "" {
return fmt.Errorf("username or password not provided")
}
creds := DegooLoginRequest{
GenerateToken: true,
Username: d.Username,
Password: d.Password,
}
jsonCreds, err := json.Marshal(creds)
if err != nil {
return fmt.Errorf("failed to serialize login credentials: %w", err)
}
req, err := http.NewRequestWithContext(ctx, "POST", loginURL, bytes.NewBuffer(jsonCreds))
if err != nil {
return fmt.Errorf("failed to create login request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("User-Agent", base.UserAgent)
req.Header.Set("Origin", "https://app.degoo.com")
resp, err := d.client.Do(req)
if err != nil {
return fmt.Errorf("login request failed: %w", err)
}
defer resp.Body.Close()
// Handle rate limiting (429 Too Many Requests)
if resp.StatusCode == http.StatusTooManyRequests {
return fmt.Errorf("login rate limited (429), please try again later")
}
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("login failed: %s", resp.Status)
}
var loginResp DegooLoginResponse
if err := json.NewDecoder(resp.Body).Decode(&loginResp); err != nil {
return fmt.Errorf("failed to parse login response: %w", err)
}
if loginResp.RefreshToken != "" {
tokenReq := DegooAccessTokenRequest{RefreshToken: loginResp.RefreshToken}
jsonTokenReq, err := json.Marshal(tokenReq)
if err != nil {
return fmt.Errorf("failed to serialize access token request: %w", err)
}
tokenReqHTTP, err := http.NewRequestWithContext(ctx, "POST", accessTokenURL, bytes.NewBuffer(jsonTokenReq))
if err != nil {
return fmt.Errorf("failed to create access token request: %w", err)
}
tokenReqHTTP.Header.Set("User-Agent", base.UserAgent)
tokenResp, err := d.client.Do(tokenReqHTTP)
if err != nil {
return fmt.Errorf("failed to get access token: %w", err)
}
defer tokenResp.Body.Close()
var accessTokenResp DegooAccessTokenResponse
if err := json.NewDecoder(tokenResp.Body).Decode(&accessTokenResp); err != nil {
return fmt.Errorf("failed to parse access token response: %w", err)
}
d.AccessToken = accessTokenResp.AccessToken
d.RefreshToken = loginResp.RefreshToken // Save refresh token
} else if loginResp.Token != "" {
d.AccessToken = loginResp.Token
d.RefreshToken = "" // Direct token, no refresh token available
} else {
return fmt.Errorf("login failed, no valid token returned")
}
// Save the updated tokens to storage
op.MustSaveDriverStorage(d)
return nil
}
// apiCall performs a Degoo GraphQL API request.
func (d *Degoo) apiCall(ctx context.Context, operationName, query string, variables map[string]interface{}) (json.RawMessage, error) {
// Apply rate limiting
applyRateLimit()
// Ensure we have a valid token before making the API call
if err := d.ensureValidToken(ctx); err != nil {
return nil, fmt.Errorf("failed to ensure valid token: %w", err)
}
// Update the Token in variables if it exists (after potential refresh)
d.updateTokenInVariables(variables)
return d.executeGraphQLRequest(ctx, operationName, query, variables)
}
// updateTokenInVariables updates the Token field in GraphQL variables
func (d *Degoo) updateTokenInVariables(variables map[string]interface{}) {
if variables != nil {
if _, hasToken := variables["Token"]; hasToken {
variables["Token"] = d.AccessToken
}
}
}
// executeGraphQLRequest executes a GraphQL request with retry logic
func (d *Degoo) executeGraphQLRequest(ctx context.Context, operationName, query string, variables map[string]interface{}) (json.RawMessage, error) {
reqBody := map[string]interface{}{
"operationName": operationName,
"query": query,
"variables": variables,
}
// Create and configure request
req, err := createJSONRequest(ctx, "POST", apiURL, reqBody)
if err != nil {
return nil, err
}
// Set Degoo-specific headers
req.Header.Set("x-api-key", apiKey)
if d.AccessToken != "" {
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", d.AccessToken))
}
// Execute request
resp, err := d.client.Do(req)
if err != nil {
return nil, fmt.Errorf("GraphQL API request failed: %w", err)
}
defer resp.Body.Close()
// Check for HTTP errors
if err := checkHTTPResponse(resp, "GraphQL API"); err != nil {
return nil, err
}
// Parse GraphQL response
var degooResp DegooGraphqlResponse
if err := json.NewDecoder(resp.Body).Decode(&degooResp); err != nil {
return nil, fmt.Errorf("failed to decode GraphQL response: %w", err)
}
// Handle GraphQL errors
if len(degooResp.Errors) > 0 {
return d.handleGraphQLError(ctx, degooResp.Errors[0], operationName, query, variables)
}
return degooResp.Data, nil
}
// handleGraphQLError handles GraphQL-level errors with retry logic
func (d *Degoo) handleGraphQLError(ctx context.Context, gqlError DegooErrors, operationName, query string, variables map[string]interface{}) (json.RawMessage, error) {
if gqlError.ErrorType == "Unauthorized" {
// Re-login and retry
if err := d.login(ctx); err != nil {
return nil, fmt.Errorf("%s, login failed: %w", errUnauthorized, err)
}
// Update token in variables and retry
d.updateTokenInVariables(variables)
return d.apiCall(ctx, operationName, query, variables)
}
return nil, fmt.Errorf("GraphQL API error: %s", gqlError.Message)
}
// humanReadableTimes converts Degoo timestamps to Go time.Time.
func humanReadableTimes(creation, modification, upload string) (cTime, mTime, uTime time.Time) {
cTime, _ = time.Parse(time.RFC3339, creation)
if modification != "" {
modMillis, _ := strconv.ParseInt(modification, 10, 64)
mTime = time.Unix(0, modMillis*int64(time.Millisecond))
}
if upload != "" {
upMillis, _ := strconv.ParseInt(upload, 10, 64)
uTime = time.Unix(0, upMillis*int64(time.Millisecond))
}
return cTime, mTime, uTime
}
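// Example: a creation value of "2023-09-13T12:00:00Z" is parsed as RFC 3339, while a
// modification value of "1694606400000" (milliseconds since the Unix epoch) maps to the
// same instant, 2023-09-13 12:00:00 UTC.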
// getDevices fetches and caches top-level devices and folders.
func (d *Degoo) getDevices(ctx context.Context) error {
const query = `query GetFileChildren5($Token: String! $ParentID: String $AllParentIDs: [String] $Limit: Int! $Order: Int! $NextToken: String ) { getFileChildren5(Token: $Token ParentID: $ParentID AllParentIDs: $AllParentIDs Limit: $Limit Order: $Order NextToken: $NextToken) { Items { ParentID } NextToken } }`
variables := map[string]interface{}{
"Token": d.AccessToken,
"ParentID": "0",
"Limit": 10,
"Order": 3,
}
data, err := d.apiCall(ctx, "GetFileChildren5", query, variables)
if err != nil {
return err
}
var resp DegooGetChildren5Data
if err := json.Unmarshal(data, &resp); err != nil {
return fmt.Errorf("failed to parse device list: %w", err)
}
if d.RootFolderID == "0" {
if len(resp.GetFileChildren5.Items) > 0 {
d.RootFolderID = resp.GetFileChildren5.Items[0].ParentID
}
op.MustSaveDriverStorage(d)
}
return nil
}
// getAllFileChildren5 fetches all children of a directory with pagination.
func (d *Degoo) getAllFileChildren5(ctx context.Context, parentID string) ([]DegooFileItem, error) {
const query = `query GetFileChildren5($Token: String! $ParentID: String $AllParentIDs: [String] $Limit: Int! $Order: Int! $NextToken: String ) { getFileChildren5(Token: $Token ParentID: $ParentID AllParentIDs: $AllParentIDs Limit: $Limit Order: $Order NextToken: $NextToken) { Items { ID ParentID Name Category Size CreationTime LastModificationTime LastUploadTime FilePath IsInRecycleBin DeviceID MetadataID } NextToken } }`
var allItems []DegooFileItem
nextToken := ""
for {
variables := map[string]interface{}{
"Token": d.AccessToken,
"ParentID": parentID,
"Limit": 1000,
"Order": 3,
}
if nextToken != "" {
variables["NextToken"] = nextToken
}
data, err := d.apiCall(ctx, "GetFileChildren5", query, variables)
if err != nil {
return nil, err
}
var resp DegooGetChildren5Data
if err := json.Unmarshal(data, &resp); err != nil {
return nil, err
}
allItems = append(allItems, resp.GetFileChildren5.Items...)
if resp.GetFileChildren5.NextToken == "" {
break
}
nextToken = resp.GetFileChildren5.NextToken
}
return allItems, nil
}
// getOverlay4 fetches metadata for a single item by ID.
func (d *Degoo) getOverlay4(ctx context.Context, id string) (DegooFileItem, error) {
const query = `query GetOverlay4($Token: String!, $ID: IDType!) { getOverlay4(Token: $Token, ID: $ID) { ID ParentID Name Category Size CreationTime LastModificationTime LastUploadTime URL FilePath IsInRecycleBin DeviceID MetadataID } }`
variables := map[string]interface{}{
"Token": d.AccessToken,
"ID": map[string]string{
"FileID": id,
},
}
data, err := d.apiCall(ctx, "GetOverlay4", query, variables)
if err != nil {
return DegooFileItem{}, err
}
var resp DegooGetOverlay4Data
if err := json.Unmarshal(data, &resp); err != nil {
return DegooFileItem{}, fmt.Errorf("failed to parse item metadata: %w", err)
}
return resp.GetOverlay4, nil
}


@ -296,6 +296,23 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreame
return nil, err
}
upToken := utils.Json.Get(res, "upToken").ToString()
if upToken == "-1" {
// Rapid upload: the server already has this file, so no data transfer is needed.
var resp UploadTokenRapidResp
err := utils.Json.Unmarshal(res, &resp)
if err != nil {
return nil, err
}
return &model.Object{
ID: strconv.FormatInt(resp.Map.FileID, 10),
Name: resp.Map.FileName,
Size: s.GetSize(),
Modified: s.ModTime(),
Ctime: s.CreateTime(),
IsFolder: false,
HashInfo: utils.NewHashInfo(utils.MD5, etag),
}, nil
}
now := time.Now()
key := fmt.Sprintf("disk/%d/%d/%d/%s/%016d", now.Year(), now.Month(), now.Day(), d.account, now.UnixMilli())
reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{


@ -29,9 +29,10 @@ func init() {
op.RegisterDriver(func() driver.Driver {
return &ILanZou{
config: driver.Config{
Name: "ILanZou",
DefaultRoot: "0",
LocalSort: true,
Name: "ILanZou",
DefaultRoot: "0",
LocalSort: true,
NoOverwriteUpload: true,
},
conf: Conf{
base: "https://api.ilanzou.com",
@ -47,9 +48,10 @@ func init() {
op.RegisterDriver(func() driver.Driver {
return &ILanZou{
config: driver.Config{
Name: "FeijiPan",
DefaultRoot: "0",
LocalSort: true,
Name: "FeijiPan",
DefaultRoot: "0",
LocalSort: true,
NoOverwriteUpload: true,
},
conf: Conf{
base: "https://api.feijipan.com",


@ -43,6 +43,18 @@ type Part struct {
ETag string `json:"etag"`
}
type UploadTokenRapidResp struct {
Msg string `json:"msg"`
Code int `json:"code"`
UpToken string `json:"upToken"`
Map struct {
FileIconID int `json:"fileIconId"`
FileName string `json:"fileName"`
FileIcon string `json:"fileIcon"`
FileID int64 `json:"fileId"`
} `json:"map"`
}
type UploadResultResp struct {
Msg string `json:"msg"`
Code int `json:"code"`


@ -51,7 +51,7 @@ func (d *Local) Config() driver.Config {
func (d *Local) Init(ctx context.Context) error {
if d.MkdirPerm == "" {
d.mkdirPerm = 0777
d.mkdirPerm = 0o777
} else {
v, err := strconv.ParseUint(d.MkdirPerm, 8, 32)
if err != nil {
@ -150,6 +150,7 @@ func (d *Local) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
}
return files, nil
}
func (d *Local) FileInfoToObj(ctx context.Context, f fs.FileInfo, reqPath string, fullPath string) model.Obj {
thumb := ""
if d.Thumbnail {
@ -198,7 +199,7 @@ func (d *Local) Get(ctx context.Context, path string) (model.Obj, error) {
path = filepath.Join(d.GetRootPath(), path)
f, err := os.Stat(path)
if err != nil {
if strings.Contains(err.Error(), "cannot find the file") {
if os.IsNotExist(err) {
return nil, errs.ObjectNotFound
}
return nil, err
@ -374,6 +375,13 @@ func (d *Local) Remove(ctx context.Context, obj model.Obj) error {
err = os.Remove(obj.GetPath())
}
} else {
if !utils.Exists(d.RecycleBinPath) {
err = os.MkdirAll(d.RecycleBinPath, 0o755)
if err != nil {
return err
}
}
dstPath := filepath.Join(d.RecycleBinPath, obj.GetName())
if utils.Exists(dstPath) {
dstPath = filepath.Join(d.RecycleBinPath, obj.GetName()+"_"+time.Now().Format("20060102150405"))
@ -427,4 +435,14 @@ func (d *Local) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
return nil
}
func (d *Local) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
du, err := getDiskUsage(d.RootFolderPath)
if err != nil {
return nil, err
}
return &model.StorageDetails{
DiskUsage: du,
}, nil
}
var _ driver.Driver = (*Local)(nil)


@ -5,8 +5,25 @@ package local
import (
"io/fs"
"strings"
"syscall"
"github.com/OpenListTeam/OpenList/v4/internal/model"
)
func isHidden(f fs.FileInfo, _ string) bool {
return strings.HasPrefix(f.Name(), ".")
}
func getDiskUsage(path string) (model.DiskUsage, error) {
var stat syscall.Statfs_t
err := syscall.Statfs(path, &stat)
if err != nil {
return model.DiskUsage{}, err
}
total := stat.Blocks * uint64(stat.Bsize)
free := stat.Bfree * uint64(stat.Bsize)
return model.DiskUsage{
TotalSpace: total,
FreeSpace: free,
}, nil
}


@ -1,22 +1,51 @@
//go:build windows
package local
import (
"io/fs"
"path/filepath"
"syscall"
)
func isHidden(f fs.FileInfo, fullPath string) bool {
filePath := filepath.Join(fullPath, f.Name())
namePtr, err := syscall.UTF16PtrFromString(filePath)
if err != nil {
return false
}
attrs, err := syscall.GetFileAttributes(namePtr)
if err != nil {
return false
}
return attrs&syscall.FILE_ATTRIBUTE_HIDDEN != 0
}
//go:build windows
package local
import (
"errors"
"io/fs"
"path/filepath"
"syscall"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"golang.org/x/sys/windows"
)
func isHidden(f fs.FileInfo, fullPath string) bool {
filePath := filepath.Join(fullPath, f.Name())
namePtr, err := syscall.UTF16PtrFromString(filePath)
if err != nil {
return false
}
attrs, err := syscall.GetFileAttributes(namePtr)
if err != nil {
return false
}
return attrs&syscall.FILE_ATTRIBUTE_HIDDEN != 0
}
func getDiskUsage(path string) (model.DiskUsage, error) {
abs, err := filepath.Abs(path)
if err != nil {
return model.DiskUsage{}, err
}
root := filepath.VolumeName(abs)
if len(root) != 2 || root[1] != ':' {
return model.DiskUsage{}, errors.New("cannot get disk label")
}
var freeBytes, totalBytes, totalFreeBytes uint64
err = windows.GetDiskFreeSpaceEx(
windows.StringToUTF16Ptr(root),
&freeBytes,
&totalBytes,
&totalFreeBytes,
)
if err != nil {
return model.DiskUsage{}, err
}
return model.DiskUsage{
TotalSpace: totalBytes,
FreeSpace: freeBytes,
}, nil
}


@ -0,0 +1,181 @@
package openlist_share
import (
"context"
"fmt"
"net/http"
"net/url"
stdpath "path"
"strings"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/OpenList/v4/server/common"
"github.com/go-resty/resty/v2"
)
type OpenListShare struct {
model.Storage
Addition
serverArchivePreview bool
}
func (d *OpenListShare) Config() driver.Config {
return config
}
func (d *OpenListShare) GetAddition() driver.Additional {
return &d.Addition
}
func (d *OpenListShare) Init(ctx context.Context) error {
d.Addition.Address = strings.TrimSuffix(d.Addition.Address, "/")
var settings common.Resp[map[string]string]
_, _, err := d.request("/public/settings", http.MethodGet, func(req *resty.Request) {
req.SetResult(&settings)
})
if err != nil {
return err
}
d.serverArchivePreview = settings.Data["share_archive_preview"] == "true"
return nil
}
func (d *OpenListShare) Drop(ctx context.Context) error {
return nil
}
func (d *OpenListShare) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
var resp common.Resp[FsListResp]
_, _, err := d.request("/fs/list", http.MethodPost, func(req *resty.Request) {
req.SetResult(&resp).SetBody(ListReq{
PageReq: model.PageReq{
Page: 1,
PerPage: 0,
},
Path: stdpath.Join(fmt.Sprintf("/@s/%s", d.ShareId), dir.GetPath()),
Password: d.Pwd,
Refresh: false,
})
})
if err != nil {
return nil, err
}
var files []model.Obj
for _, f := range resp.Data.Content {
file := model.ObjThumb{
Object: model.Object{
Name: f.Name,
Modified: f.Modified,
Ctime: f.Created,
Size: f.Size,
IsFolder: f.IsDir,
HashInfo: utils.FromString(f.HashInfo),
},
Thumbnail: model.Thumbnail{Thumbnail: f.Thumb},
}
files = append(files, &file)
}
return files, nil
}
func (d *OpenListShare) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
path := utils.FixAndCleanPath(stdpath.Join(d.ShareId, file.GetPath()))
u := fmt.Sprintf("%s/sd%s?pwd=%s", d.Address, path, d.Pwd)
return &model.Link{URL: u}, nil
}
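// Example with hypothetical values: Address "https://pan.example.com", ShareId "abc123" and
// Pwd "1234" turn a file at "/docs/a.txt" into
// "https://pan.example.com/sd/abc123/docs/a.txt?pwd=1234".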
func (d *OpenListShare) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
if !d.serverArchivePreview || !d.ForwardArchiveReq {
return nil, errs.NotImplement
}
var resp common.Resp[ArchiveMetaResp]
_, code, err := d.request("/fs/archive/meta", http.MethodPost, func(req *resty.Request) {
req.SetResult(&resp).SetBody(ArchiveMetaReq{
ArchivePass: args.Password,
Path: stdpath.Join(fmt.Sprintf("/@s/%s", d.ShareId), obj.GetPath()),
Password: d.Pwd,
Refresh: false,
})
})
if code == 202 {
return nil, errs.WrongArchivePassword
}
if err != nil {
return nil, err
}
var tree []model.ObjTree
if resp.Data.Content != nil {
tree = make([]model.ObjTree, 0, len(resp.Data.Content))
for _, content := range resp.Data.Content {
tree = append(tree, &content)
}
}
return &model.ArchiveMetaInfo{
Comment: resp.Data.Comment,
Encrypted: resp.Data.Encrypted,
Tree: tree,
}, nil
}
func (d *OpenListShare) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
if !d.serverArchivePreview || !d.ForwardArchiveReq {
return nil, errs.NotImplement
}
var resp common.Resp[ArchiveListResp]
_, code, err := d.request("/fs/archive/list", http.MethodPost, func(req *resty.Request) {
req.SetResult(&resp).SetBody(ArchiveListReq{
ArchiveMetaReq: ArchiveMetaReq{
ArchivePass: args.Password,
Path: stdpath.Join(fmt.Sprintf("/@s/%s", d.ShareId), obj.GetPath()),
Password: d.Pwd,
Refresh: false,
},
PageReq: model.PageReq{
Page: 1,
PerPage: 0,
},
InnerPath: args.InnerPath,
})
})
if code == 202 {
return nil, errs.WrongArchivePassword
}
if err != nil {
return nil, err
}
var files []model.Obj
for _, f := range resp.Data.Content {
file := model.ObjThumb{
Object: model.Object{
Name: f.Name,
Modified: f.Modified,
Ctime: f.Created,
Size: f.Size,
IsFolder: f.IsDir,
HashInfo: utils.FromString(f.HashInfo),
},
Thumbnail: model.Thumbnail{Thumbnail: f.Thumb},
}
files = append(files, &file)
}
return files, nil
}
func (d *OpenListShare) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
if !d.serverArchivePreview || !d.ForwardArchiveReq {
return nil, errs.NotSupport
}
path := utils.FixAndCleanPath(stdpath.Join(d.ShareId, obj.GetPath()))
u := fmt.Sprintf("%s/sad%s?pwd=%s&inner=%s&pass=%s",
d.Address,
path,
d.Pwd,
utils.EncodePath(args.InnerPath, true),
url.QueryEscape(args.Password))
return &model.Link{URL: u}, nil
}
var _ driver.Driver = (*OpenListShare)(nil)


@ -0,0 +1,27 @@
package openlist_share
import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/op"
)
type Addition struct {
driver.RootPath
Address string `json:"url" required:"true"`
ShareId string `json:"sid" required:"true"`
Pwd string `json:"pwd"`
ForwardArchiveReq bool `json:"forward_archive_requests" default:"true"`
}
var config = driver.Config{
Name: "OpenListShare",
LocalSort: true,
NoUpload: true,
DefaultRoot: "/",
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &OpenListShare{}
})
}


@ -0,0 +1,111 @@
package openlist_share
import (
"time"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
)
type ListReq struct {
model.PageReq
Path string `json:"path" form:"path"`
Password string `json:"password" form:"password"`
Refresh bool `json:"refresh"`
}
type ObjResp struct {
Name string `json:"name"`
Size int64 `json:"size"`
IsDir bool `json:"is_dir"`
Modified time.Time `json:"modified"`
Created time.Time `json:"created"`
Sign string `json:"sign"`
Thumb string `json:"thumb"`
Type int `json:"type"`
HashInfo string `json:"hashinfo"`
}
type FsListResp struct {
Content []ObjResp `json:"content"`
Total int64 `json:"total"`
Readme string `json:"readme"`
Write bool `json:"write"`
Provider string `json:"provider"`
}
type ArchiveMetaReq struct {
ArchivePass string `json:"archive_pass"`
Password string `json:"password"`
Path string `json:"path"`
Refresh bool `json:"refresh"`
}
type TreeResp struct {
ObjResp
Children []TreeResp `json:"children"`
hashCache *utils.HashInfo
}
func (t *TreeResp) GetSize() int64 {
return t.Size
}
func (t *TreeResp) GetName() string {
return t.Name
}
func (t *TreeResp) ModTime() time.Time {
return t.Modified
}
func (t *TreeResp) CreateTime() time.Time {
return t.Created
}
func (t *TreeResp) IsDir() bool {
return t.ObjResp.IsDir
}
func (t *TreeResp) GetHash() utils.HashInfo {
return utils.FromString(t.HashInfo)
}
func (t *TreeResp) GetID() string {
return ""
}
func (t *TreeResp) GetPath() string {
return ""
}
func (t *TreeResp) GetChildren() []model.ObjTree {
ret := make([]model.ObjTree, 0, len(t.Children))
for _, child := range t.Children {
ret = append(ret, &child)
}
return ret
}
func (t *TreeResp) Thumb() string {
return t.ObjResp.Thumb
}
type ArchiveMetaResp struct {
Comment string `json:"comment"`
Encrypted bool `json:"encrypted"`
Content []TreeResp `json:"content"`
RawURL string `json:"raw_url"`
Sign string `json:"sign"`
}
type ArchiveListReq struct {
model.PageReq
ArchiveMetaReq
InnerPath string `json:"inner_path"`
}
type ArchiveListResp struct {
Content []ObjResp `json:"content"`
Total int64 `json:"total"`
}


@ -0,0 +1,32 @@
package openlist_share
import (
"fmt"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
)
func (d *OpenListShare) request(api, method string, callback base.ReqCallback) ([]byte, int, error) {
url := d.Address + "/api" + api
req := base.RestyClient.R()
if callback != nil {
callback(req)
}
res, err := req.Execute(method, url)
if err != nil {
code := 0
if res != nil {
code = res.StatusCode()
}
return nil, code, err
}
if res.StatusCode() >= 400 {
return nil, res.StatusCode(), fmt.Errorf("request failed, status: %s", res.Status())
}
code := utils.Json.Get(res.Body(), "code").ToInt()
if code != 200 {
return nil, code, fmt.Errorf("request failed, code: %d, message: %s", code, utils.Json.Get(res.Body(), "message").ToString())
}
return res.Body(), 200, nil
}


@ -149,12 +149,18 @@ func (d *QuarkOrUC) getTranscodingLink(file model.Obj) (*model.Link, error) {
return nil, err
}
return &model.Link{
URL: resp.Data.VideoList[0].VideoInfo.URL,
ContentLength: resp.Data.VideoList[0].VideoInfo.Size,
Concurrency: 3,
PartSize: 10 * utils.MB,
}, nil
for _, info := range resp.Data.VideoList {
if info.VideoInfo.URL != "" {
return &model.Link{
URL: info.VideoInfo.URL,
ContentLength: info.VideoInfo.Size,
Concurrency: 3,
PartSize: 10 * utils.MB,
}, nil
}
}
return nil, errors.New("no link found")
}
func (d *QuarkOrUC) upPre(file model.FileStreamer, parentId string) (UpPreResp, error) {


@ -228,12 +228,18 @@ func (d *QuarkUCTV) getTranscodingLink(ctx context.Context, file model.Obj) (*mo
return nil, err
}
return &model.Link{
URL: fileLink.Data.VideoInfo[0].URL,
Concurrency: 3,
PartSize: 10 * utils.MB,
ContentLength: fileLink.Data.VideoInfo[0].Size,
}, nil
for _, info := range fileLink.Data.VideoInfo {
if info.URL != "" {
return &model.Link{
URL: info.URL,
ContentLength: info.Size,
Concurrency: 3,
PartSize: 10 * utils.MB,
}, nil
}
}
return nil, errors.New("no link found")
}
func (d *QuarkUCTV) getDownloadLink(ctx context.Context, file model.Obj) (*model.Link, error) {


@ -4,6 +4,7 @@ import (
"context"
"os"
"path"
"strings"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
@ -127,4 +128,22 @@ func (d *SFTP) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
return err
}
func (d *SFTP) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
stat, err := d.client.StatVFS(d.RootFolderPath)
if err != nil {
if strings.Contains(err.Error(), "unimplemented") {
return nil, errs.NotImplement
}
return nil, err
}
total := stat.Blocks * stat.Bsize
free := stat.Bfree * stat.Bsize
return &model.StorageDetails{
DiskUsage: model.DiskUsage{
TotalSpace: total,
FreeSpace: free,
},
}, nil
}
var _ driver.Driver = (*SFTP)(nil)


@ -205,6 +205,22 @@ func (d *SMB) Put(ctx context.Context, dstDir model.Obj, stream model.FileStream
return nil
}
func (d *SMB) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
if err := d.checkConn(); err != nil {
return nil, err
}
stat, err := d.fs.Statfs(d.RootFolderPath)
if err != nil {
return nil, err
}
return &model.StorageDetails{
DiskUsage: model.DiskUsage{
TotalSpace: stat.BlockSize() * stat.TotalBlockCount(),
FreeSpace: stat.BlockSize() * stat.AvailableBlockCount(),
},
}, nil
}
//func (d *SMB) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
// return nil, errs.NotSupport
//}

drivers/teldrive/copy.go Normal file

@ -0,0 +1,137 @@
package teldrive
import (
"fmt"
"net/http"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/go-resty/resty/v2"
"golang.org/x/net/context"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/semaphore"
)
func NewCopyManager(ctx context.Context, concurrent int, d *Teldrive) *CopyManager {
g, ctx := errgroup.WithContext(ctx)
return &CopyManager{
TaskChan: make(chan CopyTask, concurrent*2),
Sem: semaphore.NewWeighted(int64(concurrent)),
G: g,
Ctx: ctx,
d: d,
}
}
func (cm *CopyManager) startWorkers() {
workerCount := cap(cm.TaskChan) / 2
for i := 0; i < workerCount; i++ {
cm.G.Go(func() error {
return cm.worker()
})
}
}
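// Since NewCopyManager sizes TaskChan at concurrent*2, workerCount resolves to the configured
// concurrency; the semaphore in worker() bounds in-flight copy requests to the same number.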
func (cm *CopyManager) worker() error {
for {
select {
case task, ok := <-cm.TaskChan:
if !ok {
return nil
}
if err := cm.Sem.Acquire(cm.Ctx, 1); err != nil {
return err
}
var err error
err = cm.processFile(task)
cm.Sem.Release(1)
if err != nil {
return fmt.Errorf("task processing failed: %w", err)
}
case <-cm.Ctx.Done():
return cm.Ctx.Err()
}
}
}
func (cm *CopyManager) generateTasks(ctx context.Context, srcObj, dstDir model.Obj) error {
if srcObj.IsDir() {
return cm.generateFolderTasks(ctx, srcObj, dstDir)
} else {
// add single file task directly
select {
case cm.TaskChan <- CopyTask{SrcObj: srcObj, DstDir: dstDir}:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
}
func (cm *CopyManager) generateFolderTasks(ctx context.Context, srcDir, dstDir model.Obj) error {
objs, err := cm.d.List(ctx, srcDir, model.ListArgs{})
if err != nil {
return fmt.Errorf("failed to list directory %s: %w", srcDir.GetPath(), err)
}
err = cm.d.MakeDir(cm.Ctx, dstDir, srcDir.GetName())
if err != nil || len(objs) == 0 {
return err
}
newDstDir := &model.Object{
ID: dstDir.GetID(),
Path: dstDir.GetPath() + "/" + srcDir.GetName(),
Name: srcDir.GetName(),
IsFolder: true,
}
for _, file := range objs {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
srcFile := &model.Object{
ID: file.GetID(),
Path: srcDir.GetPath() + "/" + file.GetName(),
Name: file.GetName(),
IsFolder: file.IsDir(),
}
// Recursively generate copy tasks for this sub-entry.
if err := cm.generateTasks(ctx, srcFile, newDstDir); err != nil {
return err
}
}
return nil
}
func (cm *CopyManager) processFile(task CopyTask) error {
return cm.copySingleFile(cm.Ctx, task.SrcObj, task.DstDir)
}
func (cm *CopyManager) copySingleFile(ctx context.Context, srcObj, dstDir model.Obj) error {
// Overwrite semantics: delete any existing file at the destination before copying.
if obj, err := cm.d.getFile(dstDir.GetPath(), srcObj.GetName(), srcObj.IsDir()); err == nil {
if err := cm.d.Remove(ctx, obj); err != nil {
return fmt.Errorf("failed to remove existing file: %w", err)
}
}
// Do copy
return cm.d.request(http.MethodPost, "/api/files/{id}/copy", func(req *resty.Request) {
req.SetPathParam("id", srcObj.GetID())
req.SetBody(base.Json{
"newName": srcObj.GetName(),
"destination": dstDir.GetPath(),
})
}, nil)
}

drivers/teldrive/driver.go Normal file

@ -0,0 +1,217 @@
package teldrive
import (
"context"
"fmt"
"math"
"net/http"
"net/url"
"strings"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/go-resty/resty/v2"
"github.com/google/uuid"
)
type Teldrive struct {
model.Storage
Addition
}
func (d *Teldrive) Config() driver.Config {
return config
}
func (d *Teldrive) GetAddition() driver.Additional {
return &d.Addition
}
func (d *Teldrive) Init(ctx context.Context) error {
d.Address = strings.TrimSuffix(d.Address, "/")
if d.Cookie == "" || !strings.HasPrefix(d.Cookie, "access_token=") {
return fmt.Errorf("cookie must start with 'access_token='")
}
if d.UploadConcurrency == 0 {
d.UploadConcurrency = 4
}
if d.ChunkSize == 0 {
d.ChunkSize = 10
}
op.MustSaveDriverStorage(d)
return nil
}
func (d *Teldrive) Drop(ctx context.Context) error {
return nil
}
func (d *Teldrive) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
var listResp ListResp
err := d.request(http.MethodGet, "/api/files", func(req *resty.Request) {
req.SetQueryParams(map[string]string{
"path": dir.GetPath(),
"limit": "1000", // overide default 500, TODO pagination
})
}, &listResp)
if err != nil {
return nil, err
}
return utils.SliceConvert(listResp.Items, func(src Object) (model.Obj, error) {
return &model.Object{
ID: src.ID,
Name: src.Name,
Size: func() int64 {
if src.Type == "folder" {
return 0
}
return src.Size
}(),
IsFolder: src.Type == "folder",
Modified: src.UpdatedAt,
}, nil
})
}
func (d *Teldrive) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if d.UseShareLink {
shareObj, err := d.getShareFileById(file.GetID())
if err != nil || shareObj == nil {
if err := d.createShareFile(file.GetID()); err != nil {
return nil, err
}
shareObj, err = d.getShareFileById(file.GetID())
if err != nil {
return nil, err
}
}
return &model.Link{
URL: d.Address + "/api/shares/" + url.PathEscape(shareObj.Id) + "/files/" + url.PathEscape(file.GetID()) + "/" + url.PathEscape(file.GetName()),
}, nil
}
return &model.Link{
URL: d.Address + "/api/files/" + url.PathEscape(file.GetID()) + "/" + url.PathEscape(file.GetName()),
Header: http.Header{
"Cookie": {d.Cookie},
},
}, nil
}
func (d *Teldrive) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
return d.request(http.MethodPost, "/api/files/mkdir", func(req *resty.Request) {
req.SetBody(map[string]interface{}{
"path": parentDir.GetPath() + "/" + dirName,
})
}, nil)
}
func (d *Teldrive) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
body := base.Json{
"ids": []string{srcObj.GetID()},
"destinationParent": dstDir.GetID(),
}
return d.request(http.MethodPost, "/api/files/move", func(req *resty.Request) {
req.SetBody(body)
}, nil)
}
func (d *Teldrive) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
body := base.Json{
"name": newName,
}
return d.request(http.MethodPatch, "/api/files/{id}", func(req *resty.Request) {
req.SetPathParam("id", srcObj.GetID())
req.SetBody(body)
}, nil)
}
func (d *Teldrive) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
copyConcurrentLimit := 4
copyManager := NewCopyManager(ctx, copyConcurrentLimit, d)
copyManager.startWorkers()
copyManager.G.Go(func() error {
defer close(copyManager.TaskChan)
return copyManager.generateTasks(ctx, srcObj, dstDir)
})
return copyManager.G.Wait()
}
func (d *Teldrive) Remove(ctx context.Context, obj model.Obj) error {
body := base.Json{
"ids": []string{obj.GetID()},
}
return d.request(http.MethodPost, "/api/files/delete", func(req *resty.Request) {
req.SetBody(body)
}, nil)
}
func (d *Teldrive) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
fileId := uuid.New().String()
chunkSizeInMB := d.ChunkSize
chunkSize := chunkSizeInMB * 1024 * 1024 // Convert MB to bytes
totalSize := file.GetSize()
totalParts := int(math.Ceil(float64(totalSize) / float64(chunkSize)))
maxRetried := 3
// delete the upload task when finished or failed
defer func() {
_ = d.request(http.MethodDelete, "/api/uploads/{id}", func(req *resty.Request) {
req.SetPathParam("id", fileId)
}, nil)
}()
if obj, err := d.getFile(dstDir.GetPath(), file.GetName(), file.IsDir()); err == nil {
if err = d.Remove(ctx, obj); err != nil {
return err
}
}
// start the upload process
if err := d.request(http.MethodGet, "/api/uploads/fileId", func(req *resty.Request) {
req.SetPathParam("id", fileId)
}, nil); err != nil {
return err
}
if totalSize == 0 {
return d.touch(file.GetName(), dstDir.GetPath())
}
if totalParts <= 1 {
return d.doSingleUpload(ctx, dstDir, file, up, totalParts, chunkSize, fileId)
}
return d.doMultiUpload(ctx, dstDir, file, up, maxRetried, totalParts, chunkSize, fileId)
}
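// Sizing example: with the default ChunkSize of 10 (MiB) a 25 MiB stream gives
// totalParts = ceil(26214400/10485760) = 3 and goes through doMultiUpload, while a 4 MiB
// stream is a single part and uses doSingleUpload; an empty stream is created with touch.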
func (d *Teldrive) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
// TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
return nil, errs.NotImplement
}
func (d *Teldrive) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
// TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
return nil, errs.NotImplement
}
func (d *Teldrive) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
// TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
return nil, errs.NotImplement
}
func (d *Teldrive) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
// TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional
// a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir
// return errs.NotImplement to use an internal archive tool
return nil, errs.NotImplement
}
//func (d *Teldrive) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
// return nil, errs.NotSupport
//}
var _ driver.Driver = (*Teldrive)(nil)

drivers/teldrive/meta.go Normal file

@ -0,0 +1,26 @@
package teldrive
import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/op"
)
type Addition struct {
driver.RootPath
Address string `json:"url" required:"true"`
Cookie string `json:"cookie" type:"string" required:"true" help:"access_token=xxx"`
UseShareLink bool `json:"use_share_link" type:"bool" default:"false" help:"Create a share link when resolving links so 302 redirects can be used. If disabled, you need to enable web proxy."`
ChunkSize int64 `json:"chunk_size" type:"number" default:"10" help:"Chunk size in MiB"`
UploadConcurrency int64 `json:"upload_concurrency" type:"number" default:"4" help:"Concurrent upload requests"`
}
var config = driver.Config{
Name: "Teldrive",
DefaultRoot: "/",
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &Teldrive{}
})
}

drivers/teldrive/types.go Normal file

@ -0,0 +1,77 @@
package teldrive
import (
"context"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/semaphore"
)
type ErrResp struct {
Code int `json:"code"`
Message string `json:"message"`
}
type Object struct {
ID string `json:"id"`
Name string `json:"name"`
Type string `json:"type"`
MimeType string `json:"mimeType"`
Category string `json:"category,omitempty"`
ParentId string `json:"parentId"`
Size int64 `json:"size"`
Encrypted bool `json:"encrypted"`
UpdatedAt time.Time `json:"updatedAt"`
}
type ListResp struct {
Items []Object `json:"items"`
Meta struct {
Count int `json:"count"`
TotalPages int `json:"totalPages"`
CurrentPage int `json:"currentPage"`
} `json:"meta"`
}
type FilePart struct {
Name string `json:"name"`
PartId int `json:"partId"`
PartNo int `json:"partNo"`
ChannelId int `json:"channelId"`
Size int `json:"size"`
Encrypted bool `json:"encrypted"`
Salt string `json:"salt"`
}
type chunkTask struct {
chunkIdx int
fileName string
chunkSize int64
reader *stream.SectionReader
ss *stream.StreamSectionReader
}
type CopyManager struct {
TaskChan chan CopyTask
Sem *semaphore.Weighted
G *errgroup.Group
Ctx context.Context
d *Teldrive
}
type CopyTask struct {
SrcObj model.Obj
DstDir model.Obj
}
type ShareObj struct {
Id string `json:"id"`
Protected bool `json:"protected"`
UserId int `json:"userId"`
Type string `json:"type"`
Name string `json:"name"`
ExpiresAt time.Time `json:"expiresAt"`
}

drivers/teldrive/upload.go Normal file

@ -0,0 +1,373 @@
package teldrive
import (
"fmt"
"io"
"net/http"
"sort"
"strconv"
"sync"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
"github.com/pkg/errors"
"golang.org/x/net/context"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/semaphore"
)
// touch creates an empty file at the given path.
func (d *Teldrive) touch(name, path string) error {
uploadBody := base.Json{
"name": name,
"type": "file",
"path": path,
}
if err := d.request(http.MethodPost, "/api/files", func(req *resty.Request) {
req.SetBody(uploadBody)
}, nil); err != nil {
return err
}
return nil
}
func (d *Teldrive) createFileOnUploadSuccess(name, id, path string, uploadedFileParts []FilePart, totalSize int64) error {
remoteFileParts, err := d.getFilePart(id)
if err != nil {
return err
}
// check if the uploaded file parts match the remote file parts
if len(remoteFileParts) != len(uploadedFileParts) {
return fmt.Errorf("[Teldrive] file parts count mismatch: expected %d, got %d", len(uploadedFileParts), len(remoteFileParts))
}
formatParts := make([]base.Json, 0)
for _, p := range remoteFileParts {
formatParts = append(formatParts, base.Json{
"id": p.PartId,
"salt": p.Salt,
})
}
uploadBody := base.Json{
"name": name,
"type": "file",
"path": path,
"parts": formatParts,
"size": totalSize,
}
// create file here
if err := d.request(http.MethodPost, "/api/files", func(req *resty.Request) {
req.SetBody(uploadBody)
}, nil); err != nil {
return err
}
return nil
}
func (d *Teldrive) checkFilePartExist(fileId string, partId int) (FilePart, error) {
var uploadedParts []FilePart
var filePart FilePart
if err := d.request(http.MethodGet, "/api/uploads/{id}", func(req *resty.Request) {
req.SetPathParam("id", fileId)
}, &uploadedParts); err != nil {
return filePart, err
}
for _, part := range uploadedParts {
if part.PartId == partId {
return part, nil
}
}
return filePart, nil
}
func (d *Teldrive) getFilePart(fileId string) ([]FilePart, error) {
var uploadedParts []FilePart
if err := d.request(http.MethodGet, "/api/uploads/{id}", func(req *resty.Request) {
req.SetPathParam("id", fileId)
}, &uploadedParts); err != nil {
return nil, err
}
return uploadedParts, nil
}
func (d *Teldrive) singleUploadRequest(fileId string, callback base.ReqCallback, resp interface{}) error {
url := d.Address + "/api/uploads/" + fileId
client := resty.New().SetTimeout(0)
ctx := context.Background()
req := client.R().
SetContext(ctx)
req.SetHeader("Cookie", d.Cookie)
req.SetHeader("Content-Type", "application/octet-stream")
req.SetContentLength(true)
req.AddRetryCondition(func(r *resty.Response, err error) bool {
return false
})
if callback != nil {
callback(req)
}
if resp != nil {
req.SetResult(resp)
}
var e ErrResp
req.SetError(&e)
_req, err := req.Execute(http.MethodPost, url)
if err != nil {
return err
}
if _req.IsError() {
return &e
}
return nil
}
func (d *Teldrive) doSingleUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up model.UpdateProgress,
totalParts int, chunkSize int64, fileId string) error {
totalSize := file.GetSize()
var fileParts []FilePart
var uploaded int64 = 0
ss, err := stream.NewStreamSectionReader(file, int(totalSize), &up)
if err != nil {
return err
}
for uploaded < totalSize {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
curChunkSize := min(totalSize-uploaded, chunkSize)
rd, err := ss.GetSectionReader(uploaded, curChunkSize)
if err != nil {
return err
}
filePart := &FilePart{}
if err := retry.Do(func() error {
if _, err := rd.Seek(0, io.SeekStart); err != nil {
return err
}
if err := d.singleUploadRequest(fileId, func(req *resty.Request) {
uploadParams := map[string]string{
"partName": func() string {
digits := len(fmt.Sprintf("%d", totalParts))
return file.GetName() + fmt.Sprintf(".%0*d", digits, 1)
}(),
"partNo": strconv.Itoa(1),
"fileName": file.GetName(),
}
req.SetQueryParams(uploadParams)
req.SetBody(driver.NewLimitedUploadStream(ctx, rd))
req.SetHeader("Content-Length", strconv.FormatInt(curChunkSize, 10))
}, filePart); err != nil {
return err
}
return nil
},
retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second)); err != nil {
return err
}
if filePart.Name != "" {
fileParts = append(fileParts, *filePart)
uploaded += curChunkSize
up(float64(uploaded) / float64(totalSize))
ss.FreeSectionReader(rd)
}
}
return d.createFileOnUploadSuccess(file.GetName(), fileId, dstDir.GetPath(), fileParts, totalSize)
}
func (d *Teldrive) doMultiUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up model.UpdateProgress,
maxRetried, totalParts int, chunkSize int64, fileId string) error {
concurrent := d.UploadConcurrency
g, ctx := errgroup.WithContext(ctx)
sem := semaphore.NewWeighted(int64(concurrent))
chunkChan := make(chan chunkTask, concurrent*2)
resultChan := make(chan FilePart, concurrent)
totalSize := file.GetSize()
ss, err := stream.NewStreamSectionReader(file, int(totalSize), &up)
if err != nil {
return err
}
ssLock := sync.Mutex{}
g.Go(func() error {
defer close(chunkChan)
chunkIdx := 0
for chunkIdx < totalParts {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
offset := int64(chunkIdx) * chunkSize
curChunkSize := min(totalSize-offset, chunkSize)
ssLock.Lock()
reader, err := ss.GetSectionReader(offset, curChunkSize)
ssLock.Unlock()
if err != nil {
return err
}
task := chunkTask{
chunkIdx: chunkIdx + 1,
chunkSize: curChunkSize,
fileName: file.GetName(),
reader: reader,
ss: ss,
}
// FreeSectionReader is called on this reader in d.uploadSingleChunk.
select {
case chunkChan <- task:
chunkIdx++
case <-ctx.Done():
return ctx.Err()
}
}
return nil
})
for i := 0; i < int(concurrent); i++ {
g.Go(func() error {
for task := range chunkChan {
if err := sem.Acquire(ctx, 1); err != nil {
return err
}
filePart, err := d.uploadSingleChunk(ctx, fileId, task, totalParts, maxRetried)
sem.Release(1)
if err != nil {
return fmt.Errorf("upload chunk %d failed: %w", task.chunkIdx, err)
}
select {
case resultChan <- *filePart:
case <-ctx.Done():
return ctx.Err()
}
}
return nil
})
}
var fileParts []FilePart
var collectErr error
collectDone := make(chan struct{})
go func() {
defer close(collectDone)
fileParts = make([]FilePart, 0, totalParts)
done := make(chan error, 1)
go func() {
done <- g.Wait()
close(resultChan)
}()
for {
select {
case filePart, ok := <-resultChan:
if !ok {
collectErr = <-done
return
}
fileParts = append(fileParts, filePart)
case err := <-done:
collectErr = err
return
}
}
}()
<-collectDone
if collectErr != nil {
return fmt.Errorf("multi-upload failed: %w", collectErr)
}
sort.Slice(fileParts, func(i, j int) bool {
return fileParts[i].PartNo < fileParts[j].PartNo
})
return d.createFileOnUploadSuccess(file.GetName(), fileId, dstDir.GetPath(), fileParts, totalSize)
}
func (d *Teldrive) uploadSingleChunk(ctx context.Context, fileId string, task chunkTask, totalParts, maxRetried int) (*FilePart, error) {
filePart := &FilePart{}
retryCount := 0
defer task.ss.FreeSectionReader(task.reader)
for {
select {
case <-ctx.Done():
return nil, ctx.Err()
default:
}
if existingPart, err := d.checkFilePartExist(fileId, task.chunkIdx); err == nil && existingPart.Name != "" {
return &existingPart, nil
}
err := d.singleUploadRequest(fileId, func(req *resty.Request) {
uploadParams := map[string]string{
"partName": func() string {
digits := len(fmt.Sprintf("%d", totalParts))
return task.fileName + fmt.Sprintf(".%0*d", digits, task.chunkIdx)
}(),
"partNo": strconv.Itoa(task.chunkIdx),
"fileName": task.fileName,
}
req.SetQueryParams(uploadParams)
req.SetBody(driver.NewLimitedUploadStream(ctx, task.reader))
req.SetHeader("Content-Length", strconv.Itoa(int(task.chunkSize)))
}, filePart)
if err == nil {
return filePart, nil
}
if retryCount >= maxRetried {
return nil, fmt.Errorf("upload failed after %d retries: %w", maxRetried, err)
}
if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
continue
}
retryCount++
utils.Log.Errorf("[Teldrive] upload error: %v, retrying %d times", err, retryCount)
backoffDuration := time.Duration(retryCount*retryCount) * time.Second
if backoffDuration > 30*time.Second {
backoffDuration = 30 * time.Second
}
select {
case <-time.After(backoffDuration):
case <-ctx.Done():
return nil, ctx.Err()
}
}
}
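doMultiUpload above fans the stream out to a bounded pool: one producer goroutine cuts the file into section readers and queues chunkTasks, UploadConcurrency workers pull from that queue under a weighted semaphore and call uploadSingleChunk (which itself retries with a quadratic backoff capped at 30 seconds), and a collector gathers the returned FileParts before they are sorted by PartNo and committed. A minimal, self-contained sketch of that fan-out shape, with illustrative names only and none of the Teldrive types:

package main

import (
    "context"
    "fmt"

    "golang.org/x/sync/errgroup"
    "golang.org/x/sync/semaphore"
)

func main() {
    const totalParts, concurrency = 10, 3
    g, ctx := errgroup.WithContext(context.Background())
    sem := semaphore.NewWeighted(concurrency)
    tasks := make(chan int, concurrency*2)
    results := make(chan int, concurrency)

    g.Go(func() error { // producer: enqueue chunk indices
        defer close(tasks)
        for i := 1; i <= totalParts; i++ {
            select {
            case tasks <- i:
            case <-ctx.Done():
                return ctx.Err()
            }
        }
        return nil
    })
    for w := 0; w < concurrency; w++ { // bounded workers
        g.Go(func() error {
            for idx := range tasks {
                if err := sem.Acquire(ctx, 1); err != nil {
                    return err
                }
                part := idx // stand-in for uploadSingleChunk
                sem.Release(1)
                select {
                case results <- part:
                case <-ctx.Done():
                    return ctx.Err()
                }
            }
            return nil
        })
    }
    done := make(chan error, 1)
    go func() { done <- g.Wait(); close(results) }() // close results once workers finish
    var parts []int
    for p := range results { // collector
        parts = append(parts, p)
    }
    if err := <-done; err != nil {
        fmt.Println("upload failed:", err)
        return
    }
    fmt.Println("collected parts:", parts)
}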

drivers/teldrive/util.go (new file, 109 lines)

@ -0,0 +1,109 @@
package teldrive
import (
"fmt"
"net/http"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/go-resty/resty/v2"
)
// do others that are not defined in Driver interface
func (d *Teldrive) request(method string, pathname string, callback base.ReqCallback, resp interface{}) error {
url := d.Address + pathname
req := base.RestyClient.R()
req.SetHeader("Cookie", d.Cookie)
if callback != nil {
callback(req)
}
if resp != nil {
req.SetResult(resp)
}
var e ErrResp
req.SetError(&e)
_req, err := req.Execute(method, url)
if err != nil {
return err
}
if _req.IsError() {
return &e
}
return nil
}
func (d *Teldrive) getFile(path, name string, isFolder bool) (model.Obj, error) {
resp := &ListResp{}
err := d.request(http.MethodGet, "/api/files", func(req *resty.Request) {
req.SetQueryParams(map[string]string{
"path": path,
"name": name,
"type": func() string {
if isFolder {
return "folder"
}
return "file"
}(),
"operation": "find",
})
}, resp)
if err != nil {
return nil, err
}
if len(resp.Items) == 0 {
return nil, fmt.Errorf("file not found: %s/%s", path, name)
}
obj := resp.Items[0]
return &model.Object{
ID: obj.ID,
Name: obj.Name,
Size: obj.Size,
IsFolder: obj.Type == "folder",
}, err
}
func (err *ErrResp) Error() string {
if err == nil {
return ""
}
return fmt.Sprintf("[Teldrive] message:%s Error code:%d", err.Message, err.Code)
}
func (d *Teldrive) createShareFile(fileId string) error {
var errResp ErrResp
if err := d.request(http.MethodPost, "/api/files/{id}/share", func(req *resty.Request) {
req.SetPathParam("id", fileId)
req.SetBody(base.Json{
"expiresAt": getDateTime(),
})
}, &errResp); err != nil {
return err
}
if errResp.Message != "" {
return &errResp
}
return nil
}
func (d *Teldrive) getShareFileById(fileId string) (*ShareObj, error) {
var shareObj ShareObj
if err := d.request(http.MethodGet, "/api/files/{id}/share", func(req *resty.Request) {
req.SetPathParam("id", fileId)
}, &shareObj); err != nil {
return nil, err
}
return &shareObj, nil
}
func getDateTime() string {
now := time.Now().UTC()
formattedWithMs := now.Add(time.Hour * 1).Format("2006-01-02T15:04:05.000Z")
return formattedWithMs
}
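The request helper above funnels every Teldrive API call through one place: it attaches the session cookie, optionally decodes the JSON result, and maps non-2xx responses to ErrResp. A hedged sketch of what a call site then looks like; the PATCH endpoint and payload here are purely illustrative and are not part of the actual Teldrive API:

// Illustrative only: method, endpoint and body are assumptions, not Teldrive facts.
func (d *Teldrive) renameFile(fileId, newName string) error {
    return d.request(http.MethodPatch, "/api/files/{id}", func(req *resty.Request) {
        req.SetPathParam("id", fileId)          // resty expands {id} in the path
        req.SetBody(base.Json{"name": newName}) // request payload
    }, nil) // no response struct to decode
}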


@ -93,6 +93,11 @@ func (d *Template) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.O
return nil, errs.NotImplement
}
func (d *Template) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
// TODO return storage details (total space, free space, etc.)
return nil, errs.NotImplement
}
//func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
// return nil, errs.NotSupport
//}


@ -36,5 +36,6 @@ func (d *Wopan) getSpaceType() string {
// 20230607214351
func getTime(str string) (time.Time, error) {
return time.Parse("20060102150405", str)
loc := time.FixedZone("UTC+8", 8*60*60)
return time.ParseInLocation("20060102150405", str, loc)
}
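The getTime fix above matters because time.Parse interprets a layout without zone information as UTC, whereas ParseInLocation pins the same wall-clock string to the given zone; for Wopan's UTC+8 timestamps the removed time.Parse call and the new ParseInLocation call therefore disagree by eight hours. A small runnable illustration:

package main

import (
    "fmt"
    "time"
)

func main() {
    const layout, s = "20060102150405", "20230607214351"
    asUTC, _ := time.Parse(layout, s) // old behaviour: wall clock read as UTC
    loc := time.FixedZone("UTC+8", 8*60*60)
    asCST, _ := time.ParseInLocation(layout, s, loc) // new behaviour: read as UTC+8
    fmt.Println(asUTC.Unix() - asCST.Unix())         // 28800 seconds, i.e. 8 hours
}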


@ -5,9 +5,23 @@ umask ${UMASK}
if [ "$1" = "version" ]; then
./openlist version
else
# Check that the current user has write and execute permissions on the ./data directory (/opt/openlist/data)
if [ -d ./data ]; then
if ! [ -w ./data ] || ! [ -x ./data ]; then
cat <<EOF
Error: Current user does not have write and/or execute permissions for the ./data directory: $(pwd)/data
Please visit https://doc.oplist.org/guide/installation/docker#for-version-after-v4-1-0 for more information.
错误:当前用户没有 ./data 目录($(pwd)/data)的写和/或执行权限。
请访问 https://doc.oplist.org/guide/installation/docker#v4-1-0-%E4%BB%A5%E5%90%8E%E7%89%88%E6%9C%AC 获取更多信息。
Exiting...
EOF
exit 1
fi
fi
# Define the target directory path for aria2 service
ARIA2_DIR="/opt/service/start/aria2"
if [ "$RUN_ARIA2" = "true" ]; then
# If aria2 should run and target directory doesn't exist, copy it
if [ ! -d "$ARIA2_DIR" ]; then

go.mod (2 lines changed)

@ -11,7 +11,7 @@ require (
github.com/OpenListTeam/times v0.1.0
github.com/OpenListTeam/wopan-sdk-go v0.1.5
github.com/ProtonMail/go-crypto v1.3.0
github.com/SheltonZhu/115driver v1.1.0
github.com/SheltonZhu/115driver v1.1.1
github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible
github.com/avast/retry-go v3.0.0+incompatible
github.com/aws/aws-sdk-go v1.55.7

go.sum (4 lines changed)

@ -59,8 +59,8 @@ github.com/RoaringBitmap/roaring/v2 v2.4.5 h1:uGrrMreGjvAtTBobc0g5IrW1D5ldxDQYe2
github.com/RoaringBitmap/roaring/v2 v2.4.5/go.mod h1:FiJcsfkGje/nZBZgCu0ZxCPOKD/hVXDS2dXi7/eUFE0=
github.com/STARRY-S/zip v0.2.1 h1:pWBd4tuSGm3wtpoqRZZ2EAwOmcHK6XFf7bU9qcJXyFg=
github.com/STARRY-S/zip v0.2.1/go.mod h1:xNvshLODWtC4EJ702g7cTYn13G53o1+X9BWnPFpcWV4=
github.com/SheltonZhu/115driver v1.1.0 h1:kA8Vtu5JVWqqJFiTF06+HDb9zVEO6ZSdyjV5HsGx7Wg=
github.com/SheltonZhu/115driver v1.1.0/go.mod h1:rKvNd4Y4OkXv1TMbr/SKjGdcvMQxh6AW5Tw9w0CJb7E=
github.com/SheltonZhu/115driver v1.1.1 h1:9EMhe2ZJflGiAaZbYInw2jqxTcqZNF+DtVDsEy70aFU=
github.com/SheltonZhu/115driver v1.1.1/go.mod h1:rKvNd4Y4OkXv1TMbr/SKjGdcvMQxh6AW5Tw9w0CJb7E=
github.com/abbot/go-http-auth v0.4.0 h1:QjmvZ5gSC7jm3Zg54DqWE/T5m1t2AfDu6QlXJT0EVT0=
github.com/abbot/go-http-auth v0.4.0/go.mod h1:Cz6ARTIzApMJDzh5bRMSUou6UMSp0IEXg9km/ci7TJM=
github.com/aead/ecdh v0.2.0 h1:pYop54xVaq/CEREFEcukHRZfTdjiWvYIsZDXXrBapQQ=


@ -1,10 +1,11 @@
package archives
import (
"fmt"
"io"
"io/fs"
"os"
stdpath "path"
"path/filepath"
"strings"
"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
@ -107,7 +108,7 @@ func (Archives) Decompress(ss []*stream.SeekableStream, outputPath string, args
}
if stat.IsDir() {
isDir = true
outputPath = stdpath.Join(outputPath, stat.Name())
outputPath = filepath.Join(outputPath, stat.Name())
err = os.Mkdir(outputPath, 0700)
if err != nil {
return filterPassword(err)
@ -120,11 +121,14 @@ func (Archives) Decompress(ss []*stream.SeekableStream, outputPath string, args
return err
}
relPath := strings.TrimPrefix(p, path+"/")
dstPath := stdpath.Join(outputPath, relPath)
dstPath := filepath.Join(outputPath, relPath)
if !strings.HasPrefix(dstPath, outputPath+string(os.PathSeparator)) {
return fmt.Errorf("illegal file path: %s", relPath)
}
if d.IsDir() {
err = os.MkdirAll(dstPath, 0700)
} else {
dir := stdpath.Dir(dstPath)
dir := filepath.Dir(dstPath)
err = decompress(fsys, p, dir, func(_ float64) {})
}
return err
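The guard added here (and repeated in the other archive tools below) is the standard zip-slip defence: join the attacker-controlled entry name onto the output directory, then refuse to write unless the cleaned result still lies strictly inside that directory. A self-contained sketch of the check:

package main

import (
    "fmt"
    "os"
    "path/filepath"
    "strings"
)

// safeJoin joins an archive entry name under base and rejects results that
// escape base, which is the zip-slip condition the patches above guard against.
func safeJoin(base, name string) (string, error) {
    dst := filepath.Join(base, name) // Join also cleans ".." segments
    if !strings.HasPrefix(dst, base+string(os.PathSeparator)) {
        return "", fmt.Errorf("illegal file path: %s", name)
    }
    return dst, nil
}

func main() {
    fmt.Println(safeJoin("/tmp/out", "docs/readme.txt"))  // accepted
    fmt.Println(safeJoin("/tmp/out", "../../etc/passwd")) // rejected
}

A stricter variant could also call filepath.Rel and reject any result that starts with ".."; the prefix check above simply mirrors what the patched code does.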


@ -1,10 +1,11 @@
package archives
import (
"fmt"
"io"
fs2 "io/fs"
"os"
stdpath "path"
"path/filepath"
"strings"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
@ -69,7 +70,11 @@ func decompress(fsys fs2.FS, filePath, targetPath string, up model.UpdateProgres
if err != nil {
return err
}
f, err := os.OpenFile(stdpath.Join(targetPath, stat.Name()), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
destPath := filepath.Join(targetPath, stat.Name())
if !strings.HasPrefix(destPath, targetPath+string(os.PathSeparator)) {
return fmt.Errorf("illegal file path: %s", stat.Name())
}
f, err := os.OpenFile(destPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
if err != nil {
return err
}


@ -1,9 +1,11 @@
package iso9660
import (
"fmt"
"io"
"os"
stdpath "path"
"path/filepath"
"strings"
"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
@ -79,7 +81,11 @@ func (ISO9660) Decompress(ss []*stream.SeekableStream, outputPath string, args m
}
if obj.IsDir() {
if args.InnerPath != "/" {
outputPath = stdpath.Join(outputPath, obj.Name())
rootpath := outputPath
outputPath = filepath.Join(outputPath, obj.Name())
if !strings.HasPrefix(outputPath, rootpath+string(os.PathSeparator)) {
return fmt.Errorf("illegal file path: %s", obj.Name())
}
if err = os.MkdirAll(outputPath, 0700); err != nil {
return err
}


@ -1,8 +1,9 @@
package iso9660
import (
"fmt"
"os"
stdpath "path"
"path/filepath"
"strings"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
@ -62,7 +63,11 @@ func toModelObj(file *iso9660.File) model.Obj {
}
func decompress(f *iso9660.File, path string, up model.UpdateProgress) error {
file, err := os.OpenFile(stdpath.Join(path, f.Name()), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
destPath := filepath.Join(path, f.Name())
if !strings.HasPrefix(destPath, path+string(os.PathSeparator)) {
return fmt.Errorf("illegal file path: %s", f.Name())
}
file, err := os.OpenFile(destPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
if err != nil {
return err
}
@ -84,7 +89,10 @@ func decompressAll(children []*iso9660.File, path string) error {
if err != nil {
return err
}
nextPath := stdpath.Join(path, child.Name())
nextPath := filepath.Join(path, child.Name())
if !strings.HasPrefix(nextPath, path+string(os.PathSeparator)) {
return fmt.Errorf("illegal file path: %s", child.Name())
}
if err = os.MkdirAll(nextPath, 0700); err != nil {
return err
}


@ -3,7 +3,7 @@ package rardecode
import (
"io"
"os"
stdpath "path"
"path/filepath"
"strings"
"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
@ -93,7 +93,7 @@ func (RarDecoder) Decompress(ss []*stream.SeekableStream, outputPath string, arg
}
} else {
innerPath := strings.TrimPrefix(args.InnerPath, "/")
innerBase := stdpath.Base(innerPath)
innerBase := filepath.Base(innerPath)
createdBaseDir := false
for {
var header *rardecode.FileHeader
@ -115,7 +115,7 @@ func (RarDecoder) Decompress(ss []*stream.SeekableStream, outputPath string, arg
}
break
} else if strings.HasPrefix(name, innerPath+"/") {
targetPath := stdpath.Join(outputPath, innerBase)
targetPath := filepath.Join(outputPath, innerBase)
if !createdBaseDir {
err = os.Mkdir(targetPath, 0700)
if err != nil {


@ -5,7 +5,7 @@ import (
"io"
"io/fs"
"os"
stdpath "path"
"path/filepath"
"sort"
"strings"
"time"
@ -124,7 +124,7 @@ type WrapFileInfo struct {
}
func (f *WrapFileInfo) Name() string {
return stdpath.Base(f.File.Name)
return filepath.Base(f.File.Name)
}
func (f *WrapFileInfo) Size() int64 {
@ -183,12 +183,16 @@ func getReader(ss []*stream.SeekableStream, password string) (*rardecode.Reader,
func decompress(reader *rardecode.Reader, header *rardecode.FileHeader, filePath, outputPath string) error {
targetPath := outputPath
dir, base := stdpath.Split(filePath)
dir, base := filepath.Split(filePath)
if dir != "" {
targetPath = stdpath.Join(targetPath, dir)
err := os.MkdirAll(targetPath, 0700)
if err != nil {
return err
targetPath = filepath.Join(targetPath, dir)
if strings.HasPrefix(targetPath, outputPath+string(os.PathSeparator)) {
err := os.MkdirAll(targetPath, 0700)
if err != nil {
return err
}
} else {
targetPath = outputPath
}
}
if base != "" {
@ -201,7 +205,11 @@ func decompress(reader *rardecode.Reader, header *rardecode.FileHeader, filePath
}
func _decompress(reader *rardecode.Reader, header *rardecode.FileHeader, targetPath string, up model.UpdateProgress) error {
f, err := os.OpenFile(stdpath.Join(targetPath, stdpath.Base(header.Name)), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
destPath := filepath.Join(targetPath, filepath.Base(header.Name))
if !strings.HasPrefix(destPath, targetPath+string(os.PathSeparator)) {
return fmt.Errorf("illegal file path: %s", filepath.Base(header.Name))
}
f, err := os.OpenFile(destPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
if err != nil {
return err
}


@ -1,10 +1,11 @@
package tool
import (
"fmt"
"io"
"io/fs"
"os"
stdpath "path"
"path/filepath"
"strings"
"github.com/OpenListTeam/OpenList/v4/internal/model"
@ -40,13 +41,13 @@ func GenerateMetaTreeFromFolderTraversal(r ArchiveReader) (bool, []model.ObjTree
isNewFolder := false
if !file.FileInfo().IsDir() {
// first add the file to the folder that contains it
dir = stdpath.Dir(name)
dir = filepath.Dir(name)
dirObj = dirMap[dir]
if dirObj == nil {
isNewFolder = dir != "."
dirObj = &model.ObjectTree{}
dirObj.IsFolder = true
dirObj.Name = stdpath.Base(dir)
dirObj.Name = filepath.Base(dir)
dirObj.Modified = file.FileInfo().ModTime()
dirMap[dir] = dirObj
}
@ -64,28 +65,28 @@ func GenerateMetaTreeFromFolderTraversal(r ArchiveReader) (bool, []model.ObjTree
dirMap[dir] = dirObj
}
dirObj.IsFolder = true
dirObj.Name = stdpath.Base(dir)
dirObj.Name = filepath.Base(dir)
dirObj.Modified = file.FileInfo().ModTime()
}
if isNewFolder {
// add the folder to its parent folder
// some archives only record file paths, without separate folder entries
// so loop upward and create every missing parent folder
parentDir := stdpath.Dir(dir)
parentDir := filepath.Dir(dir)
for {
parentDirObj := dirMap[parentDir]
if parentDirObj == nil {
parentDirObj = &model.ObjectTree{}
if parentDir != "." {
parentDirObj.IsFolder = true
parentDirObj.Name = stdpath.Base(parentDir)
parentDirObj.Name = filepath.Base(parentDir)
parentDirObj.Modified = file.FileInfo().ModTime()
}
dirMap[parentDir] = parentDirObj
}
parentDirObj.Children = append(parentDirObj.Children, dirObj)
parentDir = stdpath.Dir(parentDir)
parentDir = filepath.Dir(parentDir)
if dirMap[parentDir] != nil {
break
}
@ -127,7 +128,7 @@ func DecompressFromFolderTraversal(r ArchiveReader, outputPath string, args mode
}
} else {
innerPath := strings.TrimPrefix(args.InnerPath, "/")
innerBase := stdpath.Base(innerPath)
innerBase := filepath.Base(innerPath)
createdBaseDir := false
for _, file := range files {
name := file.Name()
@ -138,7 +139,7 @@ func DecompressFromFolderTraversal(r ArchiveReader, outputPath string, args mode
}
break
} else if strings.HasPrefix(name, innerPath+"/") {
targetPath := stdpath.Join(outputPath, innerBase)
targetPath := filepath.Join(outputPath, innerBase)
if !createdBaseDir {
err = os.Mkdir(targetPath, 0700)
if err != nil {
@ -159,12 +160,16 @@ func DecompressFromFolderTraversal(r ArchiveReader, outputPath string, args mode
func decompress(file SubFile, filePath, outputPath, password string) error {
targetPath := outputPath
dir, base := stdpath.Split(filePath)
dir, base := filepath.Split(filePath)
if dir != "" {
targetPath = stdpath.Join(targetPath, dir)
err := os.MkdirAll(targetPath, 0700)
if err != nil {
return err
targetPath = filepath.Join(targetPath, dir)
if strings.HasPrefix(targetPath, outputPath+string(os.PathSeparator)) {
err := os.MkdirAll(targetPath, 0700)
if err != nil {
return err
}
} else {
targetPath = outputPath
}
}
if base != "" {
@ -185,7 +190,11 @@ func _decompress(file SubFile, targetPath, password string, up model.UpdateProgr
return err
}
defer func() { _ = rc.Close() }()
f, err := os.OpenFile(stdpath.Join(targetPath, file.FileInfo().Name()), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
destPath := filepath.Join(targetPath, file.FileInfo().Name())
if !strings.HasPrefix(destPath, targetPath+string(os.PathSeparator)) {
return fmt.Errorf("illegal file path: %s", file.FileInfo().Name())
}
f, err := os.OpenFile(destPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
if err != nil {
return err
}
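GenerateMetaTreeFromFolderTraversal copes with archives that record only file paths: when a file such as a/b/c.txt appears, its folder a/b is created on demand in dirMap, and the loop then walks upward parent by parent, creating and linking any missing ancestors until it reaches one that already exists. A simplified sketch of that upward walk, using a plain map of names instead of the repo's ObjectTree:

package main

import (
    "fmt"
    "path/filepath"
)

func main() {
    dirMap := map[string][]string{".": {}} // parent dir -> child names
    name := "a/b/c.txt"                    // the archive records only file paths

    dir := filepath.Dir(name) // "a/b"
    dirMap[dir] = append(dirMap[dir], filepath.Base(name))

    // walk upward, creating missing parents and linking each child to its parent
    parent, child := filepath.Dir(dir), dir
    for {
        _, existed := dirMap[parent]
        dirMap[parent] = append(dirMap[parent], filepath.Base(child))
        if existed {
            break
        }
        child, parent = parent, filepath.Dir(parent)
    }
    fmt.Println(dirMap) // map[.:[a] a:[b] a/b:[c.txt]]
}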


@ -111,8 +111,10 @@ func InitialSettings() []model.SettingItem {
{Key: conf.Favicon, Value: "https://res.oplist.org/logo/logo.svg", MigrationValue: "https://cdn.oplist.org/gh/OpenListTeam/Logo@main/logo.svg", Type: conf.TypeString, Group: model.STYLE},
{Key: conf.MainColor, Value: "#1890ff", Type: conf.TypeString, Group: model.STYLE},
{Key: "home_icon", Value: "🏠", Type: conf.TypeString, Group: model.STYLE},
{Key: "share_icon", Value: "🎁", Type: conf.TypeString, Group: model.STYLE},
{Key: "home_container", Value: "max_980px", Type: conf.TypeSelect, Options: "max_980px,hope_container", Group: model.STYLE},
{Key: "settings_layout", Value: "list", Type: conf.TypeSelect, Options: "list,responsive", Group: model.STYLE},
{Key: conf.HideStorageDetails, Value: "false", Type: conf.TypeBool, Group: model.STYLE, Flag: model.PRIVATE},
// preview settings
{Key: conf.TextTypes, Value: "txt,htm,html,xml,java,properties,sql,js,md,json,conf,ini,vue,php,py,bat,gitignore,yml,go,sh,c,cpp,h,hpp,tsx,vtt,srt,ass,rs,lrc,strm", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE},
{Key: conf.AudioTypes, Value: "mp3,flac,ogg,m4a,wav,opus,wma", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE},
@ -161,8 +163,12 @@ func InitialSettings() []model.SettingItem {
{Key: conf.OcrApi, Value: "https://openlistteam-ocr-api-server.hf.space/ocr/file/json", MigrationValue: "https://api.example.com/ocr/file/json", Type: conf.TypeString, Group: model.GLOBAL}, // TODO: This can be replaced by a community-hosted endpoint, see https://github.com/OpenListTeam/ocr_api_server
{Key: conf.FilenameCharMapping, Value: `{"/": "|"}`, Type: conf.TypeText, Group: model.GLOBAL},
{Key: conf.ForwardDirectLinkParams, Value: "false", Type: conf.TypeBool, Group: model.GLOBAL},
{Key: conf.IgnoreDirectLinkParams, Value: "sign,openlist_ts", Type: conf.TypeString, Group: model.GLOBAL},
{Key: conf.IgnoreDirectLinkParams, Value: "sign,openlist_ts,raw", Type: conf.TypeString, Group: model.GLOBAL},
{Key: conf.WebauthnLoginEnabled, Value: "false", Type: conf.TypeBool, Group: model.GLOBAL, Flag: model.PUBLIC},
{Key: conf.SharePreview, Value: "false", Type: conf.TypeBool, Group: model.GLOBAL, Flag: model.PUBLIC},
{Key: conf.ShareArchivePreview, Value: "false", Type: conf.TypeBool, Group: model.GLOBAL, Flag: model.PUBLIC},
{Key: conf.ShareForceProxy, Value: "true", Type: conf.TypeBool, Group: model.GLOBAL, Flag: model.PRIVATE},
{Key: conf.ShareSummaryContent, Value: "@{{creator}} shared {{#each files}}{{#if @first}}\"{{filename this}}\"{{/if}}{{#if @last}}{{#unless (eq @index 0)}} and {{@index}} more files{{/unless}}{{/if}}{{/each}} from {{site_title}}: {{base_url}}/@s/{{id}}{{#if pwd}} , the share code is {{pwd}}{{/if}}{{#if expires}}, please access before {{dateLocaleString expires}}.{{/if}}", Type: conf.TypeText, Group: model.GLOBAL, Flag: model.PUBLIC},
// single settings
{Key: conf.Token, Value: token, Type: conf.TypeString, Group: model.SINGLE, Flag: model.PRIVATE},
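The share_summary_content default added above is a Handlebars template: it quotes the first file name, appends "and N more files" when the share holds more than one entry (N is the index of the last file), and conditionally adds the share code and expiry. For a hypothetical share by @alice with three files (the first named report.pdf), id abc12345, code 1234, no expiry, and a site titled OpenList at https://example.com, it would render roughly as:

@alice shared "report.pdf" and 2 more files from OpenList: https://example.com/@s/abc12345 , the share code is 1234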


@ -33,8 +33,8 @@ func initUser() {
Role: model.ADMIN,
BasePath: "/",
Authn: "[]",
// 0(can see hidden) - 7(can remove) & 12(can read archives) - 13(can decompress archives)
Permission: 0x31FF,
// 0(can see hidden) - 8(webdav read) & 12(can read archives) - 14(can share)
Permission: 0x71FF,
}
if err := op.CreateUser(admin); err != nil {
panic(err)
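The admin Permission value is a bit mask, and the bump from 0x31FF to 0x71FF only sets one additional high bit; decomposed (bit meanings per the updated comment):

0x31FF = 0x1FF | 0x1000 | 0x2000            // bits 0-8 plus bits 12-13
0x71FF = 0x1FF | 0x1000 | 0x2000 | 0x4000   // bits 0-8 plus bits 12-14 (bit 14: can share)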


@ -17,9 +17,10 @@ const (
AllowMounted = "allow_mounted"
RobotsTxt = "robots_txt"
Logo = "logo" // multi-lines text, L1: light, EOL: dark
Favicon = "favicon"
MainColor = "main_color"
Logo = "logo" // multi-lines text, L1: light, EOL: dark
Favicon = "favicon"
MainColor = "main_color"
HideStorageDetails = "hide_storage_details"
// preview
TextTypes = "text_types"
@ -33,6 +34,7 @@ const (
PreviewArchivesByDefault = "preview_archives_by_default"
ReadMeAutoRender = "readme_autorender"
FilterReadMeScripts = "filter_readme_scripts"
// global
HideFiles = "hide_files"
CustomizeHead = "customize_head"
@ -45,6 +47,10 @@ const (
ForwardDirectLinkParams = "forward_direct_link_params"
IgnoreDirectLinkParams = "ignore_direct_link_params"
WebauthnLoginEnabled = "webauthn_login_enabled"
SharePreview = "share_preview"
ShareArchivePreview = "share_archive_preview"
ShareForceProxy = "share_force_proxy"
ShareSummaryContent = "share_summary_content"
// index
SearchIndex = "search_index"
@ -167,4 +173,5 @@ const (
RequestHeaderKey
UserAgentKey
PathKey
SharingIDKey
)


@ -12,7 +12,7 @@ var db *gorm.DB
func Init(d *gorm.DB) {
db = d
err := AutoMigrate(new(model.Storage), new(model.User), new(model.Meta), new(model.SettingItem), new(model.SearchNode), new(model.TaskItem), new(model.SSHPublicKey))
err := AutoMigrate(new(model.Storage), new(model.User), new(model.Meta), new(model.SettingItem), new(model.SearchNode), new(model.TaskItem), new(model.SSHPublicKey), new(model.SharingDB))
if err != nil {
log.Fatalf("failed migrate database: %s", err.Error())
}

internal/db/sharing.go (new file, 62 lines)

@ -0,0 +1,62 @@
package db
import (
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils/random"
"github.com/pkg/errors"
)
func GetSharingById(id string) (*model.SharingDB, error) {
s := model.SharingDB{ID: id}
if err := db.Where(s).First(&s).Error; err != nil {
return nil, errors.Wrapf(err, "failed get sharing")
}
return &s, nil
}
func GetSharings(pageIndex, pageSize int) (sharings []model.SharingDB, count int64, err error) {
sharingDB := db.Model(&model.SharingDB{})
if err := sharingDB.Count(&count).Error; err != nil {
return nil, 0, errors.Wrapf(err, "failed get sharings count")
}
if err := sharingDB.Order(columnName("id")).Offset((pageIndex - 1) * pageSize).Limit(pageSize).Find(&sharings).Error; err != nil {
return nil, 0, errors.Wrapf(err, "failed get find sharings")
}
return sharings, count, nil
}
func GetSharingsByCreatorId(creator uint, pageIndex, pageSize int) (sharings []model.SharingDB, count int64, err error) {
sharingDB := db.Model(&model.SharingDB{})
cond := model.SharingDB{CreatorId: creator}
if err := sharingDB.Where(cond).Count(&count).Error; err != nil {
return nil, 0, errors.Wrapf(err, "failed get sharings count")
}
if err := sharingDB.Where(cond).Order(columnName("id")).Offset((pageIndex - 1) * pageSize).Limit(pageSize).Find(&sharings).Error; err != nil {
return nil, 0, errors.Wrapf(err, "failed get find sharings")
}
return sharings, count, nil
}
func CreateSharing(s *model.SharingDB) (string, error) {
id := random.String(8)
for len(id) < 12 {
old := model.SharingDB{
ID: id,
}
if err := db.Where(old).First(&old).Error; err != nil {
s.ID = id
return id, errors.WithStack(db.Create(s).Error)
}
id += random.String(1)
}
return "", errors.New("failed find valid id")
}
func UpdateSharing(s *model.SharingDB) error {
return errors.WithStack(db.Save(s).Error)
}
func DeleteSharingById(id string) error {
s := model.SharingDB{ID: id}
return errors.WithStack(db.Where(s).Delete(&s).Error)
}
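CreateSharing treats a failed First lookup (record not found) as proof that the random id is free; on a collision it appends one more random character and retries, giving up once the id would exceed 12 characters. A hedged usage sketch from inside the same package; only ID and CreatorId are known fields, and creatorID here is a placeholder value:

s := &model.SharingDB{CreatorId: creatorID} // creatorID: placeholder user id
id, err := CreateSharing(s)                 // fills s.ID with the generated id
if err == nil {
    sharing, _ := GetSharingById(id)
    _ = sharing
}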


@ -210,6 +210,11 @@ type ArchiveDecompressResult interface {
ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error)
}
type WithDetails interface {
// GetDetails get storage details (total space, free space, etc.)
GetDetails(ctx context.Context) (*model.StorageDetails, error)
}
type Reference interface {
InitReference(storage Driver) error
}
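WithDetails is an optional capability interface in the same spirit as the others in this file: drivers that can report quota implement GetDetails, and callers probe for it with a type assertion. A hedged sketch of the caller side, assuming only the interface shown above:

// Sketch: probe a driver.Driver value for the optional capability.
if wd, ok := storage.(driver.WithDetails); ok {
    if details, err := wd.GetDetails(ctx); err == nil {
        _ = details // total/free space etc. for this storage
    }
}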


@ -23,6 +23,10 @@ var (
UnknownArchiveFormat = errors.New("unknown archive format")
WrongArchivePassword = errors.New("wrong archive password")
DriverExtractNotSupported = errors.New("driver extraction not supported")
WrongShareCode = errors.New("wrong share code")
InvalidSharing = errors.New("invalid sharing")
SharingNotFound = errors.New("sharing not found")
)
// NewErr wrap constant error with an extra message


@ -19,8 +19,9 @@ import (
// then pass the actual path to the op package
type ListArgs struct {
Refresh bool
NoLog bool
Refresh bool
NoLog bool
WithStorageDetails bool
}
func List(ctx context.Context, path string, args *ListArgs) ([]model.Obj, error) {
@ -35,11 +36,12 @@ func List(ctx context.Context, path string, args *ListArgs) ([]model.Obj, error)
}
type GetArgs struct {
NoLog bool
NoLog bool
WithStorageDetails bool
}
func Get(ctx context.Context, path string, args *GetArgs) (model.Obj, error) {
res, err := get(ctx, path)
res, err := get(ctx, path, args)
if err != nil {
if !args.NoLog {
log.Warnf("failed get %s: %s", path, err)
@ -168,7 +170,7 @@ func GetStorage(path string, args *GetStoragesArgs) (driver.Driver, error) {
func Other(ctx context.Context, args model.FsOtherArgs) (interface{}, error) {
res, err := other(ctx, args)
if err != nil {
log.Errorf("failed remove %s: %+v", args.Path, err)
log.Errorf("failed get other %s: %+v", args.Path, err)
}
return res, err
}


@ -11,11 +11,11 @@ import (
"github.com/pkg/errors"
)
func get(ctx context.Context, path string) (model.Obj, error) {
func get(ctx context.Context, path string, args *GetArgs) (model.Obj, error) {
path = utils.FixAndCleanPath(path)
// maybe a virtual file
if path != "/" {
virtualFiles := op.GetStorageVirtualFilesByPath(stdpath.Dir(path))
virtualFiles := op.GetStorageVirtualFilesWithDetailsByPath(ctx, stdpath.Dir(path), !args.WithStorageDetails)
for _, f := range virtualFiles {
if f.GetName() == stdpath.Base(path) {
return f, nil


@ -15,7 +15,7 @@ import (
func list(ctx context.Context, path string, args *ListArgs) ([]model.Obj, error) {
meta, _ := ctx.Value(conf.MetaKey).(*model.Meta)
user, _ := ctx.Value(conf.UserKey).(*model.User)
virtualFiles := op.GetStorageVirtualFilesByPath(path)
virtualFiles := op.GetStorageVirtualFilesWithDetailsByPath(ctx, path, !args.WithStorageDetails)
storage, actualPath, err := op.GetStorageAndActualPath(path)
if err != nil && len(virtualFiles) == 0 {
return nil, errors.WithMessage(err, "failed get storage")


@ -77,6 +77,26 @@ type ArchiveDecompressArgs struct {
PutIntoNewDir bool
}
type SharingListArgs struct {
Refresh bool
Pwd string
}
type SharingArchiveMetaArgs struct {
ArchiveMetaArgs
Pwd string
}
type SharingArchiveListArgs struct {
ArchiveListArgs
Pwd string
}
type SharingLinkArgs struct {
Pwd string
LinkArgs
}
type RangeReaderIF interface {
RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error)
}


@ -80,6 +80,10 @@ type SetPath interface {
SetPath(path string)
}
type ObjWithProvider interface {
GetProvider() string
}
func SortFiles(objs []Obj, orderBy, orderDirection string) {
if orderBy == "" {
return
@ -166,6 +170,16 @@ func GetUrl(obj Obj) (url string, ok bool) {
return url, false
}
func GetProvider(obj Obj) (string, bool) {
if obj, ok := obj.(ObjWithProvider); ok {
return obj.GetProvider(), true
}
if unwrap, ok := obj.(ObjUnwrap); ok {
return GetProvider(unwrap.Unwrap())
}
return "unknown", false
}
func GetRawObject(obj Obj) *Object {
switch v := obj.(type) {
case *ObjThumbURL:
@ -174,6 +188,8 @@ func GetRawObject(obj Obj) *Object {
return &v.Object
case *ObjectURL:
return &v.Object
case *ObjectProvider:
return &v.Object
case *Object:
return v
}


@ -99,3 +99,16 @@ type ObjThumbURL struct {
Thumbnail
Url
}
type Provider struct {
Provider string
}
func (p Provider) GetProvider() string {
return p.Provider
}
type ObjectProvider struct {
Object
Provider
}
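Provider and ObjectProvider follow the same embedding trick as ObjThumb and ObjectURL: embedding Provider makes the wrapper satisfy ObjWithProvider, so GetProvider can read the driver name back (unwrapping ObjUnwrap layers if needed) and otherwise falls back to "unknown". A minimal sketch of wrapping a listed object, written from a caller's point of view:

// Sketch: tag an object with the name of the driver that produced it.
obj := &model.ObjectProvider{
    Object:   model.Object{Name: "a.txt"},
    Provider: model.Provider{Provider: "Teldrive"},
}
name, ok := model.GetProvider(obj) // "Teldrive", true
_, _ = name, ok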

Some files were not shown because too many files have changed in this diff.