Compare commits

..

43 Commits

Author SHA1 Message Date
4451988214 fix(drivers): add session renewal cron for MediaFire driver (#9321)
- Implement automatic session token renewal every 6-9 minutes
- Add validation for required SessionToken and Cookie fields in Init
- Handle session expiration by calling renewToken on validation failure
- Prevent storage failures due to MediaFire session timeouts

Fixes session closure issues that occur after server restarts or extended periods.

Co-authored-by: Da3zKi7 <da3zki7@duck.com>
2025-09-14 19:37:46 +08:00
ba497fd60e fix(mediafire): fix code errors in mediafire 2025-09-14 19:28:51 +08:00
e0e91ec76a fix(mediafire): fix code errors in mediafire 2025-09-14 19:24:12 +08:00
21c19f179b feat(drivers): add MediaFire driver support (#9319)
- Implement complete MediaFire storage driver
- Add authentication via session_token and cookie
- Support all core operations: List, Get, Link, Put, Copy, Move, Remove, Rename, MakeDir
- Include thumbnail generation for media files
- Handle MediaFire's resumable upload API with multi-unit transfers
- Add proper error handling and progress reporting

Co-authored-by: Da3zKi7 <da3zki7@duck.com>
2025-09-14 19:11:33 +08:00
8bbdb272d4 docs(readme): extend driver list with newest support (#1271) 2025-09-13 20:41:17 +08:00
c15ae94307 feat(189PC,189TV): add refreshToken and qrcode login (#1205)
### Key Changes
- **189PC**: Add QR code login and refresh token support
- **189TV**: Add session refresh mechanism and fix TempUuid persistence issue
- **Both**: Implement session keep-alive with cron jobs (5min interval)

### Features
- QR code authentication for 189PC as alternative to password login
- Automatic token refresh to avoid frequent re-authentication
- Session keep-alive to maintain long-term connections
- Retry logic with max attempts to prevent infinite loops

### Fixes
- Fixed 189TV TempUuid causing storage corruption on QR code reload
- Enhanced error handling for token expiration scenarios
2025-09-13 13:59:47 +08:00
f1a5048558 feat(drivers): add cnb_releases (#1033)
* feat(drivers): add cnb_releases

* feat(cnb_release): implement reference

* refactor(cnb_releases): get release info by ID instead of tag name

* feat(cnb_releases): add option to use tag name instead of release name

* fix(cnb_releases): set default root and improve release info retrieval

* feat(cnb_releases): implement Put

* perf(cnb_release): use io.Pipe to stream file upload

* perf(cnb_releases): add context timeout for file upload request

* feat(cnb_releases): implement Remove

* feat(cnb_releases): implement MakeDir

* feat(cnb_releases): implement Rename

* feat(cnb_releases): require repo and token in Addition

* chore(cnb_releases): remove unused code

* Revert 'perf(cnb_release): use io.Pipe to stream file upload'

* perf(cnb_releases): optimize upload with MultiReader

* feat(cnb_releases): add DefaultBranch

---------

Co-authored-by: ILoveScratch <ilovescratch@foxmail.com>
2025-09-11 18:11:32 +08:00
1fe26bff9a feat(local): auto create recycle dir if not exists (#1244) 2025-09-10 20:57:21 +08:00
433dcd156b fix(ci): add tag_name to upload assets step (#1234)
fix(release): add tag_name to upload assets step
2025-09-06 22:51:05 +08:00
e97f0a289e feat(cloudreve_v4): enhance token management (#1171)
* fix(cloudreve_v4): improve error handling in request method

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* feat(cloudreve_v4): enhance token management with expiration checks and refresh logic

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* feat(cloudreve_v4): add JWT structures for access and refresh tokens; validate access token on initialization

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(cloudreve_v4): improve error messages

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
2025-09-04 19:41:41 +08:00
89f35170b3 fix(fs): clear cache after directory rename to ensure consistency (#1193)
Clear cache after renaming the directory.
2025-09-01 18:47:54 +08:00
8188fb2d7d fix(123open): get direct link (#1185)
* fix(123open): correct query parameter name from 'fileId' to 'fileID' in getDirectLink function

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(123open): change SpaceTempExpr type from 'string' to 'int64' in UserInfoResp struct

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(123open): comment out unused fields in UserInfoResp struct

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(123open): add getUID method and cache UID

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-31 15:47:38 +08:00
87cf95f50b fix(139): refactor part upload logic (#1184)
* fix(139): refactor part upload logic

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(139): handle upload errors

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(139): sort upload parts by PartNumber before uploading

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(139): improve error handling

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* fix(139): add validation for upload part index to prevent out of bounds errors

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-31 15:47:12 +08:00
8ab26cb823 fix(123open): change DirectLink type from 'boolean' to 'bool' (#1180)
Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-29 19:06:37 +08:00
5880c8e1af fix(189tv): use rate-limited upload stream in OldUpload function (#1176)
* fix(189tv): use rate-limited upload stream in OldUpload function

* fix(189tv): wrap tempFile with io.NopCloser to prevent premature closure in OldUpload function

* .
2025-08-29 16:01:50 +08:00
14bf4ecb4c fix(share): support custom proxy url (#1130)
* feat(share): support custom proxy url

* fix(share): count access

* fix: maybe a path traversal vulnerability?
2025-08-28 22:11:19 +08:00
04a5e58781 fix(server): can't edit .md source files (#1159)
* fix(server): can't edit .md source files

* chore

* add ignore direct link args
2025-08-28 16:19:57 +08:00
bbd4389345 fix(wopan): use fixed timezone for parsing time (#1170)
fix(wopan): update getTime function to use fixed timezone for parsing

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-28 13:02:02 +08:00
f350ccdf95 fix(189pc): sliceSize must not be equal to fileSize (#1169)
* fix(189pc): sliceSize not equal to fileSize

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* Update comment for sliceSize parameter

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-28 11:32:40 +08:00
4f2de9395e feat(degoo): token improvement (#1149)
* Update driver.go

Signed-off-by: Caspian <app@caspian.im>

* Update meta.go

Signed-off-by: Caspian <app@caspian.im>

* Update util.go

Signed-off-by: Caspian <app@caspian.im>

* Update util.go

Signed-off-by: Caspian <app@caspian.im>

* Update util.go

Signed-off-by: Caspian <app@caspian.im>

* Update util.go

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* make account optional

* ensure username and password

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

---------

Signed-off-by: Caspian <app@caspian.im>
Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-26 01:22:59 +08:00
b0dbbebfb0 feat(drivers): add Teldrive driver (#1116)
https://github.com/tgdrive/teldrive

https://teldrive-docs.pages.dev/docs/api

实现:
* copy
* move
* link (302 share and local proxy)
* chunked uploads
* rename

未实现:
- openlist扫码登陆
- refresh token

https://github.com/OpenListTeam/OpenList-Docs/pull/155


* feat(Teldrive): Add driver Teldrive

* fix(teldrive): force webproxy and memory optimized

* chore(teldrive): go fmt

* chore(teldrive): remove TODO

* chore(teldrive): organize code

* feat(teldrive): add UseShareLink option and support 302

* fix(teldrive): standardize API path construction

* fix(teldrive): trim trailing slash from Address in Init method

* chore(teldrive): update help text for UseShareLink field in Addition struct

* fix(teldrive): set 10 MiB as default chunk size

---------

Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
Co-authored-by: ILoveScratch <ilovescratch@foxmail.com>
2025-08-25 01:34:08 +08:00
0c27b4bd47 docs(contributing): update guidelines (#983)
[skip ci]

* docs(contributing): update guidelines

* docs(contributing): clarify fork

* docs(contributing): sync translation

Co-authored-by: Yinan Qin <39023210+elysia-best@users.noreply.github.com>
Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* docs(contributing): add label and cc reminder

* docs(contributing): remove ensure new branch from checklist

* docs(contributing): replace generic GitHub URLs with user-specific ones

* docs(contributing): make branch deletion after PR merge optional

* docs(contributing): keep --recurse-submodules

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
Co-authored-by: Yinan Qin <39023210+elysia-best@users.noreply.github.com>
2025-08-24 20:13:11 +08:00
736cd9e5f2 fix(quark): fix getTranscodingLink (#1136)
The first video info may not contain url

* fix(quark): fix getTranscodingLink

* fix(quark_tv): fix getTranscodingLink
2025-08-24 19:55:10 +08:00
c7a603c926 fix(115): fix get 115 app version (#1137) 2025-08-24 19:50:21 +08:00
a28d6d5693 fix(123_open): fix token refresh (#1121) 2025-08-23 23:01:41 +08:00
e59d2233e2 feat(drivers): add Degoo driver (#1097)
* Create driver.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Create util.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Create types.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Create meta.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Update drivers/degoo/driver.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: CaspianGUAN <app@caspian.im>

* Update drivers/degoo/driver.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: CaspianGUAN <app@caspian.im>

* Update driver.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Update meta.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Update types.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Update util.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Update driver.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Update util.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* Update drivers/degoo/util.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: CaspianGUAN <app@caspian.im>

* Update util.go

Signed-off-by: CaspianGUAN <app@caspian.im>

* refactor(degoo): add Degoo driver integration and update API handling

* fix(degoo): apply suggestions

---------

Signed-off-by: CaspianGUAN <app@caspian.im>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-23 22:47:02 +08:00
01914a06ef refactor(ci): add permissions check at docker's entrypoint (#1128)
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-22 19:35:48 +08:00
6499374d1c fix(deps): update 115driver to v1.1.1 (close SheltonZhu/115driver#57) (#1115) 2025-08-20 21:33:21 +08:00
b054919d5c feat(ilanzou): add support for rapid upload and fix duplication handling (#1065)
* feat(ilanzou): add support for rapid upload token handling

* feat(ilanzou): add NoOverwriteUpload option
2025-08-19 19:19:44 +08:00
048ee9c2e5 feat(server): adapting #1099 to #991 (#1102) 2025-08-19 15:48:59 +08:00
23394548ca feat(123_open): add DirectLink option (#1045)
* feat(123_open): add `UseDirectLink` option

* feat(123_open): update rate limit rules

* fix(123_open): update api

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* feat(123_open): enhance direct link functionality with private key and expiration

* refactor(123_open): use UUID for random generation

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-19 15:23:10 +08:00
b04677b806 feat(server): add error page and status code (#1099) 2025-08-19 15:18:12 +08:00
e4c902dd93 feat(share): support more secure file sharing (#991)
提供一种类似大多数网盘的文件分享操作,这种分享方式可以通过强制 Web 代理隐藏文件源路径,可以设置分享码、最大访问数和过期时间,并且不需要启用 guest 用户。

在全局设置中可以调整:
- 是否强制 Web 代理
- 是否允许预览
- 是否允许预览压缩文件
- 分享文件后,点击“复制链接”按钮复制的内容

前端部分:OpenListTeam/OpenList-Frontend#156
文档部分:OpenListTeam/OpenList-Docs#130

Close #183
Close #526
Close #860
Close #892
Close #1079


* feat(share): support more secure file sharing

* feat(share): add archive preview

* fix(share): fix some bugs

* feat(openlist_share): add openlist share driver

* fix(share): lack unwrap when get virtual path

* fix: use unwrapPath instead of path for virtual file name comparison

* fix(share): change request method of /api/share/list from GET to Any

* fix(share): path traversal vulnerability in sharing path check

* 修复分享alias驱动的文件 没开代理时无法获取URL

* fix(sharing): update error message for sharing root link extraction

---------

Co-authored-by: Suyunmeng <69945917+Suyunmeng@users.noreply.github.com>
Co-authored-by: j2rong4cn <j2rong@qq.com>
2025-08-19 15:10:02 +08:00
5d8bd258c0 refactor(docker): reduce docker image size (#1091)
* fix(docker): reduce image size

* refactor(docker): update user and group creation

* Update Dockerfile

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-19 10:27:33 +08:00
08c5283c8c feat(docker): Update docker-compose configuration (#1081)
* feat(docker): Update docker-compose configuration

* Update docker-compose.yml

Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
Signed-off-by: huancun _- <huancun@hc26.org>

---------

Signed-off-by: huancun _- <huancun@hc26.org>
Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-08-18 14:29:59 +08:00
10a14f10cd fix(docker): improve startup process and SIGTERM handling (#1089)
* fix(ci): Modify the way of star OpenList.

* fix(ci): start runsvdir
2025-08-18 11:13:05 +08:00
f86ebc52a0 refactor(123_open): improve upload (#1076)
* refactor(123_open): improve upload

* optimize buffer initialization for multipart form

* 每次重试生成新的表单

* .
2025-08-17 14:25:23 +08:00
016ed90efa feat(stream): fast buffer freeing for large cache (#1053)
Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-16 17:19:52 +08:00
d76407b201 fix(dropbox): incorrect path error during upload (#1052)
* Fix incorrect path error during upload on Dropbox

* Add RootNamespaceId to the config for direct modification

* Refactor Dropbox header logic: extract JSON marshaling into helper method

* Fix Dropbox: replace marshalToJSONString with utils.Json.MarshalToString
2025-08-16 14:18:02 +08:00
5de6b660f2 fix(terabox): user not exists error (#1056)
* fix user location error when upload file
2025-08-15 21:25:57 +08:00
71ada3b656 fix(ci-sync): fix workflow for syncing Repository (#1062) 2025-08-15 18:48:55 +08:00
dc42f0e226 [skip ci]fix(ci): update sync workflow (#1061) 2025-08-15 18:36:52 +08:00
74bf9f6467 [skip ci]feat(sync): add workflow to sync GitHub repository (#1060)
feat(sync): add workflow to sync GitHub repository to Gitee
2025-08-15 18:12:29 +08:00
101 changed files with 6837 additions and 521 deletions

56
.github/PULL_REQUEST_TEMPLATE.md vendored Normal file
View File

@@ -0,0 +1,56 @@
<!--
Provide a general summary of your changes in the Title above.
The PR title must start with `feat(): `, `docs(): `, `fix(): `, `style(): `, `refactor(): `, or `chore(): `. For example: `feat(component): add new feature`.
If it spans multiple components, use the main component as the prefix and enumerate in the title, describe in the body.
-->
<!--
在上方标题中提供您更改的总体摘要。
PR 标题需以 `feat(): `, `docs(): `, `fix(): `, `style(): `, `refactor(): `, `chore(): ` 其中之一开头,例如:`feat(component): 新增功能`
如果跨多个组件,请使用主要组件作为前缀,并在标题中枚举、描述中说明。
-->
## Description / 描述
<!-- Describe your changes in detail -->
<!-- 详细描述您的更改 -->
## Motivation and Context / 背景
<!-- Why is this change required? What problem does it solve? -->
<!-- 为什么需要此更改?它解决了什么问题? -->
<!-- If it fixes an open issue, please link to the issue here. -->
<!-- 如果修复了一个打开的 issue,请在此处链接到该 issue -->
Closes #XXXX
<!-- or -->
<!-- 或者 -->
Relates to #XXXX
## How Has This Been Tested? / 测试
<!-- Please describe in detail how you tested your changes. -->
<!-- 请详细描述您如何测试更改 -->
## Checklist / 检查清单
<!-- Go over all the following points, and put an `x` in all the boxes that apply. -->
<!-- 检查以下所有要点,并在所有适用的框中打`x` -->
<!-- If you're unsure about any of these, don't hesitate to ask. We're here to help! -->
<!-- 如果您对其中任何一项不确定,请不要犹豫提问。我们会帮助您! -->
- [ ] I have read the [CONTRIBUTING](https://github.com/OpenListTeam/OpenList/blob/main/CONTRIBUTING.md) document.
我已阅读 [CONTRIBUTING](https://github.com/OpenListTeam/OpenList/blob/main/CONTRIBUTING.md) 文档。
- [ ] I have formatted my code with `go fmt` or [prettier](https://prettier.io/).
我已使用 `go fmt` 或 [prettier](https://prettier.io/) 格式化提交的代码。
- [ ] I have added appropriate labels to this PR (or mentioned needed labels in the description if lacking permissions).
我已为此 PR 添加了适当的标签(如无权限或需要的标签不存在,请在描述中说明,管理员将后续处理)。
- [ ] I have requested review from relevant code authors using the "Request review" feature when applicable.
我已在适当情况下使用"Request review"功能请求相关代码作者进行审查。
- [ ] I have updated the repository accordingly (if it's needed).
    我已相应更新了相关仓库(若适用)。
- [ ] [OpenList-Frontend](https://github.com/OpenListTeam/OpenList-Frontend) #XXXX
- [ ] [OpenList-Docs](https://github.com/OpenListTeam/OpenList-Docs) #XXXX

View File

@@ -73,4 +73,5 @@ jobs:
with: with:
files: build/compress/* files: build/compress/*
prerelease: false prerelease: false
tag_name: ${{ github.event.release.tag_name }}

38
.github/workflows/sync_repo.yml vendored Normal file
View File

@@ -0,0 +1,38 @@
name: Sync to Gitee
on:
push:
branches:
- main
workflow_dispatch:
jobs:
sync:
runs-on: ubuntu-latest
name: Sync GitHub to Gitee
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Setup SSH
run: |
mkdir -p ~/.ssh
echo "${{ secrets.GITEE_SSH_PRIVATE_KEY }}" > ~/.ssh/id_rsa
chmod 600 ~/.ssh/id_rsa
ssh-keyscan gitee.com >> ~/.ssh/known_hosts
- name: Create single commit and push
run: |
git config user.name "GitHub Actions"
git config user.email "actions@github.com"
# Create a new branch
git checkout --orphan new-main
git add .
git commit -m "Sync from GitHub: $(date)"
# Add Gitee remote and force push
git remote add gitee ${{ vars.GITEE_REPO_URL }}
git push --force gitee new-main:main

View File

@@ -2,106 +2,76 @@
## Setup your machine ## Setup your machine
`OpenList` is written in [Go](https://golang.org/) and [React](https://reactjs.org/). `OpenList` is written in [Go](https://golang.org/) and [SolidJS](https://www.solidjs.com/).
Prerequisites: Prerequisites:
- [git](https://git-scm.com) - [git](https://git-scm.com)
- [Go 1.20+](https://golang.org/doc/install) - [Go 1.24+](https://golang.org/doc/install)
- [gcc](https://gcc.gnu.org/) - [gcc](https://gcc.gnu.org/)
- [nodejs](https://nodejs.org/) - [nodejs](https://nodejs.org/)
Clone `OpenList` and `OpenList-Frontend` anywhere: ## Cloning a fork
Fork and clone `OpenList` and `OpenList-Frontend` anywhere:
```shell ```shell
$ git clone https://github.com/OpenListTeam/OpenList.git $ git clone https://github.com/<your-username>/OpenList.git
$ git clone --recurse-submodules https://github.com/OpenListTeam/OpenList-Frontend.git $ git clone --recurse-submodules https://github.com/<your-username>/OpenList-Frontend.git
```
## Creating a branch
Create a new branch from the `main` branch, with an appropriate name.
```shell
$ git checkout -b <branch-name>
``` ```
You should switch to the `main` branch for development.
## Preview your change ## Preview your change
### backend ### backend
```shell ```shell
$ go run main.go $ go run main.go
``` ```
### frontend ### frontend
```shell ```shell
$ pnpm dev $ pnpm dev
``` ```
## Add a new driver ## Add a new driver
Copy `drivers/template` folder and rename it, and follow the comments in it. Copy `drivers/template` folder and rename it, and follow the comments in it.
## Create a commit ## Create a commit
Commit messages should be well formatted, and to make that "standardized". Commit messages should be well formatted, and to make that "standardized".
### Commit Message Format Submit your pull request. For PR titles, follow [Conventional Commits](https://www.conventionalcommits.org).
Each commit message consists of a **header**, a **body** and a **footer**. The header has a special
format that includes a **type**, a **scope** and a **subject**:
``` https://github.com/OpenListTeam/OpenList/issues/376
<type>(<scope>): <subject>
<BLANK LINE>
<body>
<BLANK LINE>
<footer>
```
The **header** is mandatory and the **scope** of the header is optional. It's suggested to sign your commits. See: [How to sign commits](https://docs.github.com/en/authentication/managing-commit-signature-verification/signing-commits)
Any line of the commit message cannot be longer than 100 characters! This allows the message to be easier
to read on GitHub as well as in various git tools.
### Revert
If the commit reverts a previous commit, it should begin with `revert: `, followed by the header
of the reverted commit.
In the body it should say: `This reverts commit <hash>.`, where the hash is the SHA of the commit
being reverted.
### Type
Must be one of the following:
* **feat**: A new feature
* **fix**: A bug fix
* **docs**: Documentation only changes
* **style**: Changes that do not affect the meaning of the code (white-space, formatting, missing
semi-colons, etc)
* **refactor**: A code change that neither fixes a bug nor adds a feature
* **perf**: A code change that improves performance
* **test**: Adding missing or correcting existing tests
* **build**: Affects project builds or dependency modifications
* **revert**: Restore the previous commit
* **ci**: Continuous integration of related file modifications
* **chore**: Changes to the build process or auxiliary tools and libraries such as documentation
generation
* **release**: Release a new version
### Scope
The scope could be anything specifying place of the commit change. For example `$location`,
`$browser`, `$compile`, `$rootScope`, `ngHref`, `ngClick`, `ngView`, etc...
You can use `*` when the change affects more than a single scope.
### Subject
The subject contains succinct description of the change:
* use the imperative, present tense: "change" not "changed" nor "changes"
* don't capitalize first letter
* no dot (.) at the end
### Body
Just as in the **subject**, use the imperative, present tense: "change" not "changed" nor "changes".
The body should include the motivation for the change and contrast this with previous behavior.
### Footer
The footer should contain any information about **Breaking Changes** and is also the place to
[reference GitHub issues that this commit closes](https://help.github.com/articles/closing-issues-via-commit-messages/).
**Breaking Changes** should start with the word `BREAKING CHANGE:` with a space or two newlines.
The rest of the commit message is then used for this.
## Submit a pull request ## Submit a pull request
Push your branch to your `openlist` fork and open a pull request against the Please make sure your code has been formatted with `go fmt` or [prettier](https://prettier.io/) before submitting.
`main` branch.
Push your branch to your `openlist` fork and open a pull request against the `main` branch.
## Merge your pull request
Your pull request will be merged after review. Please wait for the maintainer to merge your pull request after review.
At least 1 approving review is required by reviewers with write access. You can also request a review from maintainers.
## Delete your branch
(Optional) After your pull request is merged, you can delete your branch.
---
Thank you for your contribution! Let's make OpenList better together!

View File

@@ -20,11 +20,12 @@ ARG GID=1001
WORKDIR /opt/openlist/ WORKDIR /opt/openlist/
COPY --chmod=755 --from=builder /app/bin/openlist ./ RUN addgroup -g ${GID} ${USER} && \
COPY --chmod=755 entrypoint.sh /entrypoint.sh adduser -D -u ${UID} -G ${USER} ${USER} && \
RUN adduser -u ${UID} -g ${GID} -h /opt/openlist/data -D -s /bin/sh ${USER} \ mkdir -p /opt/openlist/data
&& chown -R ${UID}:${GID} /opt \
&& chown -R ${UID}:${GID} /entrypoint.sh COPY --from=builder --chmod=755 --chown=${UID}:${GID} /app/bin/openlist ./
COPY --chmod=755 --chown=${UID}:${GID} entrypoint.sh /entrypoint.sh
USER ${USER} USER ${USER}
RUN /entrypoint.sh version RUN /entrypoint.sh version

View File

@@ -10,12 +10,12 @@ ARG GID=1001
WORKDIR /opt/openlist/ WORKDIR /opt/openlist/
COPY --chmod=755 /build/${TARGETPLATFORM}/openlist ./ RUN addgroup -g ${GID} ${USER} && \
COPY --chmod=755 entrypoint.sh /entrypoint.sh adduser -D -u ${UID} -G ${USER} ${USER} && \
mkdir -p /opt/openlist/data
RUN adduser -u ${UID} -g ${GID} -h /opt/openlist/data -D -s /bin/sh ${USER} \ COPY --chmod=755 --chown=${UID}:${GID} /build/${TARGETPLATFORM}/openlist ./
&& chown -R ${UID}:${GID} /opt \ COPY --chmod=755 --chown=${UID}:${GID} entrypoint.sh /entrypoint.sh
&& chown -R ${UID}:${GID} /entrypoint.sh
USER ${USER} USER ${USER}
RUN /entrypoint.sh version RUN /entrypoint.sh version

View File

@@ -65,6 +65,7 @@ Thank you for your support and understanding of the OpenList project.
- [x] [WebDAV](https://en.wikipedia.org/wiki/WebDAV) - [x] [WebDAV](https://en.wikipedia.org/wiki/WebDAV)
- [x] Teambition([China](https://www.teambition.com), [International](https://us.teambition.com)) - [x] Teambition([China](https://www.teambition.com), [International](https://us.teambition.com))
- [x] [Mediatrack](https://www.mediatrack.cn) - [x] [Mediatrack](https://www.mediatrack.cn)
- [x] [MediaFire](https://www.mediafire.com)
- [x] [139yun](https://yun.139.com) (Personal, Family, Group) - [x] [139yun](https://yun.139.com) (Personal, Family, Group)
- [x] [YandexDisk](https://disk.yandex.com) - [x] [YandexDisk](https://disk.yandex.com)
- [x] [BaiduNetdisk](http://pan.baidu.com) - [x] [BaiduNetdisk](http://pan.baidu.com)
@@ -74,7 +75,6 @@ Thank you for your support and understanding of the OpenList project.
- [x] [Thunder](https://pan.xunlei.com) - [x] [Thunder](https://pan.xunlei.com)
- [x] [Lanzou](https://www.lanzou.com) - [x] [Lanzou](https://www.lanzou.com)
- [x] [ILanzou](https://www.ilanzou.com) - [x] [ILanzou](https://www.ilanzou.com)
- [x] [Aliyundrive share](https://www.alipan.com)
- [x] [Google photo](https://photos.google.com) - [x] [Google photo](https://photos.google.com)
- [x] [Mega.nz](https://mega.nz) - [x] [Mega.nz](https://mega.nz)
- [x] [Baidu photo](https://photo.baidu.com) - [x] [Baidu photo](https://photo.baidu.com)
@@ -85,6 +85,15 @@ Thank you for your support and understanding of the OpenList project.
- [x] [FeijiPan](https://www.feijipan.com) - [x] [FeijiPan](https://www.feijipan.com)
- [x] [dogecloud](https://www.dogecloud.com/product/oss) - [x] [dogecloud](https://www.dogecloud.com/product/oss)
- [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs) - [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
- [x] [Chaoxing](https://www.chaoxing.com)
- [x] [CNB](https://cnb.cool/)
- [x] [Degoo](https://degoo.com)
- [x] [Doubao](https://www.doubao.com)
- [x] [Febbox](https://www.febbox.com)
- [x] [GitHub](https://github.com)
- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
- [x] [Teldrive](https://github.com/tgdrive/teldrive)
- [x] [Weiyun](https://www.weiyun.com)
- [x] Easy to deploy and out-of-the-box - [x] Easy to deploy and out-of-the-box
- [x] File preview (PDF, markdown, code, plain text, ...) - [x] File preview (PDF, markdown, code, plain text, ...)
- [x] Image preview in gallery mode - [x] Image preview in gallery mode

View File

@@ -65,6 +65,7 @@ OpenList 是一个由 OpenList 团队独立维护的开源项目,遵循 AGPL-3
- [x] [WebDAV](https://en.wikipedia.org/wiki/WebDAV) - [x] [WebDAV](https://en.wikipedia.org/wiki/WebDAV)
- [x] Teambition([中国](https://www.teambition.com), [国际](https://us.teambition.com)) - [x] Teambition([中国](https://www.teambition.com), [国际](https://us.teambition.com))
- [x] [分秒帧](https://www.mediatrack.cn) - [x] [分秒帧](https://www.mediatrack.cn)
- [x] [MediaFire](https://www.mediafire.com)
- [x] [和彩云](https://yun.139.com)(个人、家庭、群组) - [x] [和彩云](https://yun.139.com)(个人、家庭、群组)
- [x] [YandexDisk](https://disk.yandex.com) - [x] [YandexDisk](https://disk.yandex.com)
- [x] [百度网盘](http://pan.baidu.com) - [x] [百度网盘](http://pan.baidu.com)
@@ -74,7 +75,6 @@ OpenList 是一个由 OpenList 团队独立维护的开源项目,遵循 AGPL-3
- [x] [迅雷网盘](https://pan.xunlei.com) - [x] [迅雷网盘](https://pan.xunlei.com)
- [x] [蓝奏云](https://www.lanzou.com) - [x] [蓝奏云](https://www.lanzou.com)
- [x] [蓝奏云优享版](https://www.ilanzou.com) - [x] [蓝奏云优享版](https://www.ilanzou.com)
- [x] [阿里云盘分享](https://www.alipan.com)
- [x] [Google 相册](https://photos.google.com) - [x] [Google 相册](https://photos.google.com)
- [x] [Mega.nz](https://mega.nz) - [x] [Mega.nz](https://mega.nz)
- [x] [百度相册](https://photo.baidu.com) - [x] [百度相册](https://photo.baidu.com)
@@ -85,7 +85,15 @@ OpenList 是一个由 OpenList 团队独立维护的开源项目,遵循 AGPL-3
- [x] [飞机盘](https://www.feijipan.com) - [x] [飞机盘](https://www.feijipan.com)
- [x] [多吉云](https://www.dogecloud.com/product/oss) - [x] [多吉云](https://www.dogecloud.com/product/oss)
- [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs) - [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
- [x] 部署方便,开箱即用 - [x] [超星](https://www.chaoxing.com)
- [x] [CNB](https://cnb.cool/)
- [x] [Degoo](https://degoo.com)
- [x] [豆包](https://www.doubao.com)
- [x] [Febbox](https://www.febbox.com)
- [x] [GitHub](https://github.com)
- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
- [x] [Teldrive](https://github.com/tgdrive/teldrive)
- [x] [微云](https://www.weiyun.com)
- [x] 文件预览(PDF、markdown、代码、纯文本等) - [x] 文件预览(PDF、markdown、代码、纯文本等)
- [x] 画廊模式下的图片预览 - [x] 画廊模式下的图片预览
- [x] 视频和音频预览,支持歌词和字幕 - [x] 视频和音频预览,支持歌词和字幕

View File

@@ -74,7 +74,6 @@ OpenListプロジェクトへのご支援とご理解をありがとうござい
- [x] [Thunder](https://pan.xunlei.com) - [x] [Thunder](https://pan.xunlei.com)
- [x] [Lanzou](https://www.lanzou.com) - [x] [Lanzou](https://www.lanzou.com)
- [x] [ILanzou](https://www.ilanzou.com) - [x] [ILanzou](https://www.ilanzou.com)
- [x] [Aliyundrive share](https://www.alipan.com)
- [x] [Google photo](https://photos.google.com) - [x] [Google photo](https://photos.google.com)
- [x] [Mega.nz](https://mega.nz) - [x] [Mega.nz](https://mega.nz)
- [x] [Baidu photo](https://photo.baidu.com) - [x] [Baidu photo](https://photo.baidu.com)
@@ -85,6 +84,16 @@ OpenListプロジェクトへのご支援とご理解をありがとうござい
- [x] [FeijiPan](https://www.feijipan.com) - [x] [FeijiPan](https://www.feijipan.com)
- [x] [dogecloud](https://www.dogecloud.com/product/oss) - [x] [dogecloud](https://www.dogecloud.com/product/oss)
- [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs) - [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
- [x] [Chaoxing](https://www.chaoxing.com)
- [x] [CNB](https://cnb.cool/)
- [x] [Degoo](https://degoo.com)
- [x] [Doubao](https://www.doubao.com)
- [x] [Febbox](https://www.febbox.com)
- [x] [GitHub](https://github.com)
- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
- [x] [Teldrive](https://github.com/tgdrive/teldrive)
- [x] [Weiyun](https://www.weiyun.com)
- [x] [MediaFire](https://www.mediafire.com)
- [x] 簡単にデプロイでき、すぐに使える - [x] 簡単にデプロイでき、すぐに使える
- [x] ファイルプレビュー(PDF、markdown、コード、テキストなど) - [x] ファイルプレビュー(PDF、markdown、コード、テキストなど)
- [x] ギャラリーモードでの画像プレビュー - [x] ギャラリーモードでの画像プレビュー

View File

@ -74,7 +74,6 @@ Dank u voor uw ondersteuning en begrip
- [x] [Thunder](https://pan.xunlei.com) - [x] [Thunder](https://pan.xunlei.com)
- [x] [Lanzou](https://www.lanzou.com) - [x] [Lanzou](https://www.lanzou.com)
- [x] [ILanzou](https://www.ilanzou.com) - [x] [ILanzou](https://www.ilanzou.com)
- [x] [Aliyundrive share](https://www.alipan.com)
- [x] [Google photo](https://photos.google.com) - [x] [Google photo](https://photos.google.com)
- [x] [Mega.nz](https://mega.nz) - [x] [Mega.nz](https://mega.nz)
- [x] [Baidu photo](https://photo.baidu.com) - [x] [Baidu photo](https://photo.baidu.com)
@ -85,6 +84,15 @@ Dank u voor uw ondersteuning en begrip
- [x] [FeijiPan](https://www.feijipan.com) - [x] [FeijiPan](https://www.feijipan.com)
- [x] [dogecloud](https://www.dogecloud.com/product/oss) - [x] [dogecloud](https://www.dogecloud.com/product/oss)
- [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs) - [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
- [x] [Chaoxing](https://www.chaoxing.com)
- [x] [CNB](https://cnb.cool/)
- [x] [Degoo](https://degoo.com)
- [x] [Doubao](https://www.doubao.com)
- [x] [Febbox](https://www.febbox.com)
- [x] [GitHub](https://github.com)
- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
- [x] [Teldrive](https://github.com/tgdrive/teldrive)
- [x] [Weiyun](https://www.weiyun.com)
- [x] Eenvoudig te implementeren en direct te gebruiken - [x] Eenvoudig te implementeren en direct te gebruiken
- [x] Bestandsvoorbeeld (PDF, markdown, code, platte tekst, ...) - [x] Bestandsvoorbeeld (PDF, markdown, code, platte tekst, ...)
- [x] Afbeeldingsvoorbeeld in galerijweergave - [x] Afbeeldingsvoorbeeld in galerijweergave

View File

@ -6,10 +6,9 @@ services:
ports: ports:
- '5244:5244' - '5244:5244'
- '5245:5245' - '5245:5245'
user: '0:0'
environment: environment:
- PUID=0
- PGID=0
- UMASK=022 - UMASK=022
- TZ=UTC - TZ=Asia/Shanghai
container_name: openlist container_name: openlist
image: 'openlistteam/openlist:latest' image: 'openlistteam/openlist:latest'

View File

@ -1,43 +1,60 @@
package _115 package _115
import ( import (
"errors"
"github.com/OpenListTeam/OpenList/v4/drivers/base" "github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
driver115 "github.com/SheltonZhu/115driver/pkg/driver" driver115 "github.com/SheltonZhu/115driver/pkg/driver"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
var ( var (
md5Salt = "Qclm8MGWUv59TnrR0XPg" md5Salt = "Qclm8MGWUv59TnrR0XPg"
appVer = "27.0.5.7" appVer = "35.6.0.3"
) )
func (d *Pan115) getAppVersion() ([]driver115.AppVersion, error) { func (d *Pan115) getAppVersion() (string, error) {
result := driver115.VersionResp{} result := VersionResp{}
resp, err := base.RestyClient.R().Get(driver115.ApiGetVersion) res, err := base.RestyClient.R().Get(driver115.ApiGetVersion)
err = driver115.CheckErr(err, &result, resp)
if err != nil { if err != nil {
return nil, err return "", err
} }
err = utils.Json.Unmarshal(res.Body(), &result)
return result.Data.GetAppVersions(), nil if err != nil {
return "", err
}
if len(result.Error) > 0 {
return "", errors.New(result.Error)
}
return result.Data.Win.Version, nil
} }
func (d *Pan115) getAppVer() string { func (d *Pan115) getAppVer() string {
// todo add some cache ver, err := d.getAppVersion()
vers, err := d.getAppVersion()
if err != nil { if err != nil {
log.Warnf("[115] get app version failed: %v", err) log.Warnf("[115] get app version failed: %v", err)
return appVer return appVer
} }
for _, ver := range vers { if len(ver) > 0 {
if ver.AppName == "win" { return ver
return ver.Version
}
} }
return appVer return appVer
} }
func (d *Pan115) initAppVer() { func (d *Pan115) initAppVer() {
appVer = d.getAppVer() appVer = d.getAppVer()
log.Debugf("use app version: %v", appVer)
}
type VersionResp struct {
Error string `json:"error,omitempty"`
Data Versions `json:"data"`
}
type Versions struct {
Win Version `json:"win"`
}
type Version struct {
Version string `json:"version_code"`
} }

View File

@ -17,6 +17,7 @@ import (
type Open123 struct { type Open123 struct {
model.Storage model.Storage
Addition Addition
UID uint64
} }
func (d *Open123) Config() driver.Config { func (d *Open123) Config() driver.Config {
@ -69,13 +70,45 @@ func (d *Open123) List(ctx context.Context, dir model.Obj, args model.ListArgs)
func (d *Open123) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { func (d *Open123) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
fileId, _ := strconv.ParseInt(file.GetID(), 10, 64) fileId, _ := strconv.ParseInt(file.GetID(), 10, 64)
if d.DirectLink {
res, err := d.getDirectLink(fileId)
if err != nil {
return nil, err
}
if d.DirectLinkPrivateKey == "" {
duration := 365 * 24 * time.Hour // 缓存1年
return &model.Link{
URL: res.Data.URL,
Expiration: &duration,
}, nil
}
uid, err := d.getUID()
if err != nil {
return nil, err
}
duration := time.Duration(d.DirectLinkValidDuration) * time.Minute
newURL, err := d.SignURL(res.Data.URL, d.DirectLinkPrivateKey,
uid, duration)
if err != nil {
return nil, err
}
return &model.Link{
URL: newURL,
Expiration: &duration,
}, nil
}
res, err := d.getDownloadInfo(fileId) res, err := d.getDownloadInfo(fileId)
if err != nil { if err != nil {
return nil, err return nil, err
} }
link := model.Link{URL: res.Data.DownloadUrl} return &model.Link{URL: res.Data.DownloadUrl}, nil
return &link, nil
} }
func (d *Open123) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error { func (d *Open123) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {

View File

@ -23,6 +23,11 @@ type Addition struct {
// 上传线程数 // 上传线程数
UploadThread int `json:"UploadThread" type:"number" default:"3" help:"the threads of upload"` UploadThread int `json:"UploadThread" type:"number" default:"3" help:"the threads of upload"`
// 使用直链
DirectLink bool `json:"DirectLink" type:"bool" default:"false" required:"false" help:"use direct link when download file"`
DirectLinkPrivateKey string `json:"DirectLinkPrivateKey" required:"false" help:"private key for direct link, if URL authentication is enabled"`
DirectLinkValidDuration int64 `json:"DirectLinkValidDuration" type:"number" default:"30" required:"false" help:"minutes, if URL authentication is enabled"`
driver.RootID driver.RootID
} }

View File

@ -127,19 +127,19 @@ type RefreshTokenResp struct {
type UserInfoResp struct { type UserInfoResp struct {
BaseResp BaseResp
Data struct { Data struct {
UID int64 `json:"uid"` UID uint64 `json:"uid"`
Username string `json:"username"` // Username string `json:"username"`
DisplayName string `json:"displayName"` // DisplayName string `json:"displayName"`
HeadImage string `json:"headImage"` // HeadImage string `json:"headImage"`
Passport string `json:"passport"` // Passport string `json:"passport"`
Mail string `json:"mail"` // Mail string `json:"mail"`
SpaceUsed int64 `json:"spaceUsed"` // SpaceUsed int64 `json:"spaceUsed"`
SpacePermanent int64 `json:"spacePermanent"` // SpacePermanent int64 `json:"spacePermanent"`
SpaceTemp int64 `json:"spaceTemp"` // SpaceTemp int64 `json:"spaceTemp"`
SpaceTempExpr string `json:"spaceTempExpr"` // SpaceTempExpr int64 `json:"spaceTempExpr"`
Vip bool `json:"vip"` // Vip bool `json:"vip"`
DirectTraffic int64 `json:"directTraffic"` // DirectTraffic int64 `json:"directTraffic"`
IsHideUID bool `json:"isHideUID"` // IsHideUID bool `json:"isHideUID"`
} `json:"data"` } `json:"data"`
} }
@ -158,6 +158,13 @@ type DownloadInfoResp struct {
} `json:"data"` } `json:"data"`
} }
type DirectLinkResp struct {
BaseResp
Data struct {
URL string `json:"url"`
} `json:"data"`
}
// 创建文件V2返回 // 创建文件V2返回
type UploadCreateResp struct { type UploadCreateResp struct {
BaseResp BaseResp

View File

@ -70,6 +70,8 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
var reader *stream.SectionReader var reader *stream.SectionReader
var rateLimitedRd io.Reader var rateLimitedRd io.Reader
sliceMD5 := "" sliceMD5 := ""
// 表单
b := bytes.NewBuffer(make([]byte, 0, 2048))
threadG.GoWithLifecycle(errgroup.Lifecycle{ threadG.GoWithLifecycle(errgroup.Lifecycle{
Before: func(ctx context.Context) error { Before: func(ctx context.Context) error {
if reader == nil { if reader == nil {
@ -84,7 +86,6 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
if err != nil { if err != nil {
return err return err
} }
rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
} }
return nil return nil
}, },
@ -92,9 +93,8 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
// 重置分片reader位置因为HashReader、上一次失败已经读取到分片EOF // 重置分片reader位置因为HashReader、上一次失败已经读取到分片EOF
reader.Seek(0, io.SeekStart) reader.Seek(0, io.SeekStart)
// 创建表单数据 b.Reset()
var b bytes.Buffer w := multipart.NewWriter(b)
w := multipart.NewWriter(&b)
// 添加表单字段 // 添加表单字段
err = w.WriteField("preuploadID", createResp.Data.PreuploadID) err = w.WriteField("preuploadID", createResp.Data.PreuploadID)
if err != nil { if err != nil {
@ -109,21 +109,20 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
return err return err
} }
// 写入文件内容 // 写入文件内容
fw, err := w.CreateFormFile("slice", fmt.Sprintf("%s.part%d", file.GetName(), partNumber)) _, err = w.CreateFormFile("slice", fmt.Sprintf("%s.part%d", file.GetName(), partNumber))
if err != nil {
return err
}
_, err = utils.CopyWithBuffer(fw, rateLimitedRd)
if err != nil { if err != nil {
return err return err
} }
headSize := b.Len()
err = w.Close() err = w.Close()
if err != nil { if err != nil {
return err return err
} }
head := bytes.NewReader(b.Bytes()[:headSize])
tail := bytes.NewReader(b.Bytes()[headSize:])
rateLimitedRd = driver.NewLimitedUploadStream(ctx, io.MultiReader(head, reader, tail))
// 创建请求并设置header // 创建请求并设置header
req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadDomain+"/upload/v2/file/slice", &b) req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadDomain+"/upload/v2/file/slice", rateLimitedRd)
if err != nil { if err != nil {
return err return err
} }

View File

@ -1,15 +1,20 @@
package _123_open package _123_open
import ( import (
"crypto/md5"
"encoding/json" "encoding/json"
"errors" "errors"
"fmt"
"net/http" "net/http"
"net/url"
"strconv" "strconv"
"strings"
"time" "time"
"github.com/OpenListTeam/OpenList/v4/drivers/base" "github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/op" "github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/go-resty/resty/v2" "github.com/go-resty/resty/v2"
"github.com/google/uuid"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
@ -20,7 +25,8 @@ var ( //不同情况下获取的AccessTokenQPS限制不同 如下模块化易于
RefreshToken = InitApiInfo(Api+"/api/v1/oauth2/access_token", 1) RefreshToken = InitApiInfo(Api+"/api/v1/oauth2/access_token", 1)
UserInfo = InitApiInfo(Api+"/api/v1/user/info", 1) UserInfo = InitApiInfo(Api+"/api/v1/user/info", 1)
FileList = InitApiInfo(Api+"/api/v2/file/list", 3) FileList = InitApiInfo(Api+"/api/v2/file/list", 3)
DownloadInfo = InitApiInfo(Api+"/api/v1/file/download_info", 0) DownloadInfo = InitApiInfo(Api+"/api/v1/file/download_info", 5)
DirectLink = InitApiInfo(Api+"/api/v1/direct-link/url", 5)
Mkdir = InitApiInfo(Api+"/upload/v1/file/mkdir", 2) Mkdir = InitApiInfo(Api+"/upload/v1/file/mkdir", 2)
Move = InitApiInfo(Api+"/api/v1/file/move", 1) Move = InitApiInfo(Api+"/api/v1/file/move", 1)
Rename = InitApiInfo(Api+"/api/v1/file/name", 1) Rename = InitApiInfo(Api+"/api/v1/file/name", 1)
@ -80,8 +86,24 @@ func (d *Open123) Request(apiInfo *ApiInfo, method string, callback base.ReqCall
} }
func (d *Open123) flushAccessToken() error { func (d *Open123) flushAccessToken() error {
if d.Addition.ClientID != "" { if d.ClientID != "" {
if d.Addition.ClientSecret != "" { if d.RefreshToken != "" {
var resp RefreshTokenResp
_, err := d.Request(RefreshToken, http.MethodPost, func(req *resty.Request) {
req.SetQueryParam("client_id", d.ClientID)
if d.ClientSecret != "" {
req.SetQueryParam("client_secret", d.ClientSecret)
}
req.SetQueryParam("grant_type", "refresh_token")
req.SetQueryParam("refresh_token", d.RefreshToken)
}, &resp)
if err != nil {
return err
}
d.AccessToken = resp.AccessToken
d.RefreshToken = resp.RefreshToken
op.MustSaveDriverStorage(d)
} else if d.ClientSecret != "" {
var resp AccessTokenResp var resp AccessTokenResp
_, err := d.Request(AccessToken, http.MethodPost, func(req *resty.Request) { _, err := d.Request(AccessToken, http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{ req.SetBody(base.Json{
@ -94,24 +116,38 @@ func (d *Open123) flushAccessToken() error {
} }
d.AccessToken = resp.Data.AccessToken d.AccessToken = resp.Data.AccessToken
op.MustSaveDriverStorage(d) op.MustSaveDriverStorage(d)
} else if d.Addition.RefreshToken != "" {
var resp RefreshTokenResp
_, err := d.Request(RefreshToken, http.MethodPost, func(req *resty.Request) {
req.SetQueryParam("client_id", d.ClientID)
req.SetQueryParam("grant_type", "refresh_token")
req.SetQueryParam("refresh_token", d.Addition.RefreshToken)
}, &resp)
if err != nil {
return err
}
d.AccessToken = resp.AccessToken
d.RefreshToken = resp.RefreshToken
op.MustSaveDriverStorage(d)
} }
} }
return nil return nil
} }
func (d *Open123) SignURL(originURL, privateKey string, uid uint64, validDuration time.Duration) (newURL string, err error) {
// 生成Unix时间戳
ts := time.Now().Add(validDuration).Unix()
// 生成随机数建议使用UUID不能包含中划线-
rand := strings.ReplaceAll(uuid.New().String(), "-", "")
// 解析URL
objURL, err := url.Parse(originURL)
if err != nil {
return "", err
}
// 待签名字符串格式path-timestamp-rand-uid-privateKey
unsignedStr := fmt.Sprintf("%s-%d-%s-%d-%s", objURL.Path, ts, rand, uid, privateKey)
md5Hash := md5.Sum([]byte(unsignedStr))
// 生成鉴权参数格式timestamp-rand-uid-md5hash
authKey := fmt.Sprintf("%d-%s-%d-%x", ts, rand, uid, md5Hash)
// 添加鉴权参数到URL查询参数
v := objURL.Query()
v.Add("auth_key", authKey)
objURL.RawQuery = v.Encode()
return objURL.String(), nil
}
func (d *Open123) getUserInfo() (*UserInfoResp, error) { func (d *Open123) getUserInfo() (*UserInfoResp, error) {
var resp UserInfoResp var resp UserInfoResp
@ -122,6 +158,18 @@ func (d *Open123) getUserInfo() (*UserInfoResp, error) {
return &resp, nil return &resp, nil
} }
func (d *Open123) getUID() (uint64, error) {
if d.UID != 0 {
return d.UID, nil
}
resp, err := d.getUserInfo()
if err != nil {
return 0, err
}
d.UID = resp.Data.UID
return resp.Data.UID, nil
}
func (d *Open123) getFiles(parentFileId int64, limit int, lastFileId int64) (*FileListResp, error) { func (d *Open123) getFiles(parentFileId int64, limit int, lastFileId int64) (*FileListResp, error) {
var resp FileListResp var resp FileListResp
@ -159,6 +207,21 @@ func (d *Open123) getDownloadInfo(fileId int64) (*DownloadInfoResp, error) {
return &resp, nil return &resp, nil
} }
func (d *Open123) getDirectLink(fileId int64) (*DirectLinkResp, error) {
var resp DirectLinkResp
_, err := d.Request(DirectLink, http.MethodGet, func(req *resty.Request) {
req.SetQueryParams(map[string]string{
"fileID": strconv.FormatInt(fileId, 10),
})
}, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}
func (d *Open123) mkdir(parentID int64, name string) error { func (d *Open123) mkdir(parentID int64, name string) error {
_, err := d.Request(Mkdir, http.MethodPost, func(req *resty.Request) { _, err := d.Request(Mkdir, http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{ req.SetBody(base.Json{

View File

@ -534,16 +534,15 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
if size > partSize { if size > partSize {
part = (size + partSize - 1) / partSize part = (size + partSize - 1) / partSize
} }
// 生成所有 partInfos
partInfos := make([]PartInfo, 0, part) partInfos := make([]PartInfo, 0, part)
for i := int64(0); i < part; i++ { for i := int64(0); i < part; i++ {
if utils.IsCanceled(ctx) { if utils.IsCanceled(ctx) {
return ctx.Err() return ctx.Err()
} }
start := i * partSize start := i * partSize
byteSize := size - start byteSize := min(size-start, partSize)
if byteSize > partSize {
byteSize = partSize
}
partNumber := i + 1 partNumber := i + 1
partInfo := PartInfo{ partInfo := PartInfo{
PartNumber: partNumber, PartNumber: partNumber,
@ -591,17 +590,20 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
// resp.Data.RapidUpload: true 支持快传,但此处直接检测是否返回分片的上传地址 // resp.Data.RapidUpload: true 支持快传,但此处直接检测是否返回分片的上传地址
// 快传的情况下同样需要手动处理冲突 // 快传的情况下同样需要手动处理冲突
if resp.Data.PartInfos != nil { if resp.Data.PartInfos != nil {
// 读取前100个分片的上传地址 // Progress
uploadPartInfos := resp.Data.PartInfos p := driver.NewProgress(size, up)
rateLimited := driver.NewLimitedUploadStream(ctx, stream)
// 获取后续分片的上传地址 // 先上传前100个分片
for i := 101; i < len(partInfos); i += 100 { err = d.uploadPersonalParts(ctx, partInfos, resp.Data.PartInfos, rateLimited, p)
end := i + 100 if err != nil {
if end > len(partInfos) { return err
end = len(partInfos)
} }
batchPartInfos := partInfos[i:end]
// 如果还有剩余分片,分批获取上传地址并上传
for i := 100; i < len(partInfos); i += 100 {
end := min(i+100, len(partInfos))
batchPartInfos := partInfos[i:end]
moredata := base.Json{ moredata := base.Json{
"fileId": resp.Data.FileId, "fileId": resp.Data.FileId,
"uploadId": resp.Data.UploadId, "uploadId": resp.Data.UploadId,
@ -617,44 +619,13 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
if err != nil { if err != nil {
return err return err
} }
uploadPartInfos = append(uploadPartInfos, moreresp.Data.PartInfos...) err = d.uploadPersonalParts(ctx, partInfos, moreresp.Data.PartInfos, rateLimited, p)
}
// Progress
p := driver.NewProgress(size, up)
rateLimited := driver.NewLimitedUploadStream(ctx, stream)
// 上传所有分片
for _, uploadPartInfo := range uploadPartInfos {
index := uploadPartInfo.PartNumber - 1
partSize := partInfos[index].PartSize
log.Debugf("[139] uploading part %+v/%+v", index, len(uploadPartInfos))
limitReader := io.LimitReader(rateLimited, partSize)
// Update Progress
r := io.TeeReader(limitReader, p)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadPartInfo.UploadUrl, r)
if err != nil { if err != nil {
return err return err
} }
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Content-Length", fmt.Sprint(partSize))
req.Header.Set("Origin", "https://yun.139.com")
req.Header.Set("Referer", "https://yun.139.com/")
req.ContentLength = partSize
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
_ = res.Body.Close()
log.Debugf("[139] uploaded: %+v", res)
if res.StatusCode != http.StatusOK {
return fmt.Errorf("unexpected status code: %d", res.StatusCode)
}
} }
// 全部分片上传完毕后complete
data = base.Json{ data = base.Json{
"contentHash": fullHash, "contentHash": fullHash,
"contentHashAlgorithm": "SHA256", "contentHashAlgorithm": "SHA256",

View File

@ -1,9 +1,11 @@
package _139 package _139
import ( import (
"context"
"encoding/base64" "encoding/base64"
"errors" "errors"
"fmt" "fmt"
"io"
"net/http" "net/http"
"net/url" "net/url"
"path" "path"
@ -13,6 +15,7 @@ import (
"time" "time"
"github.com/OpenListTeam/OpenList/v4/drivers/base" "github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op" "github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/OpenListTeam/OpenList/v4/pkg/utils"
@ -623,3 +626,47 @@ func (d *Yun139) getPersonalCloudHost() string {
} }
return d.PersonalCloudHost return d.PersonalCloudHost
} }
func (d *Yun139) uploadPersonalParts(ctx context.Context, partInfos []PartInfo, uploadPartInfos []PersonalPartInfo, rateLimited *driver.RateLimitReader, p *driver.Progress) error {
// 确保数组以 PartNumber 从小到大排序
sort.Slice(uploadPartInfos, func(i, j int) bool {
return uploadPartInfos[i].PartNumber < uploadPartInfos[j].PartNumber
})
for _, uploadPartInfo := range uploadPartInfos {
index := uploadPartInfo.PartNumber - 1
if index < 0 || index >= len(partInfos) {
return fmt.Errorf("invalid PartNumber %d: index out of bounds (partInfos length: %d)", uploadPartInfo.PartNumber, len(partInfos))
}
partSize := partInfos[index].PartSize
log.Debugf("[139] uploading part %+v/%+v", index, len(partInfos))
limitReader := io.LimitReader(rateLimited, partSize)
r := io.TeeReader(limitReader, p)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadPartInfo.UploadUrl, r)
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Content-Length", fmt.Sprint(partSize))
req.Header.Set("Origin", "https://yun.139.com")
req.Header.Set("Referer", "https://yun.139.com/")
req.ContentLength = partSize
err = func() error {
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
log.Debugf("[139] uploaded: %+v", res)
if res.StatusCode != http.StatusOK {
body, _ := io.ReadAll(res.Body)
return fmt.Errorf("unexpected status code: %d, body: %s", res.StatusCode, string(body))
}
return nil
}()
if err != nil {
return err
}
}
return nil
}

View File

@ -1,7 +1,6 @@
package _189_tv package _189_tv
import ( import (
"container/ring"
"context" "context"
"net/http" "net/http"
"strconv" "strconv"
@ -12,6 +11,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/driver" "github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs" "github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/cron"
"github.com/go-resty/resty/v2" "github.com/go-resty/resty/v2"
) )
@ -21,9 +21,10 @@ type Cloud189TV struct {
client *resty.Client client *resty.Client
tokenInfo *AppSessionResp tokenInfo *AppSessionResp
uploadThread int uploadThread int
familyTransferFolder *ring.Ring
cleanFamilyTransferFile func()
storageConfig driver.Config storageConfig driver.Config
TempUuid string
cron *cron.Cron // 新增 cron 字段
} }
func (y *Cloud189TV) Config() driver.Config { func (y *Cloud189TV) Config() driver.Config {
@ -79,10 +80,17 @@ func (y *Cloud189TV) Init(ctx context.Context) (err error) {
} }
} }
y.cron = cron.NewCron(time.Minute * 5)
y.cron.Do(y.keepAlive)
return return
} }
func (y *Cloud189TV) Drop(ctx context.Context) error { func (y *Cloud189TV) Drop(ctx context.Context) error {
if y.cron != nil {
y.cron.Stop()
y.cron = nil
}
return nil return nil
} }

View File

@ -8,7 +8,6 @@ import (
type Addition struct { type Addition struct {
driver.RootID driver.RootID
AccessToken string `json:"access_token"` AccessToken string `json:"access_token"`
TempUuid string
OrderBy string `json:"order_by" type:"select" options:"filename,filesize,lastOpTime" default:"filename"` OrderBy string `json:"order_by" type:"select" options:"filename,filesize,lastOpTime" default:"filename"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"` OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
Type string `json:"type" type:"select" options:"personal,family" default:"personal"` Type string `json:"type" type:"select" options:"personal,family" default:"personal"`

View File

@ -66,6 +66,10 @@ func (y *Cloud189TV) AppKeySignatureHeader(url, method string) map[string]string
} }
func (y *Cloud189TV) request(url, method string, callback base.ReqCallback, params map[string]string, resp interface{}, isFamily ...bool) ([]byte, error) { func (y *Cloud189TV) request(url, method string, callback base.ReqCallback, params map[string]string, resp interface{}, isFamily ...bool) ([]byte, error) {
return y.requestWithRetry(url, method, callback, params, resp, 0, isFamily...)
}
func (y *Cloud189TV) requestWithRetry(url, method string, callback base.ReqCallback, params map[string]string, resp interface{}, retryCount int, isFamily ...bool) ([]byte, error) {
req := y.client.R().SetQueryParams(clientSuffix()) req := y.client.R().SetQueryParams(clientSuffix())
if params != nil { if params != nil {
@ -91,8 +95,23 @@ func (y *Cloud189TV) request(url, method string, callback base.ReqCallback, para
if strings.Contains(res.String(), "userSessionBO is null") || if strings.Contains(res.String(), "userSessionBO is null") ||
strings.Contains(res.String(), "InvalidSessionKey") { strings.Contains(res.String(), "InvalidSessionKey") {
// 限制重试次数,避免无限递归
if retryCount >= 3 {
y.Addition.AccessToken = ""
op.MustSaveDriverStorage(y)
return nil, errors.New("session expired after retry")
}
// 尝试刷新会话
if err := y.refreshSession(); err != nil {
// 如果刷新失败说明AccessToken也已过期需要重新登录
y.Addition.AccessToken = ""
op.MustSaveDriverStorage(y)
return nil, errors.New("session expired") return nil, errors.New("session expired")
} }
// 如果刷新成功,则重试原始请求(增加重试计数)
return y.requestWithRetry(url, method, callback, params, resp, retryCount+1, isFamily...)
}
// 处理错误 // 处理错误
if erron.HasError() { if erron.HasError() {
@ -131,6 +150,7 @@ func (y *Cloud189TV) put(ctx context.Context, url string, headers map[string]str
} }
} }
// 请求完成后http.Client会Close Request.Body
resp, err := base.HttpClient.Do(req) resp, err := base.HttpClient.Do(req)
if err != nil { if err != nil {
return nil, err return nil, err
@ -210,7 +230,7 @@ func (y *Cloud189TV) login() (err error) {
var erron RespErr var erron RespErr
var tokenInfo AppSessionResp var tokenInfo AppSessionResp
if y.Addition.AccessToken == "" { if y.Addition.AccessToken == "" {
if y.Addition.TempUuid == "" { if y.TempUuid == "" {
// 获取登录参数 // 获取登录参数
var uuidInfo UuidInfoResp var uuidInfo UuidInfoResp
req.SetResult(&uuidInfo).SetError(&erron) req.SetResult(&uuidInfo).SetError(&erron)
@ -229,7 +249,7 @@ func (y *Cloud189TV) login() (err error) {
if uuidInfo.Uuid == "" { if uuidInfo.Uuid == "" {
return errors.New("uuidInfo is empty") return errors.New("uuidInfo is empty")
} }
y.Addition.TempUuid = uuidInfo.Uuid y.TempUuid = uuidInfo.Uuid
op.MustSaveDriverStorage(y) op.MustSaveDriverStorage(y)
// 展示二维码 // 展示二维码
@ -257,7 +277,7 @@ func (y *Cloud189TV) login() (err error) {
// Signature // Signature
req.SetHeaders(y.AppKeySignatureHeader(ApiUrl+"/family/manage/qrcodeLoginResult.action", req.SetHeaders(y.AppKeySignatureHeader(ApiUrl+"/family/manage/qrcodeLoginResult.action",
http.MethodGet)) http.MethodGet))
req.SetQueryParam("uuid", y.Addition.TempUuid) req.SetQueryParam("uuid", y.TempUuid)
_, err = req.Execute(http.MethodGet, ApiUrl+"/family/manage/qrcodeLoginResult.action") _, err = req.Execute(http.MethodGet, ApiUrl+"/family/manage/qrcodeLoginResult.action")
if err != nil { if err != nil {
return return
@ -269,7 +289,6 @@ func (y *Cloud189TV) login() (err error) {
return errors.New("E189AccessToken is empty") return errors.New("E189AccessToken is empty")
} }
y.Addition.AccessToken = accessTokenResp.E189AccessToken y.Addition.AccessToken = accessTokenResp.E189AccessToken
y.Addition.TempUuid = ""
} }
} }
// 获取SessionKey 和 SessionSecret // 获取SessionKey 和 SessionSecret
@ -293,6 +312,44 @@ func (y *Cloud189TV) login() (err error) {
return return
} }
// refreshSession 尝试使用现有的 AccessToken 刷新会话
func (y *Cloud189TV) refreshSession() (err error) {
var erron RespErr
var tokenInfo AppSessionResp
reqb := y.client.R().SetQueryParams(clientSuffix())
reqb.SetResult(&tokenInfo).SetError(&erron)
// Signature
reqb.SetHeaders(y.AppKeySignatureHeader(ApiUrl+"/family/manage/loginFamilyMerge.action",
http.MethodGet))
reqb.SetQueryParam("e189AccessToken", y.Addition.AccessToken)
_, err = reqb.Execute(http.MethodGet, ApiUrl+"/family/manage/loginFamilyMerge.action")
if err != nil {
return
}
if erron.HasError() {
return &erron
}
y.tokenInfo = &tokenInfo
return nil
}
func (y *Cloud189TV) keepAlive() {
_, err := y.get(ApiUrl+"/keepUserSession.action", func(r *resty.Request) {
r.SetQueryParams(clientSuffix())
}, nil)
if err != nil {
utils.Log.Warnf("189tv: Failed to keep user session alive: %v", err)
// 如果keepAlive失败尝试刷新session
if refreshErr := y.refreshSession(); refreshErr != nil {
utils.Log.Errorf("189tv: Failed to refresh session after keepAlive error: %v", refreshErr)
}
} else {
utils.Log.Debugf("189tv: User session kept alive successfully.")
}
}
func (y *Cloud189TV) RapidUpload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, isFamily bool, overwrite bool) (model.Obj, error) { func (y *Cloud189TV) RapidUpload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, isFamily bool, overwrite bool) (model.Obj, error) {
fileMd5 := stream.GetHash().GetHash(utils.MD5) fileMd5 := stream.GetHash().GetHash(utils.MD5)
if len(fileMd5) < utils.MD5.Width { if len(fileMd5) < utils.MD5.Width {
@ -333,6 +390,10 @@ func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model
// 网盘中不存在该文件,开始上传 // 网盘中不存在该文件,开始上传
status := GetUploadFileStatusResp{CreateUploadFileResp: *uploadInfo} status := GetUploadFileStatusResp{CreateUploadFileResp: *uploadInfo}
// driver.RateLimitReader会尝试Close底层的reader
// 但这里的tempFile是一个*os.FileClose后就没法继续读了
// 所以这里用io.NopCloser包一层
rateLimitedRd := driver.NewLimitedUploadStream(ctx, io.NopCloser(tempFile))
for status.GetSize() < file.GetSize() && status.FileDataExists != 1 { for status.GetSize() < file.GetSize() && status.FileDataExists != 1 {
if utils.IsCanceled(ctx) { if utils.IsCanceled(ctx) {
return nil, ctx.Err() return nil, ctx.Err()
@ -350,7 +411,7 @@ func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model
header["Edrive-UploadFileId"] = fmt.Sprint(status.UploadFileId) header["Edrive-UploadFileId"] = fmt.Sprint(status.UploadFileId)
} }
_, err := y.put(ctx, status.FileUploadUrl, header, true, tempFile, isFamily) _, err := y.put(ctx, status.FileUploadUrl, header, true, rateLimitedRd, isFamily)
if err, ok := err.(*RespErr); ok && err.Code != "InputStreamReadError" { if err, ok := err.(*RespErr); ok && err.Code != "InputStreamReadError" {
return nil, err return nil, err
} }

View File

@ -12,6 +12,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/driver" "github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs" "github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/cron"
"github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/go-resty/resty/v2" "github.com/go-resty/resty/v2"
"github.com/google/uuid" "github.com/google/uuid"
@ -21,11 +22,11 @@ type Cloud189PC struct {
model.Storage model.Storage
Addition Addition
identity string
client *resty.Client client *resty.Client
loginParam *LoginParam loginParam *LoginParam
qrcodeParam *QRLoginParam
tokenInfo *AppSessionResp tokenInfo *AppSessionResp
uploadThread int uploadThread int
@ -35,6 +36,7 @@ type Cloud189PC struct {
storageConfig driver.Config storageConfig driver.Config
ref *Cloud189PC ref *Cloud189PC
cron *cron.Cron
} }
func (y *Cloud189PC) Config() driver.Config { func (y *Cloud189PC) Config() driver.Config {
@ -84,14 +86,22 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) {
}) })
} }
// 避免重复登陆 // 先尝试用Token刷新之后尝试登陆
identity := utils.GetMD5EncodeStr(y.Username + y.Password) if y.Addition.RefreshToken != "" {
if !y.isLogin() || y.identity != identity { y.tokenInfo = &AppSessionResp{RefreshToken: y.Addition.RefreshToken}
y.identity = identity if err = y.refreshToken(); err != nil {
return
}
} else {
if err = y.login(); err != nil { if err = y.login(); err != nil {
return return
} }
} }
// 初始化并启动 cron 任务
y.cron = cron.NewCron(time.Duration(time.Minute * 5))
// 每5分钟执行一次 keepAlive
y.cron.Do(y.keepAlive)
} }
// 处理家庭云ID // 处理家庭云ID
@ -128,6 +138,10 @@ func (d *Cloud189PC) InitReference(storage driver.Driver) error {
func (y *Cloud189PC) Drop(ctx context.Context) error { func (y *Cloud189PC) Drop(ctx context.Context) error {
y.ref = nil y.ref = nil
if y.cron != nil {
y.cron.Stop()
y.cron = nil
}
return nil return nil
} }

View File

@ -80,6 +80,20 @@ func timestamp() int64 {
return time.Now().UTC().UnixNano() / 1e6 return time.Now().UTC().UnixNano() / 1e6
} }
// formatDate formats a time.Time object into the "YYYY-MM-DDHH:mm:ssSSS" format.
func formatDate(t time.Time) string {
// The layout string "2006-01-0215:04:05.000" corresponds to:
// 2006 -> Year (YYYY)
// 01 -> Month (MM)
// 02 -> Day (DD)
// 15 -> Hour (HH)
// 04 -> Minute (mm)
// 05 -> Second (ss)
// 000 -> Millisecond (SSS) with leading zeros
// Note the lack of a separator between the date and hour, matching the desired output.
return t.Format("2006-01-0215:04:05.000")
}
func MustParseTime(str string) *time.Time { func MustParseTime(str string) *time.Time {
lastOpTime, _ := time.ParseInLocation("2006-01-02 15:04:05 -07", str+" +08", time.Local) lastOpTime, _ := time.ParseInLocation("2006-01-02 15:04:05 -07", str+" +08", time.Local)
return &lastOpTime return &lastOpTime

View File

@ -6,9 +6,11 @@ import (
) )
type Addition struct { type Addition struct {
LoginType string `json:"login_type" type:"select" options:"password,qrcode" default:"password" required:"true"`
Username string `json:"username" required:"true"` Username string `json:"username" required:"true"`
Password string `json:"password" required:"true"` Password string `json:"password" required:"true"`
VCode string `json:"validate_code"` VCode string `json:"validate_code"`
RefreshToken string `json:"refresh_token" help:"To switch accounts, please clear this field"`
driver.RootID driver.RootID
OrderBy string `json:"order_by" type:"select" options:"filename,filesize,lastOpTime" default:"filename"` OrderBy string `json:"order_by" type:"select" options:"filename,filesize,lastOpTime" default:"filename"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"` OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`

View File

@ -68,15 +68,7 @@ func (e *RespErr) Error() string {
return "" return ""
} }
// 登陆需要的参数 type BaseLoginParam struct {
type LoginParam struct {
// 加密后的用户名和密码
RsaUsername string
RsaPassword string
// rsa密钥
jRsaKey string
// 请求头参数 // 请求头参数
Lt string Lt string
ReqId string ReqId string
@ -88,6 +80,27 @@ type LoginParam struct {
CaptchaToken string CaptchaToken string
} }
// QRLoginParam 用于暂存二维码登录过程中的参数
type QRLoginParam struct {
BaseLoginParam
UUID string `json:"uuid"`
EncodeUUID string `json:"encodeuuid"`
EncryUUID string `json:"encryuuid"`
}
// 登陆需要的参数
type LoginParam struct {
// 加密后的用户名和密码
RsaUsername string
RsaPassword string
// rsa密钥
jRsaKey string
BaseLoginParam
}
// 登陆加密相关 // 登陆加密相关
type EncryptConfResp struct { type EncryptConfResp struct {
Result int `json:"result"` Result int `json:"result"`

View File

@ -29,6 +29,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/stream" "github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/errgroup" "github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
"github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/skip2/go-qrcode"
"github.com/avast/retry-go" "github.com/avast/retry-go"
"github.com/go-resty/resty/v2" "github.com/go-resty/resty/v2"
@ -54,6 +55,9 @@ const (
MAC = "TELEMAC" MAC = "TELEMAC"
CHANNEL_ID = "web_cloud.189.cn" CHANNEL_ID = "web_cloud.189.cn"
// Error codes
UserInvalidOpenTokenError = "UserInvalidOpenToken"
) )
func (y *Cloud189PC) SignatureHeader(url, method, params string, isFamily bool) map[string]string { func (y *Cloud189PC) SignatureHeader(url, method, params string, isFamily bool) map[string]string {
@ -264,7 +268,14 @@ func (y *Cloud189PC) findFileByName(ctx context.Context, searchName string, fold
} }
} }
func (y *Cloud189PC) login() (err error) { func (y *Cloud189PC) login() error {
if y.LoginType == "qrcode" {
return y.loginByQRCode()
}
return y.loginByPassword()
}
func (y *Cloud189PC) loginByPassword() (err error) {
// 初始化登陆所需参数 // 初始化登陆所需参数
if y.loginParam == nil { if y.loginParam == nil {
if err = y.initLoginParam(); err != nil { if err = y.initLoginParam(); err != nil {
@ -278,11 +289,16 @@ func (y *Cloud189PC) login() (err error) {
// 销毁登陆参数 // 销毁登陆参数
y.loginParam = nil y.loginParam = nil
// 遇到错误,重新加载登陆参数(刷新验证码) // 遇到错误,重新加载登陆参数(刷新验证码)
if err != nil && y.NoUseOcr { if err != nil {
if y.NoUseOcr {
if err1 := y.initLoginParam(); err1 != nil { if err1 := y.initLoginParam(); err1 != nil {
err = fmt.Errorf("err1: %s \nerr2: %s", err, err1) err = fmt.Errorf("err1: %s \nerr2: %s", err, err1)
} }
} }
y.Status = err.Error()
op.MustSaveDriverStorage(y)
}
}() }()
param := y.loginParam param := y.loginParam
@ -336,14 +352,105 @@ func (y *Cloud189PC) login() (err error) {
err = fmt.Errorf(tokenInfo.ResMessage) err = fmt.Errorf(tokenInfo.ResMessage)
return return
} }
y.Addition.RefreshToken = tokenInfo.RefreshToken
y.tokenInfo = &tokenInfo y.tokenInfo = &tokenInfo
op.MustSaveDriverStorage(y)
return return
} }
/* 初始化登陆需要的参数 func (y *Cloud189PC) loginByQRCode() error {
* 如果遇到验证码返回错误 if y.qrcodeParam == nil {
*/ if err := y.initQRCodeParam(); err != nil {
func (y *Cloud189PC) initLoginParam() error { // 二维码也通过错误返回
return err
}
}
var state struct {
Status int `json:"status"`
RedirectUrl string `json:"redirectUrl"`
Msg string `json:"msg"`
}
now := time.Now()
_, err := y.client.R().
SetHeaders(map[string]string{
"Referer": AUTH_URL,
"Reqid": y.qrcodeParam.ReqId,
"lt": y.qrcodeParam.Lt,
}).
SetFormData(map[string]string{
"appId": APP_ID,
"clientType": CLIENT_TYPE,
"returnUrl": RETURN_URL,
"paramId": y.qrcodeParam.ParamId,
"uuid": y.qrcodeParam.UUID,
"encryuuid": y.qrcodeParam.EncryUUID,
"date": formatDate(now),
"timeStamp": fmt.Sprint(now.UTC().UnixNano() / 1e6),
}).
ForceContentType("application/json;charset=UTF-8").
SetResult(&state).
Post(AUTH_URL + "/api/logbox/oauth2/qrcodeLoginState.do")
if err != nil {
return fmt.Errorf("failed to check QR code state: %w", err)
}
switch state.Status {
case 0: // 登录成功
var tokenInfo AppSessionResp
_, err = y.client.R().
SetResult(&tokenInfo).
SetQueryParams(clientSuffix()).
SetQueryParam("redirectURL", state.RedirectUrl).
Post(API_URL + "/getSessionForPC.action")
if err != nil {
return err
}
if tokenInfo.ResCode != 0 {
return fmt.Errorf(tokenInfo.ResMessage)
}
y.Addition.RefreshToken = tokenInfo.RefreshToken
y.tokenInfo = &tokenInfo
op.MustSaveDriverStorage(y)
return nil
case -11001: // 二维码过期
y.qrcodeParam = nil
return errors.New("QR code expired, please try again")
case -106: // 等待扫描
return y.genQRCode("QR code has not been scanned yet, please scan and save again")
case -11002: // 等待确认
return y.genQRCode("QR code has been scanned, please confirm the login on your phone and save again")
default: // 其他错误
y.qrcodeParam = nil
return fmt.Errorf("QR code login failed with status %d: %s", state.Status, state.Msg)
}
}
func (y *Cloud189PC) genQRCode(text string) error {
// 展示二维码
qrTemplate := `<body>
state: %s
<br><img src="data:image/jpeg;base64,%s"/>
<br>Or Click here: <a href="%s">Login</a>
</body>`
// Generate QR code
qrCode, err := qrcode.Encode(y.qrcodeParam.UUID, qrcode.Medium, 256)
if err != nil {
return fmt.Errorf("failed to generate QR code: %v", err)
}
// Encode QR code to base64
qrCodeBase64 := base64.StdEncoding.EncodeToString(qrCode)
// Create the HTML page
qrPage := fmt.Sprintf(qrTemplate, text, qrCodeBase64, y.qrcodeParam.UUID)
return fmt.Errorf("need verify: \n%s", qrPage)
}
func (y *Cloud189PC) initBaseParams() (*BaseLoginParam, error) {
// 清除cookie // 清除cookie
jar, _ := cookiejar.New(nil) jar, _ := cookiejar.New(nil)
y.client.SetCookieJar(jar) y.client.SetCookieJar(jar)
@ -357,17 +464,30 @@ func (y *Cloud189PC) initLoginParam() error {
}). }).
Get(WEB_URL + "/api/portal/unifyLoginForPC.action") Get(WEB_URL + "/api/portal/unifyLoginForPC.action")
if err != nil { if err != nil {
return err return nil, err
} }
param := LoginParam{ return &BaseLoginParam{
CaptchaToken: regexp.MustCompile(`'captchaToken' value='(.+?)'`).FindStringSubmatch(res.String())[1], CaptchaToken: regexp.MustCompile(`'captchaToken' value='(.+?)'`).FindStringSubmatch(res.String())[1],
Lt: regexp.MustCompile(`lt = "(.+?)"`).FindStringSubmatch(res.String())[1], Lt: regexp.MustCompile(`lt = "(.+?)"`).FindStringSubmatch(res.String())[1],
ParamId: regexp.MustCompile(`paramId = "(.+?)"`).FindStringSubmatch(res.String())[1], ParamId: regexp.MustCompile(`paramId = "(.+?)"`).FindStringSubmatch(res.String())[1],
ReqId: regexp.MustCompile(`reqId = "(.+?)"`).FindStringSubmatch(res.String())[1], ReqId: regexp.MustCompile(`reqId = "(.+?)"`).FindStringSubmatch(res.String())[1],
// jRsaKey: regexp.MustCompile(`"j_rsaKey" value="(.+?)"`).FindStringSubmatch(res.String())[1], }, nil
} }
/* 初始化登陆需要的参数
* 如果遇到验证码返回错误
*/
func (y *Cloud189PC) initLoginParam() error {
y.loginParam = nil
baseParam, err := y.initBaseParams()
if err != nil {
return err
}
y.loginParam = &LoginParam{BaseLoginParam: *baseParam}
// 获取rsa公钥 // 获取rsa公钥
var encryptConf EncryptConfResp var encryptConf EncryptConfResp
_, err = y.client.R(). _, err = y.client.R().
@ -378,18 +498,17 @@ func (y *Cloud189PC) initLoginParam() error {
return err return err
} }
param.jRsaKey = fmt.Sprintf("-----BEGIN PUBLIC KEY-----\n%s\n-----END PUBLIC KEY-----", encryptConf.Data.PubKey) y.loginParam.jRsaKey = fmt.Sprintf("-----BEGIN PUBLIC KEY-----\n%s\n-----END PUBLIC KEY-----", encryptConf.Data.PubKey)
param.RsaUsername = encryptConf.Data.Pre + RsaEncrypt(param.jRsaKey, y.Username) y.loginParam.RsaUsername = encryptConf.Data.Pre + RsaEncrypt(y.loginParam.jRsaKey, y.Username)
param.RsaPassword = encryptConf.Data.Pre + RsaEncrypt(param.jRsaKey, y.Password) y.loginParam.RsaPassword = encryptConf.Data.Pre + RsaEncrypt(y.loginParam.jRsaKey, y.Password)
y.loginParam = &param
// 判断是否需要验证码 // 判断是否需要验证码
resp, err := y.client.R(). resp, err := y.client.R().
SetHeader("REQID", param.ReqId). SetHeader("REQID", y.loginParam.ReqId).
SetFormData(map[string]string{ SetFormData(map[string]string{
"appKey": APP_ID, "appKey": APP_ID,
"accountType": ACCOUNT_TYPE, "accountType": ACCOUNT_TYPE,
"userName": param.RsaUsername, "userName": y.loginParam.RsaUsername,
}).Post(AUTH_URL + "/api/logbox/oauth2/needcaptcha.do") }).Post(AUTH_URL + "/api/logbox/oauth2/needcaptcha.do")
if err != nil { if err != nil {
return err return err
@ -401,8 +520,8 @@ func (y *Cloud189PC) initLoginParam() error {
// 拉取验证码 // 拉取验证码
imgRes, err := y.client.R(). imgRes, err := y.client.R().
SetQueryParams(map[string]string{ SetQueryParams(map[string]string{
"token": param.CaptchaToken, "token": y.loginParam.CaptchaToken,
"REQID": param.ReqId, "REQID": y.loginParam.ReqId,
"rnd": fmt.Sprint(timestamp()), "rnd": fmt.Sprint(timestamp()),
}). }).
Get(AUTH_URL + "/api/logbox/oauth2/picCaptcha.do") Get(AUTH_URL + "/api/logbox/oauth2/picCaptcha.do")
@ -429,10 +548,38 @@ func (y *Cloud189PC) initLoginParam() error {
return nil return nil
} }
// getQRCode 获取并返回二维码
func (y *Cloud189PC) initQRCodeParam() (err error) {
y.qrcodeParam = nil
baseParam, err := y.initBaseParams()
if err != nil {
return err
}
var qrcodeParam QRLoginParam
_, err = y.client.R().
SetFormData(map[string]string{"appId": APP_ID}).
ForceContentType("application/json;charset=UTF-8").
SetResult(&qrcodeParam).
Post(AUTH_URL + "/api/logbox/oauth2/getUUID.do")
if err != nil {
return err
}
qrcodeParam.BaseLoginParam = *baseParam
y.qrcodeParam = &qrcodeParam
return y.genQRCode("please scan the QR code with the 189 Cloud app, then save the settings again.")
}
// 刷新会话 // 刷新会话
func (y *Cloud189PC) refreshSession() (err error) { func (y *Cloud189PC) refreshSession() (err error) {
return y.refreshSessionWithRetry(0)
}
func (y *Cloud189PC) refreshSessionWithRetry(retryCount int) (err error) {
if y.ref != nil { if y.ref != nil {
return y.ref.refreshSession() return y.ref.refreshSessionWithRetry(retryCount)
} }
var erron RespErr var erron RespErr
var userSessionResp UserSessionResp var userSessionResp UserSessionResp
@ -449,37 +596,102 @@ func (y *Cloud189PC) refreshSession() (err error) {
return err return err
} }
// 错误影响正常访问,下线该储存 // token生效刷新token
defer func() {
if err != nil {
y.GetStorage().SetStatus(fmt.Sprintf("%+v", err.Error()))
op.MustSaveDriverStorage(y)
}
}()
if erron.HasError() { if erron.HasError() {
if erron.ResCode == "UserInvalidOpenToken" { if erron.ResCode == UserInvalidOpenTokenError {
if err = y.login(); err != nil { return y.refreshTokenWithRetry(retryCount)
return err
}
} }
return &erron return &erron
} }
y.tokenInfo.UserSessionResp = userSessionResp y.tokenInfo.UserSessionResp = userSessionResp
return return nil
}
// refreshToken 刷新token失败时返回错误不再直接调用login
func (y *Cloud189PC) refreshToken() (err error) {
return y.refreshTokenWithRetry(0)
}
func (y *Cloud189PC) refreshTokenWithRetry(retryCount int) (err error) {
if y.ref != nil {
return y.ref.refreshTokenWithRetry(retryCount)
}
// 限制重试次数,避免无限递归
if retryCount >= 3 {
if y.Addition.RefreshToken != "" {
y.Addition.RefreshToken = ""
op.MustSaveDriverStorage(y)
}
return errors.New("refresh token failed after maximum retries")
}
var erron RespErr
var tokenInfo AppSessionResp
_, err = y.client.R().
SetResult(&tokenInfo).
ForceContentType("application/json;charset=UTF-8").
SetError(&erron).
SetFormData(map[string]string{
"clientId": APP_ID,
"refreshToken": y.tokenInfo.RefreshToken,
"grantType": "refresh_token",
"format": "json",
}).
Post(AUTH_URL + "/api/oauth2/refreshToken.do")
if err != nil {
return err
}
// 如果刷新失败,返回错误给上层处理
if erron.HasError() {
if y.Addition.RefreshToken != "" {
y.Addition.RefreshToken = ""
op.MustSaveDriverStorage(y)
}
// 根据登录类型决定下一步行为
if y.LoginType == "qrcode" {
return errors.New("QR code session has expired, please re-scan the code to log in")
}
// 密码登录模式下,尝试回退到完整登录
return y.login()
}
y.Addition.RefreshToken = tokenInfo.RefreshToken
y.tokenInfo = &tokenInfo
op.MustSaveDriverStorage(y)
return y.refreshSessionWithRetry(retryCount + 1)
}
func (y *Cloud189PC) keepAlive() {
_, err := y.get(API_URL+"/keepUserSession.action", func(r *resty.Request) {
r.SetQueryParams(clientSuffix())
}, nil)
if err != nil {
utils.Log.Warnf("189pc: Failed to keep user session alive: %v", err)
// 如果keepAlive失败尝试刷新session
if refreshErr := y.refreshSession(); refreshErr != nil {
utils.Log.Errorf("189pc: Failed to refresh session after keepAlive error: %v", refreshErr)
}
} else {
utils.Log.Debugf("189pc: User session kept alive successfully.")
}
} }
// 普通上传 // 普通上传
// 无法上传大小为0的文件 // 无法上传大小为0的文件
func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) { func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
size := file.GetSize() // 文件大小
sliceSize := min(size, partSize(size)) fileSize := file.GetSize()
// 分片大小,不得为文件大小
sliceSize := partSize(fileSize)
params := Params{ params := Params{
"parentFolderId": dstDir.GetID(), "parentFolderId": dstDir.GetID(),
"fileName": url.QueryEscape(file.GetName()), "fileName": url.QueryEscape(file.GetName()),
"fileSize": fmt.Sprint(file.GetSize()), "fileSize": fmt.Sprint(fileSize),
"sliceSize": fmt.Sprint(sliceSize), "sliceSize": fmt.Sprint(sliceSize), // 必须为特定分片大小
"lazyCheck": "1", "lazyCheck": "1",
} }
@ -512,10 +724,10 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
retry.DelayType(retry.BackOffDelay)) retry.DelayType(retry.BackOffDelay))
count := 1 count := 1
if size > sliceSize { if fileSize > sliceSize {
count = int((size + sliceSize - 1) / sliceSize) count = int((fileSize + sliceSize - 1) / sliceSize)
} }
lastPartSize := size % sliceSize lastPartSize := fileSize % sliceSize
if lastPartSize == 0 { if lastPartSize == 0 {
lastPartSize = sliceSize lastPartSize = sliceSize
} }
@ -535,9 +747,9 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
break break
} }
offset := int64((i)-1) * sliceSize offset := int64((i)-1) * sliceSize
size := sliceSize partSize := sliceSize
if i == count { if i == count {
size = lastPartSize partSize = lastPartSize
} }
partInfo := "" partInfo := ""
var reader *stream.SectionReader var reader *stream.SectionReader
@ -546,14 +758,14 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
Before: func(ctx context.Context) error { Before: func(ctx context.Context) error {
if reader == nil { if reader == nil {
var err error var err error
reader, err = ss.GetSectionReader(offset, size) reader, err = ss.GetSectionReader(offset, partSize)
if err != nil { if err != nil {
return err return err
} }
silceMd5.Reset() silceMd5.Reset()
w, err := utils.CopyWithBuffer(writers, reader) w, err := utils.CopyWithBuffer(writers, reader)
if w != size { if w != partSize {
return fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", size, w, err) return fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", partSize, w, err)
} }
// 计算块md5并进行hex和base64编码 // 计算块md5并进行hex和base64编码
md5Bytes := silceMd5.Sum(nil) md5Bytes := silceMd5.Sum(nil)
@ -595,7 +807,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
fileMd5Hex = strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil))) fileMd5Hex = strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
} }
sliceMd5Hex := fileMd5Hex sliceMd5Hex := fileMd5Hex
if file.GetSize() > sliceSize { if fileSize > sliceSize {
sliceMd5Hex = strings.ToUpper(utils.GetMD5EncodeStr(strings.Join(silceMd5Hexs, "\n"))) sliceMd5Hex = strings.ToUpper(utils.GetMD5EncodeStr(strings.Join(silceMd5Hexs, "\n")))
} }

View File

@ -22,7 +22,9 @@ import (
_ "github.com/OpenListTeam/OpenList/v4/drivers/chaoxing" _ "github.com/OpenListTeam/OpenList/v4/drivers/chaoxing"
_ "github.com/OpenListTeam/OpenList/v4/drivers/cloudreve" _ "github.com/OpenListTeam/OpenList/v4/drivers/cloudreve"
_ "github.com/OpenListTeam/OpenList/v4/drivers/cloudreve_v4" _ "github.com/OpenListTeam/OpenList/v4/drivers/cloudreve_v4"
_ "github.com/OpenListTeam/OpenList/v4/drivers/cnb_releases"
_ "github.com/OpenListTeam/OpenList/v4/drivers/crypt" _ "github.com/OpenListTeam/OpenList/v4/drivers/crypt"
_ "github.com/OpenListTeam/OpenList/v4/drivers/degoo"
_ "github.com/OpenListTeam/OpenList/v4/drivers/doubao" _ "github.com/OpenListTeam/OpenList/v4/drivers/doubao"
_ "github.com/OpenListTeam/OpenList/v4/drivers/doubao_share" _ "github.com/OpenListTeam/OpenList/v4/drivers/doubao_share"
_ "github.com/OpenListTeam/OpenList/v4/drivers/dropbox" _ "github.com/OpenListTeam/OpenList/v4/drivers/dropbox"
@ -39,6 +41,7 @@ import (
_ "github.com/OpenListTeam/OpenList/v4/drivers/lanzou" _ "github.com/OpenListTeam/OpenList/v4/drivers/lanzou"
_ "github.com/OpenListTeam/OpenList/v4/drivers/lenovonas_share" _ "github.com/OpenListTeam/OpenList/v4/drivers/lenovonas_share"
_ "github.com/OpenListTeam/OpenList/v4/drivers/local" _ "github.com/OpenListTeam/OpenList/v4/drivers/local"
_ "github.com/OpenListTeam/OpenList/v4/drivers/mediafire"
_ "github.com/OpenListTeam/OpenList/v4/drivers/mediatrack" _ "github.com/OpenListTeam/OpenList/v4/drivers/mediatrack"
_ "github.com/OpenListTeam/OpenList/v4/drivers/mega" _ "github.com/OpenListTeam/OpenList/v4/drivers/mega"
_ "github.com/OpenListTeam/OpenList/v4/drivers/misskey" _ "github.com/OpenListTeam/OpenList/v4/drivers/misskey"
@ -48,6 +51,7 @@ import (
_ "github.com/OpenListTeam/OpenList/v4/drivers/onedrive_app" _ "github.com/OpenListTeam/OpenList/v4/drivers/onedrive_app"
_ "github.com/OpenListTeam/OpenList/v4/drivers/onedrive_sharelink" _ "github.com/OpenListTeam/OpenList/v4/drivers/onedrive_sharelink"
_ "github.com/OpenListTeam/OpenList/v4/drivers/openlist" _ "github.com/OpenListTeam/OpenList/v4/drivers/openlist"
_ "github.com/OpenListTeam/OpenList/v4/drivers/openlist_share"
_ "github.com/OpenListTeam/OpenList/v4/drivers/pikpak" _ "github.com/OpenListTeam/OpenList/v4/drivers/pikpak"
_ "github.com/OpenListTeam/OpenList/v4/drivers/pikpak_share" _ "github.com/OpenListTeam/OpenList/v4/drivers/pikpak_share"
_ "github.com/OpenListTeam/OpenList/v4/drivers/quark_open" _ "github.com/OpenListTeam/OpenList/v4/drivers/quark_open"
@ -59,6 +63,7 @@ import (
_ "github.com/OpenListTeam/OpenList/v4/drivers/smb" _ "github.com/OpenListTeam/OpenList/v4/drivers/smb"
_ "github.com/OpenListTeam/OpenList/v4/drivers/strm" _ "github.com/OpenListTeam/OpenList/v4/drivers/strm"
_ "github.com/OpenListTeam/OpenList/v4/drivers/teambition" _ "github.com/OpenListTeam/OpenList/v4/drivers/teambition"
_ "github.com/OpenListTeam/OpenList/v4/drivers/teldrive"
_ "github.com/OpenListTeam/OpenList/v4/drivers/terabox" _ "github.com/OpenListTeam/OpenList/v4/drivers/terabox"
_ "github.com/OpenListTeam/OpenList/v4/drivers/thunder" _ "github.com/OpenListTeam/OpenList/v4/drivers/thunder"
_ "github.com/OpenListTeam/OpenList/v4/drivers/thunder_browser" _ "github.com/OpenListTeam/OpenList/v4/drivers/thunder_browser"

View File

@ -21,6 +21,8 @@ type CloudreveV4 struct {
model.Storage model.Storage
Addition Addition
ref *CloudreveV4 ref *CloudreveV4
AccessExpires string
RefreshExpires string
} }
func (d *CloudreveV4) Config() driver.Config { func (d *CloudreveV4) Config() driver.Config {
@ -44,13 +46,17 @@ func (d *CloudreveV4) Init(ctx context.Context) error {
if d.ref != nil { if d.ref != nil {
return nil return nil
} }
if d.AccessToken == "" && d.RefreshToken != "" { if d.canLogin() {
return d.refreshToken()
}
if d.Username != "" {
return d.login() return d.login()
} }
return nil if d.RefreshToken != "" {
return d.refreshToken()
}
if d.AccessToken == "" {
return errors.New("no way to authenticate. At least AccessToken is required")
}
// ensure AccessToken is valid
return d.parseJWT(d.AccessToken, &AccessJWT{})
} }
func (d *CloudreveV4) InitReference(storage driver.Driver) error { func (d *CloudreveV4) InitReference(storage driver.Driver) error {

View File

@ -66,11 +66,27 @@ type CaptchaResp struct {
Ticket string `json:"ticket"` Ticket string `json:"ticket"`
} }
type AccessJWT struct {
TokenType string `json:"token_type"`
Sub string `json:"sub"`
Exp int64 `json:"exp"`
Nbf int64 `json:"nbf"`
}
type RefreshJWT struct {
TokenType string `json:"token_type"`
Sub string `json:"sub"`
Exp int `json:"exp"`
Nbf int `json:"nbf"`
StateHash string `json:"state_hash"`
RootTokenID string `json:"root_token_id"`
}
type Token struct { type Token struct {
AccessToken string `json:"access_token"` AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"` RefreshToken string `json:"refresh_token"`
AccessExpires time.Time `json:"access_expires"` AccessExpires string `json:"access_expires"`
RefreshExpires time.Time `json:"refresh_expires"` RefreshExpires string `json:"refresh_expires"`
} }
type TokenResponse struct { type TokenResponse struct {

View File

@ -28,6 +28,15 @@ import (
// do others that not defined in Driver interface // do others that not defined in Driver interface
const (
CodeLoginRequired = http.StatusUnauthorized
CodeCredentialInvalid = 40020 // Failed to issue token
)
var (
ErrorIssueToken = errors.New("failed to issue token")
)
func (d *CloudreveV4) getUA() string { func (d *CloudreveV4) getUA() string {
if d.CustomUA != "" { if d.CustomUA != "" {
return d.CustomUA return d.CustomUA
@ -39,6 +48,23 @@ func (d *CloudreveV4) request(method string, path string, callback base.ReqCallb
if d.ref != nil { if d.ref != nil {
return d.ref.request(method, path, callback, out) return d.ref.request(method, path, callback, out)
} }
// ensure token
if d.isTokenExpired() {
err := d.refreshToken()
if err != nil {
return err
}
}
return d._request(method, path, callback, out)
}
func (d *CloudreveV4) _request(method string, path string, callback base.ReqCallback, out any) error {
if d.ref != nil {
return d.ref._request(method, path, callback, out)
}
u := d.Address + "/api/v4" + path u := d.Address + "/api/v4" + path
req := base.RestyClient.R() req := base.RestyClient.R()
req.SetHeaders(map[string]string{ req.SetHeaders(map[string]string{
@ -65,15 +91,17 @@ func (d *CloudreveV4) request(method string, path string, callback base.ReqCallb
} }
if r.Code != 0 { if r.Code != 0 {
if r.Code == 401 && d.RefreshToken != "" && path != "/session/token/refresh" { if r.Code == CodeLoginRequired && d.canLogin() && path != "/session/token/refresh" {
// try to refresh token err = d.login()
err = d.refreshToken()
if err != nil { if err != nil {
return err return err
} }
return d.request(method, path, callback, out) return d.request(method, path, callback, out)
} }
return errors.New(r.Msg) if r.Code == CodeCredentialInvalid {
return ErrorIssueToken
}
return fmt.Errorf("%d: %s", r.Code, r.Msg)
} }
if out != nil && r.Data != nil { if out != nil && r.Data != nil {
@ -91,14 +119,18 @@ func (d *CloudreveV4) request(method string, path string, callback base.ReqCallb
return nil return nil
} }
func (d *CloudreveV4) canLogin() bool {
return d.Username != "" && d.Password != ""
}
func (d *CloudreveV4) login() error { func (d *CloudreveV4) login() error {
var siteConfig SiteLoginConfigResp var siteConfig SiteLoginConfigResp
err := d.request(http.MethodGet, "/site/config/login", nil, &siteConfig) err := d._request(http.MethodGet, "/site/config/login", nil, &siteConfig)
if err != nil { if err != nil {
return err return err
} }
var prepareLogin PrepareLoginResp var prepareLogin PrepareLoginResp
err = d.request(http.MethodGet, "/session/prepare?email="+d.Addition.Username, nil, &prepareLogin) err = d._request(http.MethodGet, "/session/prepare?email="+d.Addition.Username, nil, &prepareLogin)
if err != nil { if err != nil {
return err return err
} }
@ -128,7 +160,7 @@ func (d *CloudreveV4) doLogin(needCaptcha bool) error {
} }
if needCaptcha { if needCaptcha {
var config BasicConfigResp var config BasicConfigResp
err = d.request(http.MethodGet, "/site/config/basic", nil, &config) err = d._request(http.MethodGet, "/site/config/basic", nil, &config)
if err != nil { if err != nil {
return err return err
} }
@ -136,7 +168,7 @@ func (d *CloudreveV4) doLogin(needCaptcha bool) error {
return fmt.Errorf("captcha type %s not support", config.CaptchaType) return fmt.Errorf("captcha type %s not support", config.CaptchaType)
} }
var captcha CaptchaResp var captcha CaptchaResp
err = d.request(http.MethodGet, "/site/captcha", nil, &captcha) err = d._request(http.MethodGet, "/site/captcha", nil, &captcha)
if err != nil { if err != nil {
return err return err
} }
@ -162,20 +194,22 @@ func (d *CloudreveV4) doLogin(needCaptcha bool) error {
loginBody["captcha"] = captchaCode loginBody["captcha"] = captchaCode
} }
var token TokenResponse var token TokenResponse
err = d.request(http.MethodPost, "/session/token", func(req *resty.Request) { err = d._request(http.MethodPost, "/session/token", func(req *resty.Request) {
req.SetBody(loginBody) req.SetBody(loginBody)
}, &token) }, &token)
if err != nil { if err != nil {
return err return err
} }
d.AccessToken, d.RefreshToken = token.Token.AccessToken, token.Token.RefreshToken d.AccessToken, d.RefreshToken = token.Token.AccessToken, token.Token.RefreshToken
d.AccessExpires, d.RefreshExpires = token.Token.AccessExpires, token.Token.RefreshExpires
op.MustSaveDriverStorage(d) op.MustSaveDriverStorage(d)
return nil return nil
} }
func (d *CloudreveV4) refreshToken() error { func (d *CloudreveV4) refreshToken() error {
// if no refresh token, try to login if possible
if d.RefreshToken == "" { if d.RefreshToken == "" {
if d.Username != "" { if d.canLogin() {
err := d.login() err := d.login()
if err != nil { if err != nil {
return fmt.Errorf("cannot login to get refresh token, error: %s", err) return fmt.Errorf("cannot login to get refresh token, error: %s", err)
@ -183,20 +217,127 @@ func (d *CloudreveV4) refreshToken() error {
} }
return nil return nil
} }
// parse jwt to check if refresh token is valid
var jwt RefreshJWT
err := d.parseJWT(d.RefreshToken, &jwt)
if err != nil {
// if refresh token is invalid, try to login if possible
if d.canLogin() {
return d.login()
}
d.GetStorage().SetStatus(fmt.Sprintf("Invalid RefreshToken: %s", err.Error()))
op.MustSaveDriverStorage(d)
return fmt.Errorf("invalid refresh token: %w", err)
}
// do refresh token
var token Token var token Token
err := d.request(http.MethodPost, "/session/token/refresh", func(req *resty.Request) { err = d._request(http.MethodPost, "/session/token/refresh", func(req *resty.Request) {
req.SetBody(base.Json{ req.SetBody(base.Json{
"refresh_token": d.RefreshToken, "refresh_token": d.RefreshToken,
}) })
}, &token) }, &token)
if err != nil { if err != nil {
if errors.Is(err, ErrorIssueToken) {
if d.canLogin() {
// try to login again
return d.login()
}
d.GetStorage().SetStatus("This session is no longer valid")
op.MustSaveDriverStorage(d)
return ErrorIssueToken
}
return err return err
} }
d.AccessToken, d.RefreshToken = token.AccessToken, token.RefreshToken d.AccessToken, d.RefreshToken = token.AccessToken, token.RefreshToken
d.AccessExpires, d.RefreshExpires = token.AccessExpires, token.RefreshExpires
op.MustSaveDriverStorage(d) op.MustSaveDriverStorage(d)
return nil return nil
} }
func (d *CloudreveV4) parseJWT(token string, jwt any) error {
split := strings.Split(token, ".")
if len(split) != 3 {
return fmt.Errorf("invalid token length: %d, ensure the token is a valid JWT", len(split))
}
data, err := base64.RawURLEncoding.DecodeString(split[1])
if err != nil {
return fmt.Errorf("invalid token encoding: %w, ensure the token is a valid JWT", err)
}
err = json.Unmarshal(data, &jwt)
if err != nil {
return fmt.Errorf("invalid token content: %w, ensure the token is a valid JWT", err)
}
return nil
}
// check if token is expired
// https://github.com/cloudreve/frontend/blob/ddfacc1c31c49be03beb71de4cc114c8811038d6/src/session/index.ts#L177-L200
func (d *CloudreveV4) isTokenExpired() bool {
if d.RefreshToken == "" {
// login again if username and password is set
if d.canLogin() {
return true
}
// no refresh token, cannot refresh
return false
}
if d.AccessToken == "" {
return true
}
var (
err error
expires time.Time
)
// check if token is expired
if d.AccessExpires != "" {
// use expires field if possible to prevent timezone issue
// only available after login or refresh token
// 2025-08-28T02:43:07.645109985+08:00
expires, err = time.Parse(time.RFC3339Nano, d.AccessExpires)
if err != nil {
return false
}
} else {
// fallback to parse jwt
// if failed, disable the storage
var jwt AccessJWT
err = d.parseJWT(d.AccessToken, &jwt)
if err != nil {
d.GetStorage().SetStatus(fmt.Sprintf("Invalid AccessToken: %s", err.Error()))
op.MustSaveDriverStorage(d)
return false
}
// may be have timezone issue
expires = time.Unix(jwt.Exp, 0)
}
// add a 10 minutes safe margin
ddl := time.Now().Add(10 * time.Minute)
if expires.Before(ddl) {
// current access token expired, check if refresh token is expired
// warning: cannot parse refresh token from jwt, because the exp field is not standard
if d.RefreshExpires != "" {
refreshExpires, err := time.Parse(time.RFC3339Nano, d.RefreshExpires)
if err != nil {
return false
}
if refreshExpires.Before(time.Now()) {
// This session is no longer valid
if d.canLogin() {
// try to login again
return true
}
d.GetStorage().SetStatus("This session is no longer valid")
op.MustSaveDriverStorage(d)
return false
}
}
return true
}
return false
}
func (d *CloudreveV4) upLocal(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error { func (d *CloudreveV4) upLocal(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
var finish int64 = 0 var finish int64 = 0
var chunk int = 0 var chunk int = 0

View File

@ -0,0 +1,230 @@
package cnb_releases
import (
"bytes"
"context"
"fmt"
"io"
"mime/multipart"
"net/http"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/go-resty/resty/v2"
)
// CnbReleases is a driver that exposes the releases of a CNB (cnb.cool)
// repository as storage: each release is a folder, each asset a file.
type CnbReleases struct {
	model.Storage
	Addition
	// ref points at another CnbReleases storage whose token/config should
	// be reused for API calls (see InitReference/Request); nil otherwise.
	ref *CnbReleases
}
// Config returns the static driver configuration.
func (d *CnbReleases) Config() driver.Config {
	return config
}
// GetAddition exposes the user-configurable additional fields.
func (d *CnbReleases) GetAddition() driver.Additional {
	return &d.Addition
}
// Init performs no setup; the driver is stateless apart from Addition.
func (d *CnbReleases) Init(ctx context.Context) error {
	return nil
}
// InitReference links this storage to another CnbReleases storage so API
// requests are delegated to it (see Request).
func (d *CnbReleases) InitReference(storage driver.Driver) error {
	ref, ok := storage.(*CnbReleases)
	if !ok {
		return fmt.Errorf("ref: storage is not CnbReleases")
	}
	d.ref = ref
	return nil
}
// Drop releases the reference to any linked storage.
func (d *CnbReleases) Drop(ctx context.Context) error {
	d.ref = nil
	return nil
}
// List enumerates dir's content: at the repository root every release is
// shown as a folder; inside a release every asset is shown as a file.
func (d *CnbReleases) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	if dir.GetPath() == "/" {
		// Root directory: one folder per release.
		var releases ReleaseList
		if err := d.Request(http.MethodGet, "/{repo}/-/releases", func(req *resty.Request) {
			req.SetPathParam("repo", d.Repo)
		}, &releases); err != nil {
			return nil, err
		}
		return utils.SliceConvert(releases, func(r Release) (model.Obj, error) {
			display := r.Name
			if d.UseTagName {
				display = r.TagName
			}
			return &model.Object{
				ID:       r.ID,
				Name:     display,
				Size:     d.sumAssetsSize(r.Assets),
				Ctime:    r.CreatedAt,
				Modified: r.UpdatedAt,
				IsFolder: true,
			}, nil
		})
	}
	// Inside a release: list its assets by release ID.
	releaseID := dir.GetID()
	if releaseID == "" {
		return nil, errs.ObjectNotFound
	}
	var release Release
	if err := d.Request(http.MethodGet, "/{repo}/-/releases/{release_id}", func(req *resty.Request) {
		req.SetPathParam("repo", d.Repo)
		req.SetPathParam("release_id", releaseID)
	}, &release); err != nil {
		return nil, err
	}
	return utils.SliceConvert(release.Assets, func(a ReleaseAsset) (model.Obj, error) {
		return &Object{
			Object: model.Object{
				ID:       a.ID,
				Path:     a.Path,
				Name:     a.Name,
				Size:     a.Size,
				Ctime:    a.CreatedAt,
				Modified: a.UpdatedAt,
				IsFolder: false,
			},
			ParentID: dir.GetID(),
		}, nil
	})
}
// Link builds a direct download URL for an asset. Asset paths returned by
// the API are site-relative, so the public host is prefixed.
func (d *CnbReleases) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	return &model.Link{
		URL: "https://cnb.cool" + file.GetPath(),
	}, nil
}
// MakeDir creates a new release named dirName (tag name == release name)
// when invoked at the repository root; nested folders are not supported.
func (d *CnbReleases) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
	if parentDir.GetPath() != "/" {
		return errs.NotImplement
	}
	target := d.DefaultBranch
	if target == "" {
		target = "main" // fallback to "main" if not set
	}
	return d.Request(http.MethodPost, "/{repo}/-/releases", func(req *resty.Request) {
		req.SetPathParam("repo", d.Repo)
		req.SetBody(base.Json{
			"name":             dirName,
			"tag_name":         dirName,
			"target_commitish": target,
		})
	}, nil)
}
// Move is not supported by the CNB releases API.
func (d *CnbReleases) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	return nil, errs.NotImplement
}
// Rename updates a release's display name. Assets cannot be renamed, and
// with UseTagName the visible name is the immutable tag, so both of those
// cases report NotImplement.
func (d *CnbReleases) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
	if !srcObj.IsDir() || d.UseTagName {
		return errs.NotImplement
	}
	return d.Request(http.MethodPatch, "/{repo}/-/releases/{release_id}", func(req *resty.Request) {
		req.SetPathParam("repo", d.Repo)
		req.SetPathParam("release_id", srcObj.GetID())
		req.SetFormData(map[string]string{
			"name": newName,
		})
	}, nil)
}
// Copy is not supported by the CNB releases API.
func (d *CnbReleases) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	return nil, errs.NotImplement
}
// Remove deletes a whole release (when obj is a folder) or a single asset.
func (d *CnbReleases) Remove(ctx context.Context, obj model.Obj) error {
	if obj.IsDir() {
		return d.Request(http.MethodDelete, "/{repo}/-/releases/{release_id}", func(req *resty.Request) {
			req.SetPathParam("repo", d.Repo)
			req.SetPathParam("release_id", obj.GetID())
		}, nil)
	}
	// Asset deletion needs the parent release ID carried on our Object type.
	o, ok := obj.(*Object)
	if !ok {
		return fmt.Errorf("unable to get release ID")
	}
	return d.Request(http.MethodDelete, "/{repo}/-/releases/{release_id}/assets/{asset_id}", func(req *resty.Request) {
		req.SetPathParam("repo", d.Repo)
		req.SetPathParam("release_id", o.ParentID)
		req.SetPathParam("asset_id", obj.GetID())
	}, nil)
}
// Put uploads file as a new asset of the release dstDir: request an upload
// URL, stream the bytes as a multipart form POST, then hit the verify URL.
// NOTE(review): the `up` progress callback is never invoked here — confirm
// whether progress reporting is expected for this driver.
func (d *CnbReleases) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
	// 1. get upload info
	var resp ReleaseAssetUploadURL
	err := d.Request(http.MethodPost, "/{repo}/-/releases/{release_id}/asset-upload-url", func(req *resty.Request) {
		req.SetPathParam("repo", d.Repo)
		req.SetPathParam("release_id", dstDir.GetID())
		req.SetBody(base.Json{
			"asset_name": file.GetName(),
			"overwrite": true,
			"size": file.GetSize(),
		})
	}, &resp)
	if err != nil {
		return err
	}
	// 2. upload file
	// use multipart to create form file
	// Build only the multipart head/tail in memory and splice the file
	// stream between them, so the payload is never fully buffered.
	var b bytes.Buffer
	w := multipart.NewWriter(&b)
	_, err = w.CreateFormFile("file", file.GetName())
	if err != nil {
		return err
	}
	headSize := b.Len()
	err = w.Close()
	if err != nil {
		return err
	}
	head := bytes.NewReader(b.Bytes()[:headSize])
	tail := bytes.NewReader(b.Bytes()[headSize:])
	rateLimitedRd := driver.NewLimitedUploadStream(ctx, io.MultiReader(head, file, tail))
	// use net/http to upload file
	// Bound the request by the server-granted upload-URL lifetime.
	ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Duration(resp.ExpiresInSec+1)*time.Second)
	defer cancel()
	req, err := http.NewRequestWithContext(ctxWithTimeout, http.MethodPost, resp.UploadURL, rateLimitedRd)
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", w.FormDataContentType())
	req.Header.Set("User-Agent", base.UserAgent)
	httpResp, err := base.HttpClient.Do(req)
	if err != nil {
		return err
	}
	defer httpResp.Body.Close()
	if httpResp.StatusCode != http.StatusNoContent {
		return fmt.Errorf("upload file failed: %s", httpResp.Status)
	}
	// 3. verify upload
	return d.Request(http.MethodPost, resp.VerifyURL, nil, nil)
}
// Compile-time assertion that CnbReleases implements driver.Driver.
var _ driver.Driver = (*CnbReleases)(nil)

View File

@ -0,0 +1,26 @@
package cnb_releases
import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/op"
)
// Addition holds the user-supplied configuration for a CNB releases storage.
type Addition struct {
	driver.RootPath
	// Repo is the "owner/name" repository whose releases are browsed.
	Repo string `json:"repo" type:"string" required:"true"`
	// Token is the CNB API bearer token.
	Token string `json:"token" type:"string" required:"true"`
	UseTagName bool `json:"use_tag_name" type:"bool" default:"false" help:"Use tag name instead of release name"`
	DefaultBranch string `json:"default_branch" type:"string" default:"main" help:"Default branch for new releases"`
}
// config is the static driver descriptor registered with the op registry.
var config = driver.Config{
	Name: "CNB Releases",
	LocalSort: true,
	DefaultRoot: "/",
}
// init registers the driver factory at program start-up.
func init() {
	op.RegisterDriver(func() driver.Driver {
		return &CnbReleases{}
	})
}

View File

@ -0,0 +1,100 @@
package cnb_releases
import (
"time"
"github.com/OpenListTeam/OpenList/v4/internal/model"
)
// Object extends model.Object with the ID of the release that owns the
// asset, which the delete-asset endpoint requires.
type Object struct {
	model.Object
	ParentID string
}
// TagList is the response shape of the tag-listing endpoint.
type TagList []Tag
// Tag describes a git tag as returned by the CNB API.
type Tag struct {
	Commit struct {
		Author UserInfo `json:"author"`
		Commit CommitObject `json:"commit"`
		Committer UserInfo `json:"committer"`
		Parents []CommitParent `json:"parents"`
		Sha string `json:"sha"`
	} `json:"commit"`
	Name string `json:"name"`
	Target string `json:"target"`
	TargetType string `json:"target_type"`
	Verification TagObjectVerification `json:"verification"`
}
// UserInfo identifies a CNB user account.
type UserInfo struct {
	Freeze bool `json:"freeze"`
	Nickname string `json:"nickname"`
	Username string `json:"username"`
}
// CommitObject is the commit payload embedded in a Tag.
type CommitObject struct {
	Author Signature `json:"author"`
	CommentCount int `json:"comment_count"`
	Committer Signature `json:"committer"`
	Message string `json:"message"`
	Tree CommitObjectTree `json:"tree"`
	Verification CommitObjectVerification `json:"verification"`
}
// Signature is a git author/committer signature.
type Signature struct {
	Date time.Time `json:"date"`
	Email string `json:"email"`
	Name string `json:"name"`
}
// CommitObjectTree references a tree object by SHA.
type CommitObjectTree struct {
	Sha string `json:"sha"`
}
// CommitObjectVerification carries GPG verification details of a commit.
type CommitObjectVerification struct {
	Payload string `json:"payload"`
	Reason string `json:"reason"`
	Signature string `json:"signature"`
	Verified bool `json:"verified"`
	VerifiedAt string `json:"verified_at"`
}
// CommitParent shares the shape of a tree reference (SHA only).
type CommitParent = CommitObjectTree
// TagObjectVerification shares the shape of commit verification data.
type TagObjectVerification = CommitObjectVerification
// ReleaseList is the response shape of the release-listing endpoint.
type ReleaseList []Release
// Release describes one repository release; List maps it to a folder.
type Release struct {
	Assets []ReleaseAsset `json:"assets"`
	Author UserInfo `json:"author"`
	Body string `json:"body"`
	CreatedAt time.Time `json:"created_at"`
	Draft bool `json:"draft"`
	ID string `json:"id"`
	IsLatest bool `json:"is_latest"`
	Name string `json:"name"`
	Prerelease bool `json:"prerelease"`
	PublishedAt time.Time `json:"published_at"`
	TagCommitish string `json:"tag_commitish"`
	TagName string `json:"tag_name"`
	UpdatedAt time.Time `json:"updated_at"`
}
// ReleaseAsset describes one downloadable file attached to a release.
type ReleaseAsset struct {
	ContentType string `json:"content_type"`
	CreatedAt time.Time `json:"created_at"`
	ID string `json:"id"`
	Name string `json:"name"`
	Path string `json:"path"`
	Size int64 `json:"size"`
	UpdatedAt time.Time `json:"updated_at"`
	Uploader UserInfo `json:"uploader"`
}
// ReleaseAssetUploadURL is the response of the asset-upload-url endpoint.
type ReleaseAssetUploadURL struct {
	UploadURL string `json:"upload_url"`
	ExpiresInSec int `json:"expires_in_sec"`
	VerifyURL string `json:"verify_url"`
}

View File

@ -0,0 +1,58 @@
package cnb_releases
import (
"encoding/json"
"fmt"
"net/http"
"strings"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
log "github.com/sirupsen/logrus"
)
// Request performs an authenticated call against the CNB API (or against an
// absolute URL when path starts with "http") and, when resp is non-nil,
// unmarshals the JSON response body into it.
//
// When this storage references another CnbReleases storage, the call is
// delegated so the referenced storage's token is used.
func (d *CnbReleases) Request(method string, path string, callback base.ReqCallback, resp any) error {
	if d.ref != nil {
		return d.ref.Request(method, path, callback, resp)
	}
	var url string
	if strings.HasPrefix(path, "http") {
		url = path
	} else {
		url = "https://api.cnb.cool" + path
	}
	req := base.RestyClient.R()
	req.SetHeader("Accept", "application/json")
	req.SetAuthScheme("Bearer")
	req.SetAuthToken(d.Token)
	if callback != nil {
		callback(req)
	}
	res, err := req.Execute(method, url)
	// Check the transport error before touching the response: res may be
	// incomplete when err is non-nil.
	if err != nil {
		return err
	}
	log.Debugln(res.String())
	// Accept the success codes the API is known to return.
	if res.StatusCode() != http.StatusOK && res.StatusCode() != http.StatusCreated && res.StatusCode() != http.StatusNoContent {
		return fmt.Errorf("failed to request %s, status code: %d, message: %s", url, res.StatusCode(), res.String())
	}
	if resp != nil {
		err = json.Unmarshal(res.Body(), resp)
		if err != nil {
			return err
		}
	}
	return nil
}
// sumAssetsSize totals the byte sizes of a release's assets; this is shown
// as the folder size of the release.
func (d *CnbReleases) sumAssetsSize(assets []ReleaseAsset) int64 {
	var total int64
	for i := range assets {
		total += assets[i].Size
	}
	return total
}

203
drivers/degoo/driver.go Normal file
View File

@ -0,0 +1,203 @@
package degoo
import (
"context"
"fmt"
"net/http"
"strconv"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
)
// Degoo is a driver for the Degoo cloud storage, backed by its GraphQL API.
type Degoo struct {
	model.Storage
	Addition
	// client is the shared HTTP client used for both REST and GraphQL calls.
	client *http.Client
}
// Config returns the static driver configuration.
func (d *Degoo) Config() driver.Config {
	return config
}
// GetAddition exposes the user-configurable additional fields.
func (d *Degoo) GetAddition() driver.Additional {
	return &d.Addition
}
// Init wires up the HTTP client, obtains a valid access token (logging in
// or refreshing as needed) and loads the account's device list.
func (d *Degoo) Init(ctx context.Context) error {
	d.client = base.HttpClient
	// Ensure we have a valid token (will login if needed or refresh if expired)
	if err := d.ensureValidToken(ctx); err != nil {
		return fmt.Errorf("failed to initialize token: %w", err)
	}
	return d.getDevices(ctx)
}
// Drop has nothing to clean up.
func (d *Degoo) Drop(ctx context.Context) error {
	return nil
}
// List returns the children of dir via the getFileChildren5 API.
func (d *Degoo) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	items, err := d.getAllFileChildren5(ctx, dir.GetID())
	if err != nil {
		return nil, err
	}
	return utils.MustSliceConvert(items, func(s DegooFileItem) model.Obj {
		// Categories 1, 2 and 10 are treated as folder-like entries.
		// NOTE(review): category meanings are inferred from API behavior —
		// confirm against the Degoo API research notes.
		isFolder := s.Category == 2 || s.Category == 1 || s.Category == 10
		createTime, modTime, _ := humanReadableTimes(s.CreationTime, s.LastModificationTime, s.LastUploadTime)
		// Sizes arrive as strings; an unparsable value degrades to 0.
		size, err := strconv.ParseInt(s.Size, 10, 64)
		if err != nil {
			size = 0 // Default to 0 if size parsing fails
		}
		return &model.Object{
			ID: s.ID,
			Path: s.FilePath,
			Name: s.Name,
			Size: size,
			Modified: modTime,
			Ctime: createTime,
			IsFolder: isFolder,
		}
	}), nil
}
// Link resolves a direct download URL for file via the getOverlay4 API.
func (d *Degoo) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	item, err := d.getOverlay4(ctx, file.GetID())
	if err != nil {
		return nil, err
	}
	return &model.Link{URL: item.URL}, nil
}
// MakeDir creates a folder by registering a zero-size pseudo-file carrying
// the special folder checksum through the setUploadFile3 mutation.
func (d *Degoo) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
	// This is done by calling the setUploadFile3 API with a special checksum and size.
	const query = `mutation SetUploadFile3($Token: String!, $FileInfos: [FileInfoUpload3]!) { setUploadFile3(Token: $Token, FileInfos: $FileInfos) }`
	vars := map[string]interface{}{
		"Token": d.AccessToken,
		"FileInfos": []map[string]interface{}{
			{
				"Checksum":     folderChecksum,
				"Name":         dirName,
				"CreationTime": time.Now().UnixMilli(),
				"ParentID":     parentDir.GetID(),
				"Size":         0,
			},
		},
	}
	_, err := d.apiCall(ctx, "SetUploadFile3", query, vars)
	return err
}
// Move relocates srcObj under dstDir using setMoveFile with Copy=false.
func (d *Degoo) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	const query = `mutation SetMoveFile($Token: String!, $Copy: Boolean, $NewParentID: String!, $FileIDs: [String]!) { setMoveFile(Token: $Token, Copy: $Copy, NewParentID: $NewParentID, FileIDs: $FileIDs) }`
	vars := map[string]interface{}{
		"Token":       d.AccessToken,
		"Copy":        false,
		"NewParentID": dstDir.GetID(),
		"FileIDs":     []string{srcObj.GetID()},
	}
	if _, err := d.apiCall(ctx, "SetMoveFile", query, vars); err != nil {
		return nil, err
	}
	return srcObj, nil
}
// Rename changes the display name of srcObj via the setRenameFile mutation.
func (d *Degoo) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
	const query = `mutation SetRenameFile($Token: String!, $FileRenames: [FileRenameInfo]!) { setRenameFile(Token: $Token, FileRenames: $FileRenames) }`
	vars := map[string]interface{}{
		"Token": d.AccessToken,
		"FileRenames": []DegooFileRenameInfo{
			{
				ID:      srcObj.GetID(),
				NewName: newName,
			},
		},
	}
	_, err := d.apiCall(ctx, "SetRenameFile", query, vars)
	return err
}
// Copy is not supported: the Degoo API has no direct copy operation.
func (d *Degoo) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	// Copy is not implemented, Degoo API does not support direct copy.
	return nil, errs.NotImplement
}
// Remove deletes a file or folder (moves it to the trash) via setDeleteFile5.
func (d *Degoo) Remove(ctx context.Context, obj model.Obj) error {
	// Remove deletes a file or folder (moves to trash).
	const query = `mutation SetDeleteFile5($Token: String!, $IsInRecycleBin: Boolean!, $IDs: [IDType]!) { setDeleteFile5(Token: $Token, IsInRecycleBin: $IsInRecycleBin, IDs: $IDs) }`
	variables := map[string]interface{}{
		"Token": d.AccessToken,
		"IsInRecycleBin": false,
		"IDs": []map[string]string{{"FileID": obj.GetID()}},
	}
	_, err := d.apiCall(ctx, "SetDeleteFile5", query, variables)
	return err
}
// Put uploads file into dstDir.
//
// Flow: cache the stream locally, compute Degoo's seeded-SHA1 checksum,
// request S3 write authorization, upload the bytes unless the backend
// reports the content already exists (rapid upload), then register the
// file's metadata with setUploadFile3.
func (d *Degoo) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
	tmpF, err := file.CacheFullAndWriter(&up, nil)
	if err != nil {
		return err
	}
	parentID := dstDir.GetID()
	// Calculate the checksum for the file.
	checksum, err := d.checkSum(tmpF)
	if err != nil {
		return err
	}
	// 1. Get upload authorization via getBucketWriteAuth4.
	auths, err := d.getBucketWriteAuth4(ctx, file, parentID, checksum)
	if err != nil {
		return err
	}
	// Guard against an empty authorization list before indexing into it —
	// a malformed API response must not panic the server.
	if len(auths.GetBucketWriteAuth4) == 0 {
		return fmt.Errorf("getBucketWriteAuth4 returned no authorization data")
	}
	// 2. Upload file.
	// support rapid upload
	if auths.GetBucketWriteAuth4[0].Error != "Already exist!" {
		err = d.uploadS3(ctx, auths, tmpF, file, checksum)
		if err != nil {
			return err
		}
	}
	// 3. Register metadata with setUploadFile3.
	data, err := d.SetUploadFile3(ctx, file, parentID, checksum)
	if err != nil {
		return err
	}
	if !data.SetUploadFile3 {
		return fmt.Errorf("setUploadFile3 failed: %v", data)
	}
	return nil
}

27
drivers/degoo/meta.go Normal file
View File

@ -0,0 +1,27 @@
package degoo
import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/op"
)
// Addition holds the user-supplied configuration for a Degoo storage.
// Tokens are filled in automatically after the first successful login.
type Addition struct {
	driver.RootID
	Username string `json:"username" help:"Your Degoo account email"`
	Password string `json:"password" help:"Your Degoo account password"`
	RefreshToken string `json:"refresh_token" help:"Refresh token for automatic token renewal, obtained automatically"`
	AccessToken string `json:"access_token" help:"Access token for Degoo API, obtained automatically"`
}
// config is the static driver descriptor registered with the op registry.
var config = driver.Config{
	Name: "Degoo",
	LocalSort: true,
	DefaultRoot: "0",
	NoOverwriteUpload: true,
}
// init registers the driver factory at program start-up.
func init() {
	op.RegisterDriver(func() driver.Driver {
		return &Degoo{}
	})
}

110
drivers/degoo/types.go Normal file
View File

@ -0,0 +1,110 @@
package degoo
import (
"encoding/json"
)
// DegooLoginRequest represents the login request body.
type DegooLoginRequest struct {
	GenerateToken bool `json:"GenerateToken"`
	Username string `json:"Username"`
	Password string `json:"Password"`
}
// DegooLoginResponse represents a successful login response.
// Either Token (direct) or RefreshToken (exchangeable) is populated.
type DegooLoginResponse struct {
	Token string `json:"Token"`
	RefreshToken string `json:"RefreshToken"`
}
// DegooAccessTokenRequest represents the token refresh request body.
type DegooAccessTokenRequest struct {
	RefreshToken string `json:"RefreshToken"`
}
// DegooAccessTokenResponse represents the token refresh response.
type DegooAccessTokenResponse struct {
	AccessToken string `json:"AccessToken"`
}
// DegooFileItem represents a Degoo file or folder.
// Size and the three timestamps arrive as strings from the API.
type DegooFileItem struct {
	ID string `json:"ID"`
	ParentID string `json:"ParentID"`
	Name string `json:"Name"`
	Category int `json:"Category"`
	Size string `json:"Size"`
	URL string `json:"URL"`
	CreationTime string `json:"CreationTime"`
	LastModificationTime string `json:"LastModificationTime"`
	LastUploadTime string `json:"LastUploadTime"`
	MetadataID string `json:"MetadataID"`
	DeviceID int64 `json:"DeviceID"`
	FilePath string `json:"FilePath"`
	IsInRecycleBin bool `json:"IsInRecycleBin"`
}
// DegooErrors is one error entry of a GraphQL response.
type DegooErrors struct {
	Path []string `json:"path"`
	Data interface{} `json:"data"`
	ErrorType string `json:"errorType"`
	ErrorInfo interface{} `json:"errorInfo"`
	Message string `json:"message"`
}
// DegooGraphqlResponse is the common structure for GraphQL API responses.
type DegooGraphqlResponse struct {
	Data json.RawMessage `json:"data"`
	Errors []DegooErrors `json:"errors,omitempty"`
}
// DegooGetChildren5Data is the data field for getFileChildren5.
type DegooGetChildren5Data struct {
	GetFileChildren5 struct {
		Items []DegooFileItem `json:"Items"`
		NextToken string `json:"NextToken"`
	} `json:"getFileChildren5"`
}
// DegooGetOverlay4Data is the data field for getOverlay4.
type DegooGetOverlay4Data struct {
	GetOverlay4 DegooFileItem `json:"getOverlay4"`
}
// DegooFileRenameInfo represents a file rename operation.
type DegooFileRenameInfo struct {
	ID string `json:"ID"`
	NewName string `json:"NewName"`
}
// DegooFileIDs represents a list of file IDs for move operations.
type DegooFileIDs struct {
	FileIDs []string `json:"FileIDs"`
}
// DegooGetBucketWriteAuth4Data is the data field for GetBucketWriteAuth4.
// AuthData carries the pre-signed S3 POST policy used by uploadS3.
type DegooGetBucketWriteAuth4Data struct {
	GetBucketWriteAuth4 []struct {
		AuthData struct {
			PolicyBase64 string `json:"PolicyBase64"`
			Signature string `json:"Signature"`
			BaseURL string `json:"BaseURL"`
			KeyPrefix string `json:"KeyPrefix"`
			AccessKey struct {
				Key string `json:"Key"`
				Value string `json:"Value"`
			} `json:"AccessKey"`
			ACL string `json:"ACL"`
			AdditionalBody []struct {
				Key string `json:"Key"`
				Value string `json:"Value"`
			} `json:"AdditionalBody"`
		} `json:"AuthData"`
		Error interface{} `json:"Error"`
	} `json:"getBucketWriteAuth4"`
}
// DegooSetUploadFile3Data is the data field for SetUploadFile3.
type DegooSetUploadFile3Data struct {
	SetUploadFile3 bool `json:"setUploadFile3"`
}

198
drivers/degoo/upload.go Normal file
View File

@ -0,0 +1,198 @@
package degoo
import (
"bytes"
"context"
"crypto/sha1"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"mime/multipart"
"net/http"
"strconv"
"strings"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
)
// getBucketWriteAuth4 asks the API for a pre-signed S3 POST authorization
// for one file (name, checksum, size) destined for parentID; the returned
// AuthData feeds uploadS3.
func (d *Degoo) getBucketWriteAuth4(ctx context.Context, file model.FileStreamer, parentID string, checksum string) (*DegooGetBucketWriteAuth4Data, error) {
	const query = `query GetBucketWriteAuth4(
$Token: String!
$ParentID: String!
$StorageUploadInfos: [StorageUploadInfo2]
) {
getBucketWriteAuth4(
Token: $Token
ParentID: $ParentID
StorageUploadInfos: $StorageUploadInfos
) {
AuthData {
PolicyBase64
Signature
BaseURL
KeyPrefix
AccessKey {
Key
Value
}
ACL
AdditionalBody {
Key
Value
}
}
Error
}
}`
	variables := map[string]interface{}{
		"Token": d.AccessToken,
		"ParentID": parentID,
		// Sizes are sent as strings, matching the API schema.
		"StorageUploadInfos": []map[string]string{{
			"FileName": file.GetName(),
			"Checksum": checksum,
			"Size": strconv.FormatInt(file.GetSize(), 10),
		}}}
	data, err := d.apiCall(ctx, "GetBucketWriteAuth4", query, variables)
	if err != nil {
		return nil, err
	}
	var resp DegooGetBucketWriteAuth4Data
	err = json.Unmarshal(data, &resp)
	if err != nil {
		return nil, err
	}
	return &resp, nil
}
// checkSum calculates the SHA1-based checksum for Degoo upload API.
// The hash is seeded with a fixed 16-byte prefix before the file content,
// the digest is wrapped in a small binary framing (0x0a, length, digest,
// 16, 0 — NOTE(review): framing copied from observed client behavior),
// and the result is base64-encoded with '/' replaced by '_'.
func (d *Degoo) checkSum(file io.Reader) (string, error) {
	seed := []byte{13, 7, 2, 2, 15, 40, 75, 117, 13, 10, 19, 16, 29, 23, 3, 36}
	hasher := sha1.New()
	hasher.Write(seed)
	if _, err := utils.CopyWithBuffer(hasher, file); err != nil {
		return "", err
	}
	cs := hasher.Sum(nil)
	csBytes := []byte{10, byte(len(cs))}
	csBytes = append(csBytes, cs...)
	csBytes = append(csBytes, 16, 0)
	return strings.ReplaceAll(base64.StdEncoding.EncodeToString(csBytes), "/", "_"), nil
}
// uploadS3 performs the pre-signed S3 multipart-form POST described by the
// first authorization entry. The multipart head/tail are built in memory
// and the cached file is streamed between them so the payload is never
// fully buffered.
func (d *Degoo) uploadS3(ctx context.Context, auths *DegooGetBucketWriteAuth4Data, tmpF model.File, file model.FileStreamer, checksum string) error {
	a := auths.GetBucketWriteAuth4[0].AuthData
	// Rewind the cache: checkSum already consumed the stream once.
	_, err := tmpF.Seek(0, io.SeekStart)
	if err != nil {
		return err
	}
	// Object key layout: <prefix><ext>/<checksum>.<ext>
	ext := utils.Ext(file.GetName())
	key := fmt.Sprintf("%s%s/%s.%s", a.KeyPrefix, ext, checksum, ext)
	// Policy fields must appear in the form before the file part.
	var b bytes.Buffer
	w := multipart.NewWriter(&b)
	err = w.WriteField("key", key)
	if err != nil {
		return err
	}
	err = w.WriteField("acl", a.ACL)
	if err != nil {
		return err
	}
	err = w.WriteField("policy", a.PolicyBase64)
	if err != nil {
		return err
	}
	err = w.WriteField("signature", a.Signature)
	if err != nil {
		return err
	}
	err = w.WriteField(a.AccessKey.Key, a.AccessKey.Value)
	if err != nil {
		return err
	}
	for _, additional := range a.AdditionalBody {
		err = w.WriteField(additional.Key, additional.Value)
		if err != nil {
			return err
		}
	}
	err = w.WriteField("Content-Type", "")
	if err != nil {
		return err
	}
	_, err = w.CreateFormFile("file", key)
	if err != nil {
		return err
	}
	headSize := b.Len()
	err = w.Close()
	if err != nil {
		return err
	}
	head := bytes.NewReader(b.Bytes()[:headSize])
	tail := bytes.NewReader(b.Bytes()[headSize:])
	rateLimitedRd := driver.NewLimitedUploadStream(ctx, io.MultiReader(head, tmpF, tail))
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, a.BaseURL, rateLimitedRd)
	if err != nil {
		return err
	}
	req.Header.Add("ngsw-bypass", "1")
	req.Header.Add("Content-Type", w.FormDataContentType())
	res, err := d.client.Do(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	// S3 POST-policy uploads answer 204 No Content on success.
	if res.StatusCode != http.StatusNoContent {
		return fmt.Errorf("upload failed with status code %d", res.StatusCode)
	}
	return nil
}
// Compile-time assertion that Degoo implements driver.Driver.
var _ driver.Driver = (*Degoo)(nil)
// SetUploadFile3 registers an uploaded file's metadata (name, checksum,
// size, parent) so it appears in the tree; the response reports success
// as a single boolean.
func (d *Degoo) SetUploadFile3(ctx context.Context, file model.FileStreamer, parentID string, checksum string) (*DegooSetUploadFile3Data, error) {
	const query = `mutation SetUploadFile3($Token: String!, $FileInfos: [FileInfoUpload3]!) {
setUploadFile3(Token: $Token, FileInfos: $FileInfos)
}`
	variables := map[string]interface{}{
		"Token": d.AccessToken,
		// Numeric values are sent as strings, matching the API schema.
		"FileInfos": []map[string]string{{
			"Checksum": checksum,
			"CreationTime": strconv.FormatInt(file.CreateTime().UnixMilli(), 10),
			"Name": file.GetName(),
			"ParentID": parentID,
			"Size": strconv.FormatInt(file.GetSize(), 10),
		}}}
	data, err := d.apiCall(ctx, "SetUploadFile3", query, variables)
	if err != nil {
		return nil, err
	}
	var resp DegooSetUploadFile3Data
	err = json.Unmarshal(data, &resp)
	if err != nil {
		return nil, err
	}
	return &resp, nil
}

462
drivers/degoo/util.go Normal file
View File

@ -0,0 +1,462 @@
package degoo
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"fmt"
"net/http"
"strconv"
"strings"
"sync"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/op"
)
// Thanks to https://github.com/bernd-wechner/Degoo for API research.
const (
	// API endpoints
	loginURL = "https://rest-api.degoo.com/login"
	accessTokenURL = "https://rest-api.degoo.com/access-token/v2"
	apiURL = "https://production-appsync.degoo.com/graphql"
	// API configuration
	apiKey = "da2-vs6twz5vnjdavpqndtbzg3prra"
	// folderChecksum is the sentinel checksum that marks an entry as a
	// folder in setUploadFile3 (see MakeDir).
	folderChecksum = "CgAQAg"
	// Token management
	// tokenRefreshThreshold is how close to expiry a token may get before
	// it is considered stale and proactively refreshed.
	tokenRefreshThreshold = 5 * time.Minute
	// Rate limiting
	// minRequestInterval is the minimum spacing enforced between API calls.
	minRequestInterval = 1 * time.Second
	// Error messages
	errRateLimited = "rate limited (429), please try again later"
	errUnauthorized = "unauthorized access"
)
var (
	// Global rate limiting - protects against concurrent API calls
	lastRequestTime time.Time
	requestMutex sync.Mutex
)
// JWT payload structure for token expiration checking
type JWTPayload struct {
	UserID string `json:"userID"`
	Exp int64 `json:"exp"`
	Iat int64 `json:"iat"`
}
// Rate limiting helper functions
// applyRateLimit ensures minimum interval between API requests.
// The mutex is intentionally held across the sleep: concurrent callers are
// serialized so each departs at least minRequestInterval after the last.
func applyRateLimit() {
	requestMutex.Lock()
	defer requestMutex.Unlock()
	if !lastRequestTime.IsZero() {
		if elapsed := time.Since(lastRequestTime); elapsed < minRequestInterval {
			time.Sleep(minRequestInterval - elapsed)
		}
	}
	lastRequestTime = time.Now()
}
// HTTP request helper functions
// createJSONRequest builds an HTTP request whose body is the JSON encoding
// of body, with Content-Type and User-Agent headers preset.
func createJSONRequest(ctx context.Context, method, url string, body interface{}) (*http.Request, error) {
	payload, err := json.Marshal(body)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal request body: %w", err)
	}
	req, err := http.NewRequestWithContext(ctx, method, url, bytes.NewBuffer(payload))
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("User-Agent", base.UserAgent)
	return req, nil
}
// checkHTTPResponse maps common HTTP failure statuses to errors, labelled
// with the operation that produced them; nil means 200 OK.
func checkHTTPResponse(resp *http.Response, operation string) error {
	switch {
	case resp.StatusCode == http.StatusTooManyRequests:
		return fmt.Errorf("%s %s", operation, errRateLimited)
	case resp.StatusCode != http.StatusOK:
		return fmt.Errorf("%s failed: %s", operation, resp.Status)
	default:
		return nil
	}
}
// isTokenExpired checks if the JWT token is expired or will expire soon.
// A missing or unparsable token counts as expired so callers re-authenticate.
func (d *Degoo) isTokenExpired() bool {
	if d.AccessToken == "" {
		return true
	}
	payload, err := extractJWTPayload(d.AccessToken)
	if err != nil {
		return true // Invalid token format
	}
	// Check if token expires within the threshold
	expireTime := time.Unix(payload.Exp, 0)
	return time.Now().Add(tokenRefreshThreshold).After(expireTime)
}
// extractJWTPayload decodes the payload (second segment) of a JWT and
// unmarshals it into a JWTPayload. The signature is NOT verified; this is
// only used to read the expiry locally.
func extractJWTPayload(token string) (*JWTPayload, error) {
	segments := strings.Split(token, ".")
	if len(segments) != 3 {
		return nil, fmt.Errorf("invalid JWT format")
	}
	// Decode the payload (second part)
	raw, err := base64.RawURLEncoding.DecodeString(segments[1])
	if err != nil {
		return nil, fmt.Errorf("failed to decode JWT payload: %w", err)
	}
	var out JWTPayload
	if err := json.Unmarshal(raw, &out); err != nil {
		return nil, fmt.Errorf("failed to parse JWT payload: %w", err)
	}
	return &out, nil
}
// refreshToken attempts to refresh the access token using the refresh token.
// On success the new access token is persisted to the driver storage.
func (d *Degoo) refreshToken(ctx context.Context) error {
	if d.RefreshToken == "" {
		return fmt.Errorf("no refresh token available")
	}
	// Create request
	tokenReq := DegooAccessTokenRequest{RefreshToken: d.RefreshToken}
	req, err := createJSONRequest(ctx, "POST", accessTokenURL, tokenReq)
	if err != nil {
		return fmt.Errorf("failed to create refresh token request: %w", err)
	}
	// Execute request
	resp, err := d.client.Do(req)
	if err != nil {
		return fmt.Errorf("refresh token request failed: %w", err)
	}
	defer resp.Body.Close()
	// Check response
	if err := checkHTTPResponse(resp, "refresh token"); err != nil {
		return err
	}
	var accessTokenResp DegooAccessTokenResponse
	if err := json.NewDecoder(resp.Body).Decode(&accessTokenResp); err != nil {
		return fmt.Errorf("failed to parse access token response: %w", err)
	}
	if accessTokenResp.AccessToken == "" {
		return fmt.Errorf("empty access token received")
	}
	d.AccessToken = accessTokenResp.AccessToken
	// Save the updated token to storage
	op.MustSaveDriverStorage(d)
	return nil
}
// ensureValidToken ensures we have a valid, non-expired token.
// It prefers a refresh-token exchange and falls back to a full login with
// username/password.
// NOTE(review): when the token is stale but neither a refresh token nor
// credentials are configured, this returns nil and leaves the stale token
// in place (the later API call will surface the auth failure) — confirm
// this best-effort behavior is intended.
func (d *Degoo) ensureValidToken(ctx context.Context) error {
	// Check if token is expired or will expire soon
	if d.isTokenExpired() {
		// Try to refresh token first if we have a refresh token
		if d.RefreshToken != "" {
			if refreshErr := d.refreshToken(ctx); refreshErr == nil {
				return nil // Successfully refreshed
			} else {
				// If refresh failed, fall back to full login
				// NOTE(review): this prints to stdout; consider the
				// project logger instead.
				fmt.Printf("Token refresh failed, falling back to full login: %v\n", refreshErr)
			}
		}
		// Perform full login
		if d.Username != "" && d.Password != "" {
			return d.login(ctx)
		}
	}
	return nil
}
// login performs the login process and retrieves the access token.
//
// Degoo answers with either a refresh token — exchanged here for an access
// token via the access-token endpoint — or a direct token. On success both
// tokens are persisted to the driver storage.
func (d *Degoo) login(ctx context.Context) error {
	if d.Username == "" || d.Password == "" {
		return fmt.Errorf("username or password not provided")
	}
	creds := DegooLoginRequest{
		GenerateToken: true,
		Username:      d.Username,
		Password:      d.Password,
	}
	jsonCreds, err := json.Marshal(creds)
	if err != nil {
		return fmt.Errorf("failed to serialize login credentials: %w", err)
	}
	req, err := http.NewRequestWithContext(ctx, "POST", loginURL, bytes.NewBuffer(jsonCreds))
	if err != nil {
		return fmt.Errorf("failed to create login request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("User-Agent", base.UserAgent)
	req.Header.Set("Origin", "https://app.degoo.com")
	resp, err := d.client.Do(req)
	if err != nil {
		return fmt.Errorf("login request failed: %w", err)
	}
	defer resp.Body.Close()
	// Handle rate limiting (429 Too Many Requests)
	if resp.StatusCode == http.StatusTooManyRequests {
		return fmt.Errorf("login rate limited (429), please try again later")
	}
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("login failed: %s", resp.Status)
	}
	var loginResp DegooLoginResponse
	if err := json.NewDecoder(resp.Body).Decode(&loginResp); err != nil {
		return fmt.Errorf("failed to parse login response: %w", err)
	}
	if loginResp.RefreshToken != "" {
		// Exchange the refresh token for an access token. Reuse the shared
		// helper (same as refreshToken) so Content-Type/User-Agent are set.
		tokenReq := DegooAccessTokenRequest{RefreshToken: loginResp.RefreshToken}
		tokenReqHTTP, err := createJSONRequest(ctx, "POST", accessTokenURL, tokenReq)
		if err != nil {
			return fmt.Errorf("failed to create access token request: %w", err)
		}
		tokenResp, err := d.client.Do(tokenReqHTTP)
		if err != nil {
			return fmt.Errorf("failed to get access token: %w", err)
		}
		defer tokenResp.Body.Close()
		// Fail fast on a non-2xx status instead of decoding an error page.
		if err := checkHTTPResponse(tokenResp, "get access token"); err != nil {
			return err
		}
		var accessTokenResp DegooAccessTokenResponse
		if err := json.NewDecoder(tokenResp.Body).Decode(&accessTokenResp); err != nil {
			return fmt.Errorf("failed to parse access token response: %w", err)
		}
		// Reject a structurally valid but empty token.
		if accessTokenResp.AccessToken == "" {
			return fmt.Errorf("login failed, no valid token returned")
		}
		d.AccessToken = accessTokenResp.AccessToken
		d.RefreshToken = loginResp.RefreshToken // Save refresh token
	} else if loginResp.Token != "" {
		d.AccessToken = loginResp.Token
		d.RefreshToken = "" // Direct token, no refresh token available
	} else {
		return fmt.Errorf("login failed, no valid token returned")
	}
	// Save the updated tokens to storage
	op.MustSaveDriverStorage(d)
	return nil
}
// apiCall performs a Degoo GraphQL API request.
// It serializes requests through the global rate limiter, refreshes the
// token when needed, then rewrites the Token variable before executing.
func (d *Degoo) apiCall(ctx context.Context, operationName, query string, variables map[string]interface{}) (json.RawMessage, error) {
	// Apply rate limiting
	applyRateLimit()
	// Ensure we have a valid token before making the API call
	if err := d.ensureValidToken(ctx); err != nil {
		return nil, fmt.Errorf("failed to ensure valid token: %w", err)
	}
	// Update the Token in variables if it exists (after potential refresh)
	d.updateTokenInVariables(variables)
	return d.executeGraphQLRequest(ctx, operationName, query, variables)
}
// updateTokenInVariables rewrites the "Token" entry of the GraphQL
// variables, when present, with the current access token.
func (d *Degoo) updateTokenInVariables(variables map[string]interface{}) {
	if variables == nil {
		return
	}
	if _, ok := variables["Token"]; ok {
		variables["Token"] = d.AccessToken
	}
}
// executeGraphQLRequest executes a GraphQL request with retry logic.
// The retry itself happens in handleGraphQLError (re-login on Unauthorized).
func (d *Degoo) executeGraphQLRequest(ctx context.Context, operationName, query string, variables map[string]interface{}) (json.RawMessage, error) {
	reqBody := map[string]interface{}{
		"operationName": operationName,
		"query": query,
		"variables": variables,
	}
	// Create and configure request
	req, err := createJSONRequest(ctx, "POST", apiURL, reqBody)
	if err != nil {
		return nil, err
	}
	// Set Degoo-specific headers
	req.Header.Set("x-api-key", apiKey)
	if d.AccessToken != "" {
		req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", d.AccessToken))
	}
	// Execute request
	resp, err := d.client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("GraphQL API request failed: %w", err)
	}
	defer resp.Body.Close()
	// Check for HTTP errors
	if err := checkHTTPResponse(resp, "GraphQL API"); err != nil {
		return nil, err
	}
	// Parse GraphQL response
	var degooResp DegooGraphqlResponse
	if err := json.NewDecoder(resp.Body).Decode(&degooResp); err != nil {
		return nil, fmt.Errorf("failed to decode GraphQL response: %w", err)
	}
	// Handle GraphQL errors
	if len(degooResp.Errors) > 0 {
		return d.handleGraphQLError(ctx, degooResp.Errors[0], operationName, query, variables)
	}
	return degooResp.Data, nil
}
// handleGraphQLError handles GraphQL-level errors with retry logic.
//
// For an "Unauthorized" error it performs a fresh login and replays the
// original operation through apiCall (which refreshes the Token variable).
// Any other GraphQL error is surfaced to the caller as-is.
//
// NOTE(review): the retry is unbounded — if login succeeds but the API keeps
// answering Unauthorized, apiCall -> executeGraphQLRequest -> this function
// recurses indefinitely. Consider capping retries.
func (d *Degoo) handleGraphQLError(ctx context.Context, gqlError DegooErrors, operationName, query string, variables map[string]interface{}) (json.RawMessage, error) {
	if gqlError.ErrorType == "Unauthorized" {
		// Re-login and retry
		if err := d.login(ctx); err != nil {
			return nil, fmt.Errorf("%s, login failed: %w", errUnauthorized, err)
		}
		// Update token in variables and retry
		d.updateTokenInVariables(variables)
		return d.apiCall(ctx, operationName, query, variables)
	}
	return nil, fmt.Errorf("GraphQL API error: %s", gqlError.Message)
}
// humanReadableTimes converts Degoo timestamps to Go time.Time.
func humanReadableTimes(creation, modification, upload string) (cTime, mTime, uTime time.Time) {
cTime, _ = time.Parse(time.RFC3339, creation)
if modification != "" {
modMillis, _ := strconv.ParseInt(modification, 10, 64)
mTime = time.Unix(0, modMillis*int64(time.Millisecond))
}
if upload != "" {
upMillis, _ := strconv.ParseInt(upload, 10, 64)
uTime = time.Unix(0, upMillis*int64(time.Millisecond))
}
return cTime, mTime, uTime
}
// getDevices fetches the top-level device/folder listing. When the
// configured root is still the placeholder "0", the first item's ParentID is
// promoted to be the effective root folder and the storage is persisted.
func (d *Degoo) getDevices(ctx context.Context) error {
	const query = `query GetFileChildren5($Token: String! $ParentID: String $AllParentIDs: [String] $Limit: Int! $Order: Int! $NextToken: String ) { getFileChildren5(Token: $Token ParentID: $ParentID AllParentIDs: $AllParentIDs Limit: $Limit Order: $Order NextToken: $NextToken) { Items { ParentID } NextToken } }`

	vars := map[string]interface{}{
		"Token":    d.AccessToken,
		"ParentID": "0",
		"Limit":    10,
		"Order":    3,
	}
	data, err := d.apiCall(ctx, "GetFileChildren5", query, vars)
	if err != nil {
		return err
	}

	var parsed DegooGetChildren5Data
	if err := json.Unmarshal(data, &parsed); err != nil {
		return fmt.Errorf("failed to parse device list: %w", err)
	}

	// Only rewrite (and persist) the root when it is still the placeholder.
	if d.RootFolderID != "0" {
		return nil
	}
	if items := parsed.GetFileChildren5.Items; len(items) > 0 {
		d.RootFolderID = items[0].ParentID
	}
	op.MustSaveDriverStorage(d)
	return nil
}
// getAllFileChildren5 fetches every child of parentID, following the API's
// NextToken-based pagination until the server reports no further pages.
func (d *Degoo) getAllFileChildren5(ctx context.Context, parentID string) ([]DegooFileItem, error) {
	const query = `query GetFileChildren5($Token: String! $ParentID: String $AllParentIDs: [String] $Limit: Int! $Order: Int! $NextToken: String ) { getFileChildren5(Token: $Token ParentID: $ParentID AllParentIDs: $AllParentIDs Limit: $Limit Order: $Order NextToken: $NextToken) { Items { ID ParentID Name Category Size CreationTime LastModificationTime LastUploadTime FilePath IsInRecycleBin DeviceID MetadataID } NextToken } }`

	var (
		items     []DegooFileItem
		nextToken string
	)
	for {
		vars := map[string]interface{}{
			"Token":    d.AccessToken,
			"ParentID": parentID,
			"Limit":    1000,
			"Order":    3,
		}
		// Only include NextToken once the server has handed one out.
		if nextToken != "" {
			vars["NextToken"] = nextToken
		}

		data, err := d.apiCall(ctx, "GetFileChildren5", query, vars)
		if err != nil {
			return nil, err
		}
		var page DegooGetChildren5Data
		if err := json.Unmarshal(data, &page); err != nil {
			return nil, err
		}

		items = append(items, page.GetFileChildren5.Items...)
		nextToken = page.GetFileChildren5.NextToken
		if nextToken == "" {
			return items, nil
		}
	}
}
// getOverlay4 fetches metadata for a single item by its file ID. On any
// failure the zero DegooFileItem is returned alongside the error.
func (d *Degoo) getOverlay4(ctx context.Context, id string) (DegooFileItem, error) {
	const query = `query GetOverlay4($Token: String!, $ID: IDType!) { getOverlay4(Token: $Token, ID: $ID) { ID ParentID Name Category Size CreationTime LastModificationTime LastUploadTime URL FilePath IsInRecycleBin DeviceID MetadataID } }`

	vars := map[string]interface{}{
		"Token": d.AccessToken,
		// The API expects the ID wrapped in an IDType object.
		"ID": map[string]string{"FileID": id},
	}

	data, err := d.apiCall(ctx, "GetOverlay4", query, vars)
	if err != nil {
		return DegooFileItem{}, err
	}

	var parsed DegooGetOverlay4Data
	if err := json.Unmarshal(data, &parsed); err != nil {
		return DegooFileItem{}, fmt.Errorf("failed to parse item metadata: %w", err)
	}
	return parsed.GetOverlay4, nil
}

View File

@ -13,7 +13,7 @@ type Addition struct {
ClientSecret string `json:"client_secret" required:"false" help:"Keep it empty if you don't have one"` ClientSecret string `json:"client_secret" required:"false" help:"Keep it empty if you don't have one"`
AccessToken string AccessToken string
RefreshToken string `json:"refresh_token" required:"true"` RefreshToken string `json:"refresh_token" required:"true"`
RootNamespaceId string RootNamespaceId string `json:"RootNamespaceId" required:"false"`
} }
var config = driver.Config{ var config = driver.Config{

View File

@ -175,6 +175,13 @@ func (d *Dropbox) finishUploadSession(ctx context.Context, toPath string, offset
} }
req.Header.Set("Content-Type", "application/octet-stream") req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Authorization", "Bearer "+d.AccessToken) req.Header.Set("Authorization", "Bearer "+d.AccessToken)
if d.RootNamespaceId != "" {
apiPathRootJson, err := d.buildPathRootHeader()
if err != nil {
return err
}
req.Header.Set("Dropbox-API-Path-Root", apiPathRootJson)
}
uploadFinishArgs := UploadFinishArgs{ uploadFinishArgs := UploadFinishArgs{
Commit: struct { Commit: struct {
@ -219,6 +226,13 @@ func (d *Dropbox) startUploadSession(ctx context.Context) (string, error) {
} }
req.Header.Set("Content-Type", "application/octet-stream") req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Authorization", "Bearer "+d.AccessToken) req.Header.Set("Authorization", "Bearer "+d.AccessToken)
if d.RootNamespaceId != "" {
apiPathRootJson, err := d.buildPathRootHeader()
if err != nil {
return "", err
}
req.Header.Set("Dropbox-API-Path-Root", apiPathRootJson)
}
req.Header.Set("Dropbox-API-Arg", "{\"close\":false}") req.Header.Set("Dropbox-API-Arg", "{\"close\":false}")
res, err := base.HttpClient.Do(req) res, err := base.HttpClient.Do(req)
@ -233,3 +247,11 @@ func (d *Dropbox) startUploadSession(ctx context.Context) (string, error) {
_ = res.Body.Close() _ = res.Body.Close()
return sessionId, nil return sessionId, nil
} }
func (d *Dropbox) buildPathRootHeader() (string, error) {
return utils.Json.MarshalToString(map[string]interface{}{
".tag": "root",
"root": d.RootNamespaceId,
})
}

View File

@ -296,6 +296,23 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreame
return nil, err return nil, err
} }
upToken := utils.Json.Get(res, "upToken").ToString() upToken := utils.Json.Get(res, "upToken").ToString()
if upToken == "-1" {
// 支持秒传
var resp UploadTokenRapidResp
err := utils.Json.Unmarshal(res, &resp)
if err != nil {
return nil, err
}
return &model.Object{
ID: strconv.FormatInt(resp.Map.FileID, 10),
Name: resp.Map.FileName,
Size: s.GetSize(),
Modified: s.ModTime(),
Ctime: s.CreateTime(),
IsFolder: false,
HashInfo: utils.NewHashInfo(utils.MD5, etag),
}, nil
}
now := time.Now() now := time.Now()
key := fmt.Sprintf("disk/%d/%d/%d/%s/%016d", now.Year(), now.Month(), now.Day(), d.account, now.UnixMilli()) key := fmt.Sprintf("disk/%d/%d/%d/%s/%016d", now.Year(), now.Month(), now.Day(), d.account, now.UnixMilli())
reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{

View File

@ -32,6 +32,7 @@ func init() {
Name: "ILanZou", Name: "ILanZou",
DefaultRoot: "0", DefaultRoot: "0",
LocalSort: true, LocalSort: true,
NoOverwriteUpload: true,
}, },
conf: Conf{ conf: Conf{
base: "https://api.ilanzou.com", base: "https://api.ilanzou.com",
@ -50,6 +51,7 @@ func init() {
Name: "FeijiPan", Name: "FeijiPan",
DefaultRoot: "0", DefaultRoot: "0",
LocalSort: true, LocalSort: true,
NoOverwriteUpload: true,
}, },
conf: Conf{ conf: Conf{
base: "https://api.feijipan.com", base: "https://api.feijipan.com",

View File

@ -43,6 +43,18 @@ type Part struct {
ETag string `json:"etag"` ETag string `json:"etag"`
} }
type UploadTokenRapidResp struct {
Msg string `json:"msg"`
Code int `json:"code"`
UpToken string `json:"upToken"`
Map struct {
FileIconID int `json:"fileIconId"`
FileName string `json:"fileName"`
FileIcon string `json:"fileIcon"`
FileID int64 `json:"fileId"`
} `json:"map"`
}
type UploadResultResp struct { type UploadResultResp struct {
Msg string `json:"msg"` Msg string `json:"msg"`
Code int `json:"code"` Code int `json:"code"`

View File

@ -374,6 +374,13 @@ func (d *Local) Remove(ctx context.Context, obj model.Obj) error {
err = os.Remove(obj.GetPath()) err = os.Remove(obj.GetPath())
} }
} else { } else {
if !utils.Exists(d.RecycleBinPath) {
err = os.MkdirAll(d.RecycleBinPath, 0755)
if err != nil {
return err
}
}
dstPath := filepath.Join(d.RecycleBinPath, obj.GetName()) dstPath := filepath.Join(d.RecycleBinPath, obj.GetName())
if utils.Exists(dstPath) { if utils.Exists(dstPath) {
dstPath = filepath.Join(d.RecycleBinPath, obj.GetName()+"_"+time.Now().Format("20060102150405")) dstPath = filepath.Join(d.RecycleBinPath, obj.GetName()+"_"+time.Now().Format("20060102150405"))

435
drivers/mediafire/driver.go Normal file
View File

@ -0,0 +1,435 @@
package mediafire
/*
Package mediafire
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-11
D@' 3z K!7 - The King Of Cracking
Modifications by ILoveScratch2<ilovescratch@foxmail.com>
Date: 2025-09-14
*/
import (
"context"
"fmt"
"math/rand"
"net/http"
"os"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/cron"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
)
// Mediafire implements driver.Driver for the MediaFire cloud storage service.
type Mediafire struct {
	model.Storage
	Addition
	cron *cron.Cron // background session-renewal job created in Init
	actionToken string // presumably a cached upload action token — TODO confirm where it is set
	appBase string // web-app origin, e.g. https://app.mediafire.com
	apiBase string // REST API base, e.g. https://www.mediafire.com/api/1.5
	hostBase string // main site origin, e.g. https://www.mediafire.com
	maxRetries int // retry budget for API helpers
	secChUa string // browser-like Sec-Ch-Ua header value
	secChUaPlatform string // browser-like Sec-Ch-Ua-Platform header value
	userAgent string // browser-like User-Agent header value
}
// Config returns the static driver configuration declared in meta.go.
func (d *Mediafire) Config() driver.Config {
	return config
}
// GetAddition exposes the user-configurable settings to the driver framework.
func (d *Mediafire) GetAddition() driver.Additional {
	return &d.Addition
}
// Init validates the configured credentials and establishes a MediaFire
// session. It verifies that SessionToken and Cookie are set, probes the
// stored session, renews it when the probe fails, and starts a background
// cron that renews the session token every 6-9 minutes so it does not
// expire during long-running operation.
//
// Fixes two defects of the previous version: the renewal cron was only
// started when the initial probe FAILED (a healthy session was never
// renewed and eventually expired), and a failed renewToken was silently
// ignored, leaving the driver initialized with a dead session.
func (d *Mediafire) Init(ctx context.Context) error {
	if d.SessionToken == "" {
		return fmt.Errorf("Init :: [MediaFire] {critical} missing sessionToken")
	}
	if d.Cookie == "" {
		return fmt.Errorf("Init :: [MediaFire] {critical} missing Cookie")
	}

	if _, err := d.getSessionToken(ctx); err != nil {
		// The stored session may simply have expired; try renewing it
		// before failing initialization.
		if renewErr := d.renewToken(ctx); renewErr != nil {
			return fmt.Errorf("Init :: [MediaFire] failed to establish session: %w", renewErr)
		}
	}

	// Always keep the session alive, regardless of how it was obtained.
	// A randomized 6-9 minute interval staggers renewals across storages.
	num := rand.Intn(4) + 6
	d.cron = cron.NewCron(time.Minute * time.Duration(num))
	d.cron.Do(func() {
		// Best-effort renewal; the next API call re-validates anyway.
		_ = d.renewToken(ctx)
	})

	return nil
}
// Drop releases driver resources when the storage is unloaded. It stops the
// background session-renewal cron started in Init; previously the cron was
// left running, leaking a goroutine for every dropped storage.
func (d *Mediafire) Drop(ctx context.Context) error {
	if d.cron != nil {
		d.cron.Stop()
		d.cron = nil
	}
	return nil
}
// List enumerates the contents of dir (folders first, then files, as
// returned by the API) and converts each entry to a model.Obj.
func (d *Mediafire) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	files, err := d.getFiles(ctx, dir.GetID())
	if err != nil {
		return nil, err
	}
	objs := make([]model.Obj, 0, len(files))
	for i := range files {
		objs = append(objs, d.fileToObj(files[i]))
	}
	return objs, nil
}
// Link resolves a direct download URL for file.
//
// It first asks the API for a direct-download link, then probes that URL
// with a non-redirecting client: a 302 answer means the final location is in
// the Location header, which replaces the original URL. Browser-like headers
// are attached to the returned link — presumably MediaFire validates
// Origin/Referer/User-Agent on downloads; TODO confirm which are required.
// NOTE: header keys are deliberately written in lowercase map-literal form;
// http.Header.Set would canonicalize them.
func (d *Mediafire) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	downloadUrl, err := d.getDirectDownloadLink(ctx, file.GetID())
	if err != nil {
		return nil, err
	}
	// Probe without following redirects so we can hand out the final URL.
	res, err := base.NoRedirectClient.R().SetDoNotParseResponse(true).SetContext(ctx).Get(downloadUrl)
	if err != nil {
		return nil, err
	}
	defer func() {
		_ = res.RawBody().Close()
	}()
	if res.StatusCode() == 302 {
		downloadUrl = res.Header().Get("location")
	}
	return &model.Link{
		URL: downloadUrl,
		Header: http.Header{
			"Origin":             []string{d.appBase},
			"Referer":            []string{d.appBase + "/"},
			"sec-ch-ua":          []string{d.secChUa},
			"sec-ch-ua-platform": []string{d.secChUaPlatform},
			"User-Agent":         []string{d.userAgent},
			//"User-Agent": []string{base.UserAgent},
		},
	}, nil
}
// MakeDir creates a folder named dirName inside parentDir and returns the
// newly created folder as an object.
func (d *Mediafire) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
	var resp MediafireFolderCreateResponse
	if _, err := d.postForm("/folder/create.php", map[string]string{
		"session_token":   d.SessionToken,
		"response_format": "json",
		"parent_key":      parentDir.GetID(),
		"foldername":      dirName,
	}, &resp); err != nil {
		return nil, err
	}
	if resp.Response.Result != "Success" {
		return nil, fmt.Errorf("MediaFire API error: %s", resp.Response.Result)
	}

	// CreatedUTC is an ISO-8601 timestamp; a parse failure yields zero time.
	created, _ := time.Parse("2006-01-02T15:04:05Z", resp.Response.CreatedUTC)

	return &model.ObjThumb{
		Object: model.Object{
			ID:       resp.Response.FolderKey,
			Name:     resp.Response.Name,
			Size:     0,
			Modified: created,
			Ctime:    created,
			IsFolder: true,
		},
		Thumbnail: model.Thumbnail{},
	}, nil
}
// Move relocates srcObj into dstDir and returns the (unchanged) source
// object. Folders and files use different endpoints and form-key names.
func (d *Mediafire) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	endpoint := "/file/move.php"
	form := map[string]string{
		"session_token":   d.SessionToken,
		"response_format": "json",
		"quick_key":       srcObj.GetID(),
		"folder_key":      dstDir.GetID(),
	}
	if srcObj.IsDir() {
		endpoint = "/folder/move.php"
		form = map[string]string{
			"session_token":   d.SessionToken,
			"response_format": "json",
			"folder_key_src":  srcObj.GetID(),
			"folder_key_dst":  dstDir.GetID(),
		}
	}

	var resp MediafireMoveResponse
	if _, err := d.postForm(endpoint, form, &resp); err != nil {
		return nil, err
	}
	if resp.Response.Result != "Success" {
		return nil, fmt.Errorf("MediaFire API error: %s", resp.Response.Result)
	}
	return srcObj, nil
}
// Rename changes srcObj's name to newName and returns the renamed object.
// Folders and files use different endpoints and name-field keys.
func (d *Mediafire) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
	endpoint := "/file/update.php"
	form := map[string]string{
		"session_token":   d.SessionToken,
		"response_format": "json",
		"quick_key":       srcObj.GetID(),
		"filename":        newName,
	}
	if srcObj.IsDir() {
		endpoint = "/folder/update.php"
		form = map[string]string{
			"session_token":   d.SessionToken,
			"response_format": "json",
			"folder_key":      srcObj.GetID(),
			"foldername":      newName,
		}
	}

	var resp MediafireRenameResponse
	if _, err := d.postForm(endpoint, form, &resp); err != nil {
		return nil, err
	}
	if resp.Response.Result != "Success" {
		return nil, fmt.Errorf("MediaFire API error: %s", resp.Response.Result)
	}

	// Same identity and metadata as the source, only the name changes.
	return &model.ObjThumb{
		Object: model.Object{
			ID:       srcObj.GetID(),
			Name:     newName,
			Size:     srcObj.GetSize(),
			Modified: srcObj.ModTime(),
			Ctime:    srcObj.CreateTime(),
			IsFolder: srcObj.IsDir(),
		},
		Thumbnail: model.Thumbnail{},
	}, nil
}
// Copy duplicates srcObj into dstDir and returns the new copy. The API
// reports the new key in NewFolderKeys (folders) or NewQuickKeys (files);
// when neither is present the returned object has an empty ID.
func (d *Mediafire) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	endpoint := "/file/copy.php"
	form := map[string]string{
		"session_token":   d.SessionToken,
		"response_format": "json",
		"quick_key":       srcObj.GetID(),
		"folder_key":      dstDir.GetID(),
	}
	if srcObj.IsDir() {
		endpoint = "/folder/copy.php"
		form = map[string]string{
			"session_token":   d.SessionToken,
			"response_format": "json",
			"folder_key_src":  srcObj.GetID(),
			"folder_key_dst":  dstDir.GetID(),
		}
	}

	var resp MediafireCopyResponse
	if _, err := d.postForm(endpoint, form, &resp); err != nil {
		return nil, err
	}
	if resp.Response.Result != "Success" {
		return nil, fmt.Errorf("MediaFire API error: %s", resp.Response.Result)
	}

	newID := ""
	if srcObj.IsDir() {
		if keys := resp.Response.NewFolderKeys; len(keys) > 0 {
			newID = keys[0]
		}
	} else if keys := resp.Response.NewQuickKeys; len(keys) > 0 {
		newID = keys[0]
	}

	return &model.ObjThumb{
		Object: model.Object{
			ID:       newID,
			Name:     srcObj.GetName(),
			Size:     srcObj.GetSize(),
			Modified: srcObj.ModTime(),
			Ctime:    srcObj.CreateTime(),
			IsFolder: srcObj.IsDir(),
		},
		Thumbnail: model.Thumbnail{},
	}, nil
}
// Remove deletes obj. Folders go through /folder/delete.php and files
// through /file/delete.php, each with its own key name.
func (d *Mediafire) Remove(ctx context.Context, obj model.Obj) error {
	endpoint := "/file/delete.php"
	form := map[string]string{
		"session_token":   d.SessionToken,
		"response_format": "json",
		"quick_key":       obj.GetID(),
	}
	if obj.IsDir() {
		endpoint = "/folder/delete.php"
		form = map[string]string{
			"session_token":   d.SessionToken,
			"response_format": "json",
			"folder_key":      obj.GetID(),
		}
	}

	var resp MediafireRemoveResponse
	if _, err := d.postForm(endpoint, form, &resp); err != nil {
		return err
	}
	if resp.Response.Result != "Success" {
		return fmt.Errorf("MediaFire API error: %s", resp.Response.Result)
	}
	return nil
}
// Put uploads file into dstDir, discarding the resulting object.
// It delegates to PutResult, which implements the actual upload flow.
func (d *Mediafire) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
	_, err := d.PutResult(ctx, dstDir, file, up)
	return err
}
// PutResult uploads file into dstDir and returns the resulting object.
//
// Flow: cache the stream to a local file, hash it with SHA-256, then ask the
// API (uploadCheck) whether the content already exists. If the hash is
// already in the account, the existing file's info is returned without
// re-uploading ("instant" upload). Otherwise the remaining units are sent via
// the resumable upload API and the upload key is polled until MediaFire
// reports the final quickkey.
func (d *Mediafire) PutResult(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	// Materialize the stream on disk; hashing and unit upload need seeking.
	tempFile, err := file.CacheFullAndWriter(&up, nil)
	if err != nil {
		return nil, err
	}
	osFile, ok := tempFile.(*os.File)
	if !ok {
		return nil, fmt.Errorf("expected *os.File, got %T", tempFile)
	}
	fileHash, err := d.calculateSHA256(osFile)
	if err != nil {
		return nil, err
	}
	// Ask the server what it already has for this (name, size, hash, folder).
	checkResp, err := d.uploadCheck(ctx, file.GetName(), file.GetSize(), fileHash, dstDir.GetID())
	if err != nil {
		return nil, err
	}
	// Nothing left to transfer — report completion immediately.
	if checkResp.Response.ResumableUpload.AllUnitsReady == "yes" {
		up(100.0)
	}
	// Content already present in the account: reuse it (instant upload).
	// On lookup failure we fall through to a normal upload.
	if checkResp.Response.HashExists == "yes" && checkResp.Response.InAccount == "yes" {
		up(100.0)
		existingFile, err := d.getExistingFileInfo(ctx, fileHash, file.GetName(), dstDir.GetID())
		if err == nil {
			return existingFile, nil
		}
	}
	var pollKey string
	if checkResp.Response.ResumableUpload.AllUnitsReady != "yes" {
		// Transfer the missing units; returns the key to poll for completion.
		var err error
		pollKey, err = d.uploadUnits(ctx, osFile, checkResp, file.GetName(), fileHash, dstDir.GetID(), up)
		if err != nil {
			return nil, err
		}
	} else {
		pollKey = checkResp.Response.ResumableUpload.UploadKey
	}
	//fmt.Printf("pollKey: %+v\n", pollKey)
	// Poll until the server finishes assembling the file.
	pollResp, err := d.pollUpload(ctx, pollKey)
	if err != nil {
		return nil, err
	}
	quickKey := pollResp.Response.Doupload.QuickKey
	return &model.ObjThumb{
		Object: model.Object{
			ID:   quickKey,
			Name: file.GetName(),
			Size: file.GetSize(),
		},
		Thumbnail: model.Thumbnail{},
	}, nil
}
// GetArchiveMeta is not implemented; returning errs.NotImplement lets the
// framework fall back to its internal archive tooling.
func (d *Mediafire) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
	// TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
	return nil, errs.NotImplement
}
// ListArchive is not implemented; the internal archive tool is used instead.
func (d *Mediafire) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
	// TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
	return nil, errs.NotImplement
}
// Extract is not implemented; the internal archive tool is used instead.
func (d *Mediafire) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
	// TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
	return nil, errs.NotImplement
}
// ArchiveDecompress is not implemented; the internal archive tool is used instead.
func (d *Mediafire) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
	// TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional
	// a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir
	// return errs.NotImplement to use an internal archive tool
	return nil, errs.NotImplement
}
//func (d *Mediafire) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
//	return nil, errs.NotSupport
//}
// Compile-time check that Mediafire satisfies the driver interface.
var _ driver.Driver = (*Mediafire)(nil)

57
drivers/mediafire/meta.go Normal file
View File

@ -0,0 +1,57 @@
package mediafire
/*
Package mediafire
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-11
D@' 3z K!7 - The King Of Cracking
Modifications by ILoveScratch2<ilovescratch@foxmail.com>
Date: 2025-09-14
*/
import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/op"
)
// Addition holds the user-configurable MediaFire settings shown in the UI.
type Addition struct {
	driver.RootPath
	//driver.RootID
	SessionToken string `json:"session_token" required:"true" type:"string" help:"Required for MediaFire API"`
	Cookie string `json:"cookie" required:"true" type:"string" help:"Required for navigation"`
	OrderBy string `json:"order_by" type:"select" options:"name,time,size" default:"name"`
	OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
	ChunkSize int64 `json:"chunk_size" type:"number" default:"100"` // folder-listing chunk size passed to folder/get_content.php
}
// config declares the driver's static capabilities and defaults.
var config = driver.Config{
	Name:              "MediaFire",
	LocalSort:         false,
	OnlyLinkMFile:     false,
	OnlyProxy:         false,
	NoCache:           false,
	NoUpload:          false,
	NeedMs:            false,
	DefaultRoot:       "/",
	CheckStatus:       false,
	Alert:             "",
	NoOverwriteUpload: true,
}
// init registers the MediaFire driver factory with browser-like defaults
// used to build requests that pass MediaFire's client checks.
func init() {
	op.RegisterDriver(func() driver.Driver {
		return &Mediafire{
			appBase:         "https://app.mediafire.com",
			apiBase:         "https://www.mediafire.com/api/1.5",
			hostBase:        "https://www.mediafire.com",
			maxRetries:      3,
			secChUa:         "\"Not)A;Brand\";v=\"8\", \"Chromium\";v=\"139\", \"Google Chrome\";v=\"139\"",
			secChUaPlatform: "Windows",
			userAgent:       "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36",
		}
	})
}

232
drivers/mediafire/types.go Normal file
View File

@ -0,0 +1,232 @@
package mediafire
/*
Package mediafire
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-11
D@' 3z K!7 - The King Of Cracking
*/
// MediafireRenewTokenResponse is the reply of /user/renew_session_token.php.
type MediafireRenewTokenResponse struct {
	Response struct {
		Action string `json:"action"`
		SessionToken string `json:"session_token"`
		Result string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
	} `json:"response"`
}
// MediafireResponse is the reply of /folder/get_content.php; numeric fields
// arrive as strings (e.g. chunk_size, more_chunks).
type MediafireResponse struct {
	Response struct {
		Action string `json:"action"`
		FolderContent struct {
			ChunkSize string `json:"chunk_size"`
			ContentType string `json:"content_type"`
			ChunkNumber string `json:"chunk_number"`
			FolderKey string `json:"folderkey"`
			Folders []MediafireFolder `json:"folders,omitempty"`
			Files []MediafireFile `json:"files,omitempty"`
			MoreChunks string `json:"more_chunks"` // "yes" while further chunks exist
		} `json:"folder_content"`
		Result string `json:"result"`
	} `json:"response"`
}
// MediafireFolder is one folder entry inside a folder_content listing.
type MediafireFolder struct {
	FolderKey string `json:"folderkey"`
	Name string `json:"name"`
	Created string `json:"created"`
	CreatedUTC string `json:"created_utc"`
}
// MediafireFile is one file entry inside a folder_content listing.
type MediafireFile struct {
	QuickKey string `json:"quickkey"`
	Filename string `json:"filename"`
	Size string `json:"size"` // decimal byte count as a string
	Created string `json:"created"`
	CreatedUTC string `json:"created_utc"`
	MimeType string `json:"mimetype"`
}
// File is the driver's normalized entry, merging folders and files.
type File struct {
	ID string
	Name string
	Size int64
	CreatedUTC string
	IsFolder bool
}
// FolderContentResponse merges one chunk of folders and files.
type FolderContentResponse struct {
	Folders []MediafireFolder
	Files []MediafireFile
	MoreChunks bool
}
// MediafireLinksResponse is the reply of the file links endpoint.
type MediafireLinksResponse struct {
	Response struct {
		Action string `json:"action"`
		Links []struct {
			QuickKey string `json:"quickkey"`
			View string `json:"view"`
			NormalDownload string `json:"normal_download"`
			OneTime struct {
				Download string `json:"download"`
				View string `json:"view"`
			} `json:"one_time"`
		} `json:"links"`
		OneTimeKeyRequestCount string `json:"one_time_key_request_count"`
		OneTimeKeyRequestMaxCount string `json:"one_time_key_request_max_count"`
		Result string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
	} `json:"response"`
}
// MediafireDirectDownloadResponse carries direct-download links for files.
type MediafireDirectDownloadResponse struct {
	Response struct {
		Action string `json:"action"`
		Links []struct {
			QuickKey string `json:"quickkey"`
			DirectDownload string `json:"direct_download"`
		} `json:"links"`
		DirectDownloadFreeBandwidth string `json:"direct_download_free_bandwidth"`
		Result string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
	} `json:"response"`
}
// MediafireFolderCreateResponse is the reply of /folder/create.php.
type MediafireFolderCreateResponse struct {
	Response struct {
		Action string `json:"action"`
		FolderKey string `json:"folder_key"`
		UploadKey string `json:"upload_key"`
		ParentFolderKey string `json:"parent_folderkey"`
		Name string `json:"name"`
		Description string `json:"description"`
		Created string `json:"created"`
		CreatedUTC string `json:"created_utc"`
		Privacy string `json:"privacy"`
		FileCount string `json:"file_count"`
		FolderCount string `json:"folder_count"`
		Revision string `json:"revision"`
		DropboxEnabled string `json:"dropbox_enabled"`
		Flag string `json:"flag"`
		Result string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
		NewDeviceRevision int `json:"new_device_revision"`
	} `json:"response"`
}
// MediafireMoveResponse is the reply of file/folder move endpoints.
type MediafireMoveResponse struct {
	Response struct {
		Action string `json:"action"`
		Asynchronous string `json:"asynchronous,omitempty"`
		NewNames []string `json:"new_names"`
		Result string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
		NewDeviceRevision int `json:"new_device_revision"`
	} `json:"response"`
}
// MediafireRenameResponse is the reply of file/folder update endpoints.
type MediafireRenameResponse struct {
	Response struct {
		Action string `json:"action"`
		Asynchronous string `json:"asynchronous,omitempty"`
		Result string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
		NewDeviceRevision int `json:"new_device_revision"`
	} `json:"response"`
}
// MediafireCopyResponse is the reply of file/folder copy endpoints; the new
// item's key is in NewQuickKeys (files) or NewFolderKeys (folders).
type MediafireCopyResponse struct {
	Response struct {
		Action string `json:"action"`
		Asynchronous string `json:"asynchronous,omitempty"`
		NewQuickKeys []string `json:"new_quickkeys,omitempty"`
		NewFolderKeys []string `json:"new_folderkeys,omitempty"`
		SkippedCount string `json:"skipped_count,omitempty"`
		OtherCount string `json:"other_count,omitempty"`
		Result string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
		NewDeviceRevision int `json:"new_device_revision"`
	} `json:"response"`
}
// MediafireRemoveResponse is the reply of file/folder delete endpoints.
type MediafireRemoveResponse struct {
	Response struct {
		Action string `json:"action"`
		Asynchronous string `json:"asynchronous,omitempty"`
		Result string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
		NewDeviceRevision int `json:"new_device_revision"`
	} `json:"response"`
}
// MediafireCheckResponse is the reply of the upload/check endpoint; it
// describes whether the hash already exists and the resumable-upload state.
type MediafireCheckResponse struct {
	Response struct {
		Action string `json:"action"`
		HashExists string `json:"hash_exists"`
		InAccount string `json:"in_account"`
		InFolder string `json:"in_folder"`
		FileExists string `json:"file_exists"`
		ResumableUpload struct {
			AllUnitsReady string `json:"all_units_ready"` // "yes" once every unit is on the server
			NumberOfUnits string `json:"number_of_units"`
			UnitSize string `json:"unit_size"`
			Bitmap struct {
				Count string `json:"count"`
				Words []string `json:"words"`
			} `json:"bitmap"`
			UploadKey string `json:"upload_key"`
		} `json:"resumable_upload"`
		AvailableSpace string `json:"available_space"`
		UsedStorageSize string `json:"used_storage_size"`
		StorageLimit string `json:"storage_limit"`
		StorageLimitExceeded string `json:"storage_limit_exceeded"`
		UploadURL struct {
			Simple string `json:"simple"`
			SimpleFallback string `json:"simple_fallback"`
			Resumable string `json:"resumable"`
			ResumableFallback string `json:"resumable_fallback"`
		} `json:"upload_url"`
		Result string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
	} `json:"response"`
}
// MediafireActionTokenResponse is the reply of the action-token endpoint.
type MediafireActionTokenResponse struct {
	Response struct {
		Action string `json:"action"`
		ActionToken string `json:"action_token"`
		Result string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
	} `json:"response"`
}
// MediafirePollResponse is the reply of the upload poll endpoint; QuickKey
// becomes available once the server finishes assembling the file.
type MediafirePollResponse struct {
	Response struct {
		Action string `json:"action"`
		Doupload struct {
			Result string `json:"result"`
			Status string `json:"status"`
			Description string `json:"description"`
			QuickKey string `json:"quickkey"`
			Hash string `json:"hash"`
			Filename string `json:"filename"`
			Size string `json:"size"`
			Created string `json:"created"`
			CreatedUTC string `json:"created_utc"`
			Revision string `json:"revision"`
		} `json:"doupload"`
		Result string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
	} `json:"response"`
}
// MediafireFileSearchResponse is the reply of the file search endpoint.
type MediafireFileSearchResponse struct {
	Response struct {
		Action string `json:"action"`
		FileInfo []File `json:"file_info"`
		Result string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
	} `json:"response"`
}

626
drivers/mediafire/util.go Normal file
View File

@ -0,0 +1,626 @@
package mediafire
/*
Package mediafire
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-11
D@' 3z K!7 - The King Of Cracking
*/
import (
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"strconv"
"strings"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
)
// getSessionToken bootstraps a fresh session token from the stored browser
// Cookie by POSTing to /application/get_session_token.php. On success it
// stores the token, captures any refreshed cookies from the response, and
// persists both to storage. Returns the new session token.
func (d *Mediafire) getSessionToken(ctx context.Context) (string, error) {
	tokenURL := d.hostBase + "/application/get_session_token.php"
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, tokenURL, nil)
	if err != nil {
		return "", err
	}
	// Browser-like headers; presumably MediaFire rejects requests without
	// them — TODO confirm which headers are strictly required.
	req.Header.Set("Accept", "*/*")
	req.Header.Set("Accept-Encoding", "gzip, deflate, br, zstd")
	req.Header.Set("Accept-Language", "en-US,en;q=0.9")
	req.Header.Set("Content-Length", "0")
	req.Header.Set("Cookie", d.Cookie)
	req.Header.Set("DNT", "1")
	req.Header.Set("Origin", d.hostBase)
	req.Header.Set("Priority", "u=1, i")
	req.Header.Set("Referer", (d.hostBase + "/"))
	req.Header.Set("Sec-Ch-Ua", d.secChUa)
	req.Header.Set("Sec-Ch-Ua-Mobile", "?0")
	req.Header.Set("Sec-Ch-Ua-Platform", d.secChUaPlatform)
	req.Header.Set("Sec-Fetch-Dest", "empty")
	req.Header.Set("Sec-Fetch-Mode", "cors")
	req.Header.Set("Sec-Fetch-Site", "same-site")
	req.Header.Set("User-Agent", d.userAgent)
	//req.Header.Set("Connection", "keep-alive")
	resp, err := base.HttpClient.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	//fmt.Printf("getSessionToken :: Raw response: %s\n", string(body))
	//fmt.Printf("getSessionToken :: Parsed response: %+v\n", resp)
	var tokenResp struct {
		Response struct {
			SessionToken string `json:"session_token"`
		} `json:"response"`
	}
	if resp.StatusCode == 200 {
		if err := json.Unmarshal(body, &tokenResp); err != nil {
			return "", err
		}
		if tokenResp.Response.SessionToken == "" {
			return "", fmt.Errorf("empty session token received")
		}
		// Capture Set-Cookie values so later requests reuse the refreshed
		// session cookies. NOTE(review): map iteration makes the joined
		// cookie order nondeterministic — presumably harmless; confirm.
		cookieMap := make(map[string]string)
		for _, cookie := range resp.Cookies() {
			cookieMap[cookie.Name] = cookie.Value
		}
		if len(cookieMap) > 0 {
			var cookies []string
			for name, value := range cookieMap {
				cookies = append(cookies, fmt.Sprintf("%s=%s", name, value))
			}
			d.Cookie = strings.Join(cookies, "; ")
			op.MustSaveDriverStorage(d)
			//fmt.Printf("getSessionToken :: Captured cookies: %s\n", d.Cookie)
		}
	} else {
		return "", fmt.Errorf("getSessionToken :: failed to get session token, status code: %d", resp.StatusCode)
	}
	d.SessionToken = tokenResp.Response.SessionToken
	//fmt.Printf("Init :: Obtain Session Token %v", d.SessionToken)
	op.MustSaveDriverStorage(d)
	return d.SessionToken, nil
}
// renewToken exchanges the current session token for a fresh one via
// /user/renew_session_token.php and persists it to storage.
func (d *Mediafire) renewToken(_ context.Context) error {
	form := map[string]string{
		"session_token":   d.SessionToken,
		"response_format": "json",
	}

	var resp MediafireRenewTokenResponse
	if _, err := d.postForm("/user/renew_session_token.php", form, &resp); err != nil {
		return fmt.Errorf("failed to renew token: %w", err)
	}
	if resp.Response.Result != "Success" {
		return fmt.Errorf("MediaFire token renewal failed: %s", resp.Response.Result)
	}

	d.SessionToken = resp.Response.SessionToken
	op.MustSaveDriverStorage(d)
	return nil
}
// getFiles lists the complete contents of a folder, following MediaFire's
// chunked pagination until the API reports no more chunks.
func (d *Mediafire) getFiles(ctx context.Context, folderKey string) ([]File, error) {
	files := make([]File, 0)
	hasMore := true
	chunkNumber := 1
	for hasMore {
		// Pagination can span many round-trips; stop early if the caller
		// cancels (same pattern used by uploadUnits).
		if utils.IsCanceled(ctx) {
			return nil, ctx.Err()
		}
		resp, err := d.getFolderContent(ctx, folderKey, chunkNumber)
		if err != nil {
			return nil, err
		}
		for _, folder := range resp.Folders {
			files = append(files, File{
				ID:         folder.FolderKey,
				Name:       folder.Name,
				Size:       0, // folders report no size
				CreatedUTC: folder.CreatedUTC,
				IsFolder:   true,
			})
		}
		for _, file := range resp.Files {
			// Size arrives as a decimal string; a parse failure falls back to 0.
			size, _ := strconv.ParseInt(file.Size, 10, 64)
			files = append(files, File{
				ID:         file.QuickKey,
				Name:       file.Filename,
				Size:       size,
				CreatedUTC: file.CreatedUTC,
				IsFolder:   false,
			})
		}
		hasMore = resp.MoreChunks
		chunkNumber++
	}
	return files, nil
}
// getFolderContent fetches one pagination chunk of a folder, merging the
// API's separate "folders" and "files" listings into a single response.
func (d *Mediafire) getFolderContent(ctx context.Context, folderKey string, chunkNumber int) (*FolderContentResponse, error) {
	foldersResp, err := d.getFolderContentByType(ctx, folderKey, "folders", chunkNumber)
	if err != nil {
		return nil, err
	}
	filesResp, err := d.getFolderContentByType(ctx, folderKey, "files", chunkNumber)
	if err != nil {
		return nil, err
	}
	// Either listing may still have chunks remaining.
	more := foldersResp.Response.FolderContent.MoreChunks == "yes" ||
		filesResp.Response.FolderContent.MoreChunks == "yes"
	return &FolderContentResponse{
		Folders:    foldersResp.Response.FolderContent.Folders,
		Files:      filesResp.Response.FolderContent.Files,
		MoreChunks: more,
	}, nil
}
// getFolderContentByType requests one chunk of either "folders" or "files"
// from /folder/get_content.php, honoring the configured sort order.
func (d *Mediafire) getFolderContentByType(_ context.Context, folderKey, contentType string, chunkNumber int) (*MediafireResponse, error) {
	form := map[string]string{
		"session_token":   d.SessionToken,
		"response_format": "json",
		"folder_key":      folderKey,
		"content_type":    contentType,
		"chunk":           strconv.Itoa(chunkNumber),
		"chunk_size":      strconv.FormatInt(d.ChunkSize, 10),
		"details":         "yes",
		"order_direction": d.OrderDirection,
		"order_by":        d.OrderBy,
		"filter":          "",
	}

	var contentResp MediafireResponse
	if _, err := d.postForm("/folder/get_content.php", form, &contentResp); err != nil {
		return nil, err
	}
	if contentResp.Response.Result != "Success" {
		return nil, fmt.Errorf("MediaFire API error: %s", contentResp.Response.Result)
	}
	return &contentResp, nil
}
// fileToObj converts an internal File record into the model.ObjThumb the
// driver framework consumes.
func (d *Mediafire) fileToObj(f File) *model.ObjThumb {
	// CreatedUTC uses the "2006-01-02T15:04:05Z" layout; on a parse failure
	// the zero time is used for both Modified and Ctime.
	created, _ := time.Parse("2006-01-02T15:04:05Z", f.CreatedUTC)
	var thumbnailURL string
	if !f.IsFolder && f.ID != "" {
		// Thumbnail served from MediaFire's conversion endpoint; "acaa" and
		// the "3g" suffix look like fixed conversion-key parameters —
		// TODO(review): confirm against the MediaFire API docs.
		thumbnailURL = d.hostBase + "/convkey/acaa/" + f.ID + "3g.jpg"
	}
	return &model.ObjThumb{
		Object: model.Object{
			ID: f.ID,
			//Path: "",
			Name:     f.Name,
			Size:     f.Size,
			Modified: created,
			Ctime:    created,
			IsFolder: f.IsFolder,
		},
		Thumbnail: model.Thumbnail{
			Thumbnail: thumbnailURL,
		},
	}
}
// getForm issues a GET against the MediaFire API with the stored session
// cookie and browser-like headers. When resp is non-nil the JSON body is
// unmarshalled into it; the raw body is always returned.
func (d *Mediafire) getForm(endpoint string, query map[string]string, resp interface{}) ([]byte, error) {
	r := base.RestyClient.R().
		SetQueryParams(query).
		SetHeaders(map[string]string{
			"Cookie":     d.Cookie,
			"User-Agent": d.userAgent,
			"Origin":     d.appBase,
			"Referer":    d.appBase + "/",
		})
	if resp != nil {
		r.SetResult(resp)
	}
	res, err := r.Get(d.apiBase + endpoint)
	if err != nil {
		return nil, err
	}
	return res.Body(), nil
}
// postForm issues a form-encoded POST against the MediaFire API with the
// stored session cookie and browser-like headers. When resp is non-nil the
// JSON body is unmarshalled into it; the raw body is always returned.
func (d *Mediafire) postForm(endpoint string, data map[string]string, resp interface{}) ([]byte, error) {
	r := base.RestyClient.R().
		SetFormData(data).
		SetHeaders(map[string]string{
			"Cookie":       d.Cookie,
			"Content-Type": "application/x-www-form-urlencoded",
			"User-Agent":   d.userAgent,
			"Origin":       d.appBase,
			"Referer":      d.appBase + "/",
		})
	if resp != nil {
		r.SetResult(resp)
	}
	res, err := r.Post(d.apiBase + endpoint)
	if err != nil {
		return nil, err
	}
	return res.Body(), nil
}
// getDirectDownloadLink resolves a quick key to its direct-download URL via
// /file/get_links.php.
func (d *Mediafire) getDirectDownloadLink(_ context.Context, fileID string) (string, error) {
	query := map[string]string{
		"session_token":   d.SessionToken,
		"quick_key":       fileID,
		"link_type":       "direct_download",
		"response_format": "json",
	}

	var linkResp MediafireDirectDownloadResponse
	if _, err := d.getForm("/file/get_links.php", query, &linkResp); err != nil {
		return "", err
	}
	if linkResp.Response.Result != "Success" {
		return "", fmt.Errorf("MediaFire API error: %s", linkResp.Response.Result)
	}
	if len(linkResp.Response.Links) == 0 {
		return "", fmt.Errorf("no download links found")
	}
	return linkResp.Response.Links[0].DirectDownload, nil
}
// calculateSHA256 returns the hex-encoded SHA-256 digest of the file's
// entire contents. The offset is rewound to the start first and is left at
// EOF on return.
func (d *Mediafire) calculateSHA256(file *os.File) (string, error) {
	hasher := sha256.New()
	// Rewind explicitly: the caller may already have read from the handle.
	// io.SeekStart names the whence value instead of the bare 0.
	if _, err := file.Seek(0, io.SeekStart); err != nil {
		return "", err
	}
	if _, err := io.Copy(hasher, file); err != nil {
		return "", err
	}
	return hex.EncodeToString(hasher.Sum(nil)), nil
}
// uploadCheck asks MediaFire how a file should be uploaded (instant vs.
// resumable, which units are missing) and returns the raw check response.
func (d *Mediafire) uploadCheck(ctx context.Context, filename string, filesize int64, filehash, folderKey string) (*MediafireCheckResponse, error) {
	actionToken, err := d.getActionToken(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to get action token: %w", err)
	}

	// Upload endpoints authenticate with the action token, not the session token.
	form := map[string]string{
		"session_token":   actionToken,
		"filename":        filename,
		"size":            strconv.FormatInt(filesize, 10),
		"hash":            filehash,
		"folder_key":      folderKey,
		"resumable":       "yes",
		"response_format": "json",
	}

	var checkResp MediafireCheckResponse
	if _, err = d.postForm("/upload/check.php", form, &checkResp); err != nil {
		return nil, err
	}
	if checkResp.Response.Result != "Success" {
		return nil, fmt.Errorf("MediaFire upload check failed: %s", checkResp.Response.Result)
	}
	return &checkResp, nil
}
// resumableUpload transfers one unit (chunk) of a file through MediaFire's
// resumable-upload endpoint and returns the poll key from the response.
// The unit payload travels in the request body; per-unit metadata travels in
// x-* headers, mirroring MediaFire's own web client.
func (d *Mediafire) resumableUpload(ctx context.Context, folderKey, uploadKey string, unitData []byte, unitID int, fileHash, filename string, totalFileSize int64) (string, error) {
	actionToken, err := d.getActionToken(ctx)
	if err != nil {
		return "", err
	}

	url := d.apiBase + "/upload/resumable.php"
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(unitData))
	if err != nil {
		return "", err
	}

	q := req.URL.Query()
	q.Add("folder_key", folderKey)
	q.Add("response_format", "json")
	q.Add("session_token", actionToken)
	q.Add("key", uploadKey)
	req.URL.RawQuery = q.Encode()

	req.Header.Set("x-filehash", fileHash)
	req.Header.Set("x-filesize", strconv.FormatInt(totalFileSize, 10))
	req.Header.Set("x-unit-id", strconv.Itoa(unitID))
	req.Header.Set("x-unit-size", strconv.FormatInt(int64(len(unitData)), 10))
	req.Header.Set("x-unit-hash", d.sha256Hex(bytes.NewReader(unitData)))
	req.Header.Set("x-filename", filename)
	req.Header.Set("Content-Type", "application/octet-stream")
	req.ContentLength = int64(len(unitData))

	res, err := base.HttpClient.Do(req)
	if err != nil {
		return "", err
	}
	defer res.Body.Close()

	body, err := io.ReadAll(res.Body)
	if err != nil {
		return "", fmt.Errorf("failed to read response body: %v", err)
	}

	// Reject non-200 replies before attempting to parse them: error pages are
	// often not JSON, and a parse failure would mask the real HTTP error.
	if res.StatusCode != 200 {
		return "", fmt.Errorf("resumable upload failed with status %d", res.StatusCode)
	}

	var uploadResp struct {
		Response struct {
			Doupload struct {
				Key string `json:"key"`
			} `json:"doupload"`
			Result string `json:"result"`
		} `json:"response"`
	}
	if err := json.Unmarshal(body, &uploadResp); err != nil {
		return "", fmt.Errorf("failed to parse response: %v", err)
	}
	return uploadResp.Response.Doupload.Key, nil
}
// uploadUnits walks every unit of a resumable upload, skipping units the
// server's bitmap marks as already transferred, and returns the poll key of
// the last unit actually uploaded.
func (d *Mediafire) uploadUnits(ctx context.Context, file *os.File, checkResp *MediafireCheckResponse, filename, fileHash, folderKey string, up driver.UpdateProgress) (string, error) {
	unitSize, _ := strconv.ParseInt(checkResp.Response.ResumableUpload.UnitSize, 10, 64)
	numUnits, _ := strconv.Atoi(checkResp.Response.ResumableUpload.NumberOfUnits)
	uploadKey := checkResp.Response.ResumableUpload.UploadKey

	// Decode the bitmap of already-uploaded units (16 bits per word).
	stringWords := checkResp.Response.ResumableUpload.Bitmap.Words
	bitmap := make([]int, len(stringWords))
	for i, w := range stringWords {
		bitmap[i], _ = strconv.Atoi(w)
	}

	var finalKey string
	for unitID := 0; unitID < numUnits; unitID++ {
		if utils.IsCanceled(ctx) {
			return "", ctx.Err()
		}
		progress := float64(unitID+1) * 100 / float64(numUnits)
		if d.isUnitUploaded(bitmap, unitID) {
			// Already on the server from a previous attempt.
			up(progress)
			continue
		}
		unitKey, err := d.uploadSingleUnit(ctx, file, unitID, unitSize, fileHash, filename, uploadKey, folderKey)
		if err != nil {
			return "", err
		}
		finalKey = unitKey
		up(progress)
	}
	return finalKey, nil
}
// uploadSingleUnit reads the unitID-th chunk of file and sends it through the
// resumable-upload endpoint, returning the unit's poll key.
func (d *Mediafire) uploadSingleUnit(ctx context.Context, file *os.File, unitID int, unitSize int64, fileHash, filename, uploadKey, folderKey string) (string, error) {
	start := int64(unitID) * unitSize
	stat, err := file.Stat()
	if err != nil {
		return "", err
	}
	fileSize := stat.Size()

	// The final unit is usually shorter than unitSize.
	size := unitSize
	if start+size > fileSize {
		size = fileSize - start
	}

	unitData := make([]byte, size)
	n, err := file.ReadAt(unitData, start)
	// Per the io.ReaderAt contract, ReadAt may return io.EOF together with a
	// complete read when the chunk ends exactly at EOF; only treat the error
	// as fatal when bytes are actually missing.
	if err != nil && !(err == io.EOF && int64(n) == size) {
		return "", err
	}
	return d.resumableUpload(ctx, folderKey, uploadKey, unitData, unitID, fileHash, filename, fileSize)
}
// getActionToken returns an upload action token, fetching one from the API on
// first use and caching it on the driver for subsequent calls.
func (d *Mediafire) getActionToken(_ context.Context) (string, error) {
	// Fast path: reuse the cached token.
	if d.actionToken != "" {
		return d.actionToken, nil
	}
	data := map[string]string{
		"type":            "upload",
		"lifespan":        "1440", // requested token lifespan — presumably minutes; confirm with API docs
		"response_format": "json",
		"session_token":   d.SessionToken,
	}
	var resp MediafireActionTokenResponse
	if _, err := d.postForm("/user/get_action_token.php", data, &resp); err != nil {
		return "", err
	}
	if resp.Response.Result != "Success" {
		return "", fmt.Errorf("MediaFire action token failed: %s", resp.Response.Result)
	}
	// Cache the token: the original never stored it, so the fast path above
	// was dead code and every upload call hit the API again.
	d.actionToken = resp.Response.ActionToken
	return d.actionToken, nil
}
// pollUpload queries the status of an in-flight upload identified by its
// poll key.
func (d *Mediafire) pollUpload(ctx context.Context, key string) (*MediafirePollResponse, error) {
	actionToken, err := d.getActionToken(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to get action token: %w", err)
	}

	form := map[string]string{
		"key":             key,
		"response_format": "json",
		"session_token":   actionToken,
	}

	var pollResp MediafirePollResponse
	if _, err = d.postForm("/upload/poll_upload.php", form, &pollResp); err != nil {
		return nil, err
	}
	if pollResp.Response.Result != "Success" {
		return nil, fmt.Errorf("MediaFire poll upload failed: %s", pollResp.Response.Result)
	}
	return &pollResp, nil
}
// sha256Hex returns the hex-encoded SHA-256 digest of everything read from r.
// NOTE(review): the io.Copy error is ignored; that is harmless for the
// in-memory bytes.Reader this is called with, but a failing reader would
// silently yield a wrong hash.
func (d *Mediafire) sha256Hex(r io.Reader) string {
	h := sha256.New()
	io.Copy(h, r)
	return hex.EncodeToString(h.Sum(nil))
}
// isUnitUploaded reports whether the bitmap returned by upload/check marks
// the given unit as already present on the server. Each word carries 16 bits.
func (d *Mediafire) isUnitUploaded(words []int, unitID int) bool {
	word, bit := unitID/16, unitID%16
	if word >= len(words) {
		return false
	}
	return words[word]&(1<<bit) != 0
}
// getExistingFileInfo locates a file MediaFire reported as already uploaded
// (instant upload): first by content hash, then by name within the
// destination folder.
func (d *Mediafire) getExistingFileInfo(ctx context.Context, fileHash, filename, folderKey string) (*model.ObjThumb, error) {
	if obj, err := d.getFileByHash(ctx, fileHash); err == nil && obj != nil {
		return obj, nil
	}
	// Hash lookup failed; fall back to scanning the target folder by name.
	files, err := d.getFiles(ctx, folderKey)
	if err != nil {
		return nil, err
	}
	for _, f := range files {
		if !f.IsFolder && f.Name == filename {
			return d.fileToObj(f), nil
		}
	}
	return nil, fmt.Errorf("existing file not found")
}
// getFileByHash looks a file up by its content hash via /file/get_info.php.
func (d *Mediafire) getFileByHash(_ context.Context, hash string) (*model.ObjThumb, error) {
	form := map[string]string{
		"session_token":   d.SessionToken,
		"response_format": "json",
		"hash":            hash,
	}

	var searchResp MediafireFileSearchResponse
	if _, err := d.postForm("/file/get_info.php", form, &searchResp); err != nil {
		return nil, err
	}
	if searchResp.Response.Result != "Success" {
		return nil, fmt.Errorf("MediaFire file search failed: %s", searchResp.Response.Result)
	}
	if len(searchResp.Response.FileInfo) == 0 {
		return nil, fmt.Errorf("file not found by hash")
	}
	return d.fileToObj(searchResp.Response.FileInfo[0]), nil
}

View File

@ -0,0 +1,181 @@
package openlist_share
import (
"context"
"fmt"
"net/http"
"net/url"
stdpath "path"
"strings"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/OpenList/v4/server/common"
"github.com/go-resty/resty/v2"
)
// OpenListShare accesses a share published by a remote OpenList server.
type OpenListShare struct {
	model.Storage
	Addition
	// serverArchivePreview mirrors the remote "share_archive_preview"
	// setting, fetched once during Init.
	serverArchivePreview bool
}
// Config returns the static driver configuration.
func (d *OpenListShare) Config() driver.Config {
	return config
}

// GetAddition exposes the user-configured settings.
func (d *OpenListShare) GetAddition() driver.Additional {
	return &d.Addition
}
// Init normalizes the configured address and probes the remote server's
// public settings to learn whether it serves archive previews for shares.
func (d *OpenListShare) Init(ctx context.Context) error {
	d.Addition.Address = strings.TrimSuffix(d.Addition.Address, "/")
	var settings common.Resp[map[string]string]
	if _, _, err := d.request("/public/settings", http.MethodGet, func(r *resty.Request) {
		r.SetResult(&settings)
	}); err != nil {
		return err
	}
	d.serverArchivePreview = settings.Data["share_archive_preview"] == "true"
	return nil
}
// Drop releases driver resources; nothing to clean up for this driver.
func (d *OpenListShare) Drop(ctx context.Context) error {
	return nil
}
// List fetches one directory level of the share via the remote /fs/list API.
func (d *OpenListShare) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	var resp common.Resp[FsListResp]
	_, _, err := d.request("/fs/list", http.MethodPost, func(r *resty.Request) {
		r.SetResult(&resp).SetBody(ListReq{
			PageReq: model.PageReq{
				Page:    1,
				PerPage: 0, // 0 = no paging; fetch everything at once
			},
			Path:     stdpath.Join(fmt.Sprintf("/@s/%s", d.ShareId), dir.GetPath()),
			Password: d.Pwd,
			Refresh:  false,
		})
	})
	if err != nil {
		return nil, err
	}
	var files []model.Obj
	for i := range resp.Data.Content {
		f := &resp.Data.Content[i]
		files = append(files, &model.ObjThumb{
			Object: model.Object{
				Name:     f.Name,
				Modified: f.Modified,
				Ctime:    f.Created,
				Size:     f.Size,
				IsFolder: f.IsDir,
				HashInfo: utils.FromString(f.HashInfo),
			},
			Thumbnail: model.Thumbnail{Thumbnail: f.Thumb},
		})
	}
	return files, nil
}
// Link builds the share-download URL served directly by the remote OpenList
// server (no authentication beyond the share password).
func (d *OpenListShare) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	path := utils.FixAndCleanPath(stdpath.Join(d.ShareId, file.GetPath()))
	u := fmt.Sprintf("%s/sd%s?pwd=%s", d.Address, path, d.Pwd)
	return &model.Link{URL: u}, nil
}
// GetArchiveMeta fetches archive metadata (comment, encryption flag, entry
// tree) from the remote server, when both sides allow forwarding.
func (d *OpenListShare) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
	if !d.serverArchivePreview || !d.ForwardArchiveReq {
		return nil, errs.NotImplement
	}
	var resp common.Resp[ArchiveMetaResp]
	_, code, err := d.request("/fs/archive/meta", http.MethodPost, func(req *resty.Request) {
		req.SetResult(&resp).SetBody(ArchiveMetaReq{
			ArchivePass: args.Password,
			Path:        stdpath.Join(fmt.Sprintf("/@s/%s", d.ShareId), obj.GetPath()),
			Password:    d.Pwd,
			Refresh:     false,
		})
	})
	// 202 is the server's "wrong archive password" signal; check before err.
	if code == 202 {
		return nil, errs.WrongArchivePassword
	}
	if err != nil {
		return nil, err
	}
	var tree []model.ObjTree
	if resp.Data.Content != nil {
		tree = make([]model.ObjTree, 0, len(resp.Data.Content))
		for i := range resp.Data.Content {
			// Address the slice element directly: taking the address of the
			// range variable would alias one shared iteration variable on
			// Go < 1.22 (and needlessly copies the struct on any version).
			tree = append(tree, &resp.Data.Content[i])
		}
	}
	return &model.ArchiveMetaInfo{
		Comment:   resp.Data.Comment,
		Encrypted: resp.Data.Encrypted,
		Tree:      tree,
	}, nil
}
// ListArchive lists the entries at args.InnerPath inside an archive on the
// share, delegating extraction to the remote server when permitted.
func (d *OpenListShare) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
	if !d.serverArchivePreview || !d.ForwardArchiveReq {
		return nil, errs.NotImplement
	}
	var resp common.Resp[ArchiveListResp]
	_, code, err := d.request("/fs/archive/list", http.MethodPost, func(r *resty.Request) {
		r.SetResult(&resp).SetBody(ArchiveListReq{
			ArchiveMetaReq: ArchiveMetaReq{
				ArchivePass: args.Password,
				Path:        stdpath.Join(fmt.Sprintf("/@s/%s", d.ShareId), obj.GetPath()),
				Password:    d.Pwd,
				Refresh:     false,
			},
			PageReq: model.PageReq{
				Page:    1,
				PerPage: 0,
			},
			InnerPath: args.InnerPath,
		})
	})
	// 202 is the server's "wrong archive password" signal.
	if code == 202 {
		return nil, errs.WrongArchivePassword
	}
	if err != nil {
		return nil, err
	}
	var files []model.Obj
	for i := range resp.Data.Content {
		f := &resp.Data.Content[i]
		files = append(files, &model.ObjThumb{
			Object: model.Object{
				Name:     f.Name,
				Modified: f.Modified,
				Ctime:    f.Created,
				Size:     f.Size,
				IsFolder: f.IsDir,
				HashInfo: utils.FromString(f.HashInfo),
			},
			Thumbnail: model.Thumbnail{Thumbnail: f.Thumb},
		})
	}
	return files, nil
}
// Extract builds a URL from which the remote server streams a single file
// out of an archive on the share.
func (d *OpenListShare) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
	if !d.serverArchivePreview || !d.ForwardArchiveReq {
		return nil, errs.NotSupport
	}
	p := utils.FixAndCleanPath(stdpath.Join(d.ShareId, obj.GetPath()))
	link := fmt.Sprintf("%s/sad%s?pwd=%s&inner=%s&pass=%s",
		d.Address, p, d.Pwd,
		utils.EncodePath(args.InnerPath, true),
		url.QueryEscape(args.Password))
	return &model.Link{URL: link}, nil
}
var _ driver.Driver = (*OpenListShare)(nil)

View File

@ -0,0 +1,27 @@
package openlist_share
import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/op"
)
// Addition holds the user-configurable settings for an OpenListShare mount.
type Addition struct {
	driver.RootPath
	// Address is the base URL of the remote OpenList server.
	Address string `json:"url" required:"true"`
	// ShareId identifies the share on the remote server.
	ShareId string `json:"sid" required:"true"`
	// Pwd is the share password, if any.
	Pwd string `json:"pwd"`
	// ForwardArchiveReq forwards archive meta/list requests to the remote
	// server instead of handling archives locally.
	ForwardArchiveReq bool `json:"forward_archive_requests" default:"true"`
}

// config describes the driver to the registry: shares are read-only, so
// uploads are disabled and sorting happens locally.
var config = driver.Config{
	Name:        "OpenListShare",
	LocalSort:   true,
	NoUpload:    true,
	DefaultRoot: "/",
}

// init registers the driver constructor with the global driver registry.
func init() {
	op.RegisterDriver(func() driver.Driver {
		return &OpenListShare{}
	})
}

View File

@ -0,0 +1,111 @@
package openlist_share
import (
"time"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
)
// ListReq is the request body for the remote /fs/list endpoint.
type ListReq struct {
	model.PageReq
	Path     string `json:"path" form:"path"`
	Password string `json:"password" form:"password"`
	Refresh  bool   `json:"refresh"`
}

// ObjResp is a single file/folder entry as returned by the remote server.
type ObjResp struct {
	Name     string    `json:"name"`
	Size     int64     `json:"size"`
	IsDir    bool      `json:"is_dir"`
	Modified time.Time `json:"modified"`
	Created  time.Time `json:"created"`
	Sign     string    `json:"sign"`
	Thumb    string    `json:"thumb"`
	Type     int       `json:"type"`
	HashInfo string    `json:"hashinfo"`
}

// FsListResp is the payload of a successful /fs/list response.
type FsListResp struct {
	Content  []ObjResp `json:"content"`
	Total    int64     `json:"total"`
	Readme   string    `json:"readme"`
	Write    bool      `json:"write"`
	Provider string    `json:"provider"`
}

// ArchiveMetaReq is the request body for /fs/archive/meta.
type ArchiveMetaReq struct {
	ArchivePass string `json:"archive_pass"`
	Password    string `json:"password"`
	Path        string `json:"path"`
	Refresh     bool   `json:"refresh"`
}
// TreeResp is one node of an archive's entry tree; it implements
// model.ObjTree.
type TreeResp struct {
	ObjResp
	Children []TreeResp `json:"children"`
	// hashCache is currently unused — presumably reserved for memoizing the
	// parsed hash info.
	hashCache *utils.HashInfo
}

func (t *TreeResp) GetSize() int64 {
	return t.Size
}

func (t *TreeResp) GetName() string {
	return t.Name
}

func (t *TreeResp) ModTime() time.Time {
	return t.Modified
}

func (t *TreeResp) CreateTime() time.Time {
	return t.Created
}

func (t *TreeResp) IsDir() bool {
	return t.ObjResp.IsDir
}

func (t *TreeResp) GetHash() utils.HashInfo {
	return utils.FromString(t.HashInfo)
}

// GetID returns an empty string: archive entries carry no stable ID.
func (t *TreeResp) GetID() string {
	return ""
}

// GetPath returns an empty string: paths are tracked by the caller.
func (t *TreeResp) GetPath() string {
	return ""
}
// GetChildren exposes the node's subtree as model.ObjTree values.
func (t *TreeResp) GetChildren() []model.ObjTree {
	ret := make([]model.ObjTree, 0, len(t.Children))
	for i := range t.Children {
		// Address the slice element itself: taking the address of the range
		// value would alias one shared iteration variable on Go < 1.22 (and
		// needlessly copies each child on any version).
		ret = append(ret, &t.Children[i])
	}
	return ret
}
// Thumb returns the entry's thumbnail URL (may be empty).
func (t *TreeResp) Thumb() string {
	return t.ObjResp.Thumb
}

// ArchiveMetaResp is the payload of a successful /fs/archive/meta response.
type ArchiveMetaResp struct {
	Comment   string     `json:"comment"`
	Encrypted bool       `json:"encrypted"`
	Content   []TreeResp `json:"content"`
	RawURL    string     `json:"raw_url"`
	Sign      string     `json:"sign"`
}

// ArchiveListReq is the request body for /fs/archive/list.
type ArchiveListReq struct {
	model.PageReq
	ArchiveMetaReq
	InnerPath string `json:"inner_path"`
}

// ArchiveListResp is the payload of a successful /fs/archive/list response.
type ArchiveListResp struct {
	Content []ObjResp `json:"content"`
	Total   int64     `json:"total"`
}

View File

@ -0,0 +1,32 @@
package openlist_share
import (
"fmt"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
)
// request performs an API call against the remote OpenList server and
// returns the raw body, a status code (HTTP or application-level), and an
// error. A nil error implies the application-level code was 200.
func (d *OpenListShare) request(api, method string, callback base.ReqCallback) ([]byte, int, error) {
	req := base.RestyClient.R()
	if callback != nil {
		callback(req)
	}
	res, err := req.Execute(method, d.Address+"/api"+api)
	if err != nil {
		// Surface whatever status we have (0 when the request never completed).
		status := 0
		if res != nil {
			status = res.StatusCode()
		}
		return nil, status, err
	}
	if res.StatusCode() >= 400 {
		return nil, res.StatusCode(), fmt.Errorf("request failed, status: %s", res.Status())
	}
	// The HTTP layer succeeded; now check the application-level code.
	if code := utils.Json.Get(res.Body(), "code").ToInt(); code != 200 {
		return nil, code, fmt.Errorf("request failed, code: %d, message: %s", code, utils.Json.Get(res.Body(), "message").ToString())
	}
	return res.Body(), 200, nil
}

View File

@ -149,13 +149,19 @@ func (d *QuarkOrUC) getTranscodingLink(file model.Obj) (*model.Link, error) {
return nil, err return nil, err
} }
for _, info := range resp.Data.VideoList {
if info.VideoInfo.URL != "" {
return &model.Link{ return &model.Link{
URL: resp.Data.VideoList[0].VideoInfo.URL, URL: info.VideoInfo.URL,
ContentLength: resp.Data.VideoList[0].VideoInfo.Size, ContentLength: info.VideoInfo.Size,
Concurrency: 3, Concurrency: 3,
PartSize: 10 * utils.MB, PartSize: 10 * utils.MB,
}, nil }, nil
} }
}
return nil, errors.New("no link found")
}
func (d *QuarkOrUC) upPre(file model.FileStreamer, parentId string) (UpPreResp, error) { func (d *QuarkOrUC) upPre(file model.FileStreamer, parentId string) (UpPreResp, error) {
now := time.Now() now := time.Now()

View File

@ -228,13 +228,19 @@ func (d *QuarkUCTV) getTranscodingLink(ctx context.Context, file model.Obj) (*mo
return nil, err return nil, err
} }
for _, info := range fileLink.Data.VideoInfo {
if info.URL != "" {
return &model.Link{ return &model.Link{
URL: fileLink.Data.VideoInfo[0].URL, URL: info.URL,
ContentLength: info.Size,
Concurrency: 3, Concurrency: 3,
PartSize: 10 * utils.MB, PartSize: 10 * utils.MB,
ContentLength: fileLink.Data.VideoInfo[0].Size,
}, nil }, nil
} }
}
return nil, errors.New("no link found")
}
func (d *QuarkUCTV) getDownloadLink(ctx context.Context, file model.Obj) (*model.Link, error) { func (d *QuarkUCTV) getDownloadLink(ctx context.Context, file model.Obj) (*model.Link, error) {
var fileLink DownloadFileLink var fileLink DownloadFileLink

137
drivers/teldrive/copy.go Normal file
View File

@ -0,0 +1,137 @@
package teldrive
import (
"fmt"
"net/http"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/go-resty/resty/v2"
"golang.org/x/net/context"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/semaphore"
)
// NewCopyManager builds a CopyManager whose workers share one errgroup and a
// semaphore bounding the number of in-flight copy requests.
func NewCopyManager(ctx context.Context, concurrent int, d *Teldrive) *CopyManager {
	g, gctx := errgroup.WithContext(ctx)
	return &CopyManager{
		// Buffer twice the concurrency so producers rarely block.
		TaskChan: make(chan CopyTask, concurrent*2),
		Sem:      semaphore.NewWeighted(int64(concurrent)),
		G:        g,
		Ctx:      gctx,
		d:        d,
	}
}
// startWorkers launches the copy workers on the manager's errgroup. The
// worker count is half the channel capacity, i.e. the `concurrent` value
// passed to NewCopyManager.
func (cm *CopyManager) startWorkers() {
	workerCount := cap(cm.TaskChan) / 2
	for i := 0; i < workerCount; i++ {
		cm.G.Go(func() error {
			return cm.worker()
		})
	}
}
// worker drains the task channel, processing one file at a time while
// holding a semaphore slot. It returns nil when the channel closes, or the
// first error encountered (which cancels the whole errgroup).
func (cm *CopyManager) worker() error {
	for {
		select {
		case <-cm.Ctx.Done():
			return cm.Ctx.Err()
		case task, ok := <-cm.TaskChan:
			if !ok {
				// Channel closed: all tasks have been generated and consumed.
				return nil
			}
			if err := cm.Sem.Acquire(cm.Ctx, 1); err != nil {
				return err
			}
			err := cm.processFile(task)
			cm.Sem.Release(1)
			if err != nil {
				return fmt.Errorf("task processing failed: %w", err)
			}
		}
	}
}
// generateTasks enqueues a copy task for a single file, or walks a directory
// tree enqueueing one task per contained file.
func (cm *CopyManager) generateTasks(ctx context.Context, srcObj, dstDir model.Obj) error {
	if srcObj.IsDir() {
		return cm.generateFolderTasks(ctx, srcObj, dstDir)
	}
	// Plain file: hand it straight to the workers.
	select {
	case cm.TaskChan <- CopyTask{SrcObj: srcObj, DstDir: dstDir}:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}
// generateFolderTasks recreates srcDir under dstDir and recursively queues
// copy tasks for everything it contains.
func (cm *CopyManager) generateFolderTasks(ctx context.Context, srcDir, dstDir model.Obj) error {
	objs, err := cm.d.List(ctx, srcDir, model.ListArgs{})
	if err != nil {
		return fmt.Errorf("failed to list directory %s: %w", srcDir.GetPath(), err)
	}
	// Create the destination folder first; an empty source is done after that.
	// NOTE(review): MakeDir runs on cm.Ctx while the listing above used ctx —
	// confirm the two contexts are intended to differ.
	err = cm.d.MakeDir(cm.Ctx, dstDir, srcDir.GetName())
	if err != nil || len(objs) == 0 {
		return err
	}
	newDstDir := &model.Object{
		ID:       dstDir.GetID(),
		Path:     dstDir.GetPath() + "/" + srcDir.GetName(),
		Name:     srcDir.GetName(),
		IsFolder: true,
	}
	for _, file := range objs {
		if utils.IsCanceled(ctx) {
			return ctx.Err()
		}
		srcFile := &model.Object{
			ID:       file.GetID(),
			Path:     srcDir.GetPath() + "/" + file.GetName(),
			Name:     file.GetName(),
			IsFolder: file.IsDir(),
		}
		// Recurse to generate tasks for this entry.
		if err := cm.generateTasks(ctx, srcFile, newDstDir); err != nil {
			return err
		}
	}
	return nil
}
// processFile handles one queued task under the manager's context.
func (cm *CopyManager) processFile(task CopyTask) error {
	return cm.copySingleFile(cm.Ctx, task.SrcObj, task.DstDir)
}

// copySingleFile copies one file server-side into dstDir, first deleting any
// existing file with the same name.
func (cm *CopyManager) copySingleFile(ctx context.Context, srcObj, dstDir model.Obj) error {
	// `override copy mode` should delete the existing file
	if obj, err := cm.d.getFile(dstDir.GetPath(), srcObj.GetName(), srcObj.IsDir()); err == nil {
		if err := cm.d.Remove(ctx, obj); err != nil {
			return fmt.Errorf("failed to remove existing file: %w", err)
		}
	}
	// Do copy
	return cm.d.request(http.MethodPost, "/api/files/{id}/copy", func(req *resty.Request) {
		req.SetPathParam("id", srcObj.GetID())
		req.SetBody(base.Json{
			"newName":     srcObj.GetName(),
			"destination": dstDir.GetPath(),
		})
	}, nil)
}

217
drivers/teldrive/driver.go Normal file
View File

@ -0,0 +1,217 @@
package teldrive
import (
"context"
"fmt"
"math"
"net/http"
"net/url"
"strings"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/go-resty/resty/v2"
"github.com/google/uuid"
)
// Teldrive is a driver backed by a Teldrive server instance.
type Teldrive struct {
	model.Storage
	Addition
}

// Config returns the static driver configuration.
func (d *Teldrive) Config() driver.Config {
	return config
}

// GetAddition exposes the user-configured settings.
func (d *Teldrive) GetAddition() driver.Additional {
	return &d.Addition
}
// Init validates the configured cookie and fills in defaults for chunk size
// and upload concurrency before persisting the storage record.
func (d *Teldrive) Init(ctx context.Context) error {
	d.Address = strings.TrimSuffix(d.Address, "/")
	if d.Cookie == "" || !strings.HasPrefix(d.Cookie, "access_token=") {
		return fmt.Errorf("cookie must start with 'access_token='")
	}
	if d.UploadConcurrency == 0 {
		d.UploadConcurrency = 4 // default concurrent upload requests
	}
	if d.ChunkSize == 0 {
		d.ChunkSize = 10 // default chunk size, in MiB
	}
	op.MustSaveDriverStorage(d)
	return nil
}
// Drop releases driver resources; nothing to clean up for this driver.
func (d *Teldrive) Drop(ctx context.Context) error {
	return nil
}
// List fetches the entries of a directory from /api/files.
func (d *Teldrive) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	var listResp ListResp
	err := d.request(http.MethodGet, "/api/files", func(req *resty.Request) {
		req.SetQueryParams(map[string]string{
			"path":  dir.GetPath(),
			"limit": "1000", // overide default 500, TODO pagination
		})
	}, &listResp)
	if err != nil {
		return nil, err
	}
	return utils.SliceConvert(listResp.Items, func(src Object) (model.Obj, error) {
		isFolder := src.Type == "folder"
		size := src.Size
		if isFolder {
			// Folders report no meaningful size.
			size = 0
		}
		return &model.Object{
			ID:       src.ID,
			Name:     src.Name,
			Size:     size,
			IsFolder: isFolder,
			Modified: src.UpdatedAt,
		}, nil
	})
}
// Link returns a download URL for file. With UseShareLink enabled it ensures
// a share exists for the file and returns the share URL (usable without the
// cookie, suitable for 302 redirects); otherwise it returns the direct API
// URL, which requires the access-token cookie.
func (d *Teldrive) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	if d.UseShareLink {
		shareObj, err := d.getShareFileById(file.GetID())
		if err != nil || shareObj == nil {
			// No share yet: create one, then fetch it again.
			if err := d.createShareFile(file.GetID()); err != nil {
				return nil, err
			}
			shareObj, err = d.getShareFileById(file.GetID())
			if err != nil {
				return nil, err
			}
		}
		return &model.Link{
			URL: d.Address + "/api/shares/" + url.PathEscape(shareObj.Id) + "/files/" + url.PathEscape(file.GetID()) + "/" + url.PathEscape(file.GetName()),
		}, nil
	}
	// Direct download: authenticate with the stored cookie.
	return &model.Link{
		URL: d.Address + "/api/files/" + url.PathEscape(file.GetID()) + "/" + url.PathEscape(file.GetName()),
		Header: http.Header{
			"Cookie": {d.Cookie},
		},
	}, nil
}
// MakeDir creates dirName under parentDir via /api/files/mkdir.
func (d *Teldrive) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
	return d.request(http.MethodPost, "/api/files/mkdir", func(req *resty.Request) {
		req.SetBody(map[string]interface{}{
			"path": parentDir.GetPath() + "/" + dirName,
		})
	}, nil)
}

// Move relocates srcObj into dstDir by ID via /api/files/move.
func (d *Teldrive) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
	body := base.Json{
		"ids":               []string{srcObj.GetID()},
		"destinationParent": dstDir.GetID(),
	}
	return d.request(http.MethodPost, "/api/files/move", func(req *resty.Request) {
		req.SetBody(body)
	}, nil)
}

// Rename changes srcObj's name in place via PATCH /api/files/{id}.
func (d *Teldrive) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
	body := base.Json{
		"name": newName,
	}
	return d.request(http.MethodPatch, "/api/files/{id}", func(req *resty.Request) {
		req.SetPathParam("id", srcObj.GetID())
		req.SetBody(body)
	}, nil)
}

// Copy performs a (possibly recursive) server-side copy of srcObj into
// dstDir using a bounded worker pool; it blocks until every task finishes.
func (d *Teldrive) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
	copyConcurrentLimit := 4
	copyManager := NewCopyManager(ctx, copyConcurrentLimit, d)
	copyManager.startWorkers()
	copyManager.G.Go(func() error {
		// Closing the channel signals workers that no more tasks are coming.
		defer close(copyManager.TaskChan)
		return copyManager.generateTasks(ctx, srcObj, dstDir)
	})
	return copyManager.G.Wait()
}

// Remove deletes obj by ID via /api/files/delete.
func (d *Teldrive) Remove(ctx context.Context, obj model.Obj) error {
	body := base.Json{
		"ids": []string{obj.GetID()},
	}
	return d.request(http.MethodPost, "/api/files/delete", func(req *resty.Request) {
		req.SetBody(body)
	}, nil)
}
// Put uploads a file, replacing any same-named object in the destination.
// Uploads are chunked by d.ChunkSize; single-chunk files take a simpler
// path, and the server-side upload session is deleted whether the upload
// succeeds or fails.
func (d *Teldrive) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
	fileId := uuid.New().String()
	chunkSizeInMB := d.ChunkSize
	chunkSize := chunkSizeInMB * 1024 * 1024 // convert MiB to bytes
	totalSize := file.GetSize()
	totalParts := int(math.Ceil(float64(totalSize) / float64(chunkSize)))
	maxRetried := 3
	// delete the upload task when finished or failed
	defer func() {
		_ = d.request(http.MethodDelete, "/api/uploads/{id}", func(req *resty.Request) {
			req.SetPathParam("id", fileId)
		}, nil)
	}()
	// Overwrite semantics: remove an existing file of the same name first.
	if obj, err := d.getFile(dstDir.GetPath(), file.GetName(), file.IsDir()); err == nil {
		if err = d.Remove(ctx, obj); err != nil {
			return err
		}
	}
	// Start the upload session. The path must contain the "{id}" placeholder:
	// the original literal "/api/uploads/fileId" never substituted the path
	// parameter set below, so the request targeted the wrong URL.
	if err := d.request(http.MethodGet, "/api/uploads/{id}", func(req *resty.Request) {
		req.SetPathParam("id", fileId)
	}, nil); err != nil {
		return err
	}
	if totalSize == 0 {
		// Nothing to transfer; just create an empty file entry.
		return d.touch(file.GetName(), dstDir.GetPath())
	}
	if totalParts <= 1 {
		return d.doSingleUpload(ctx, dstDir, file, up, totalParts, chunkSize, fileId)
	}
	return d.doMultiUpload(ctx, dstDir, file, up, maxRetried, totalParts, chunkSize, fileId)
}
// GetArchiveMeta is not implemented; returning errs.NotImplement makes the
// framework fall back to its internal archive tooling.
func (d *Teldrive) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
	// TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
	return nil, errs.NotImplement
}

// ListArchive is not implemented; the internal archive tool is used instead.
func (d *Teldrive) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
	// TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
	return nil, errs.NotImplement
}

// Extract is not implemented; the internal archive tool is used instead.
func (d *Teldrive) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
	// TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
	return nil, errs.NotImplement
}

// ArchiveDecompress is not implemented; the internal archive tool is used
// instead.
func (d *Teldrive) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
	// TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional
	// a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir
	// return errs.NotImplement to use an internal archive tool
	return nil, errs.NotImplement
}
//func (d *Teldrive) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
// return nil, errs.NotSupport
//}
var _ driver.Driver = (*Teldrive)(nil)

26
drivers/teldrive/meta.go Normal file
View File

@ -0,0 +1,26 @@
package teldrive
import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/op"
)
// Addition holds the user-configurable settings for a Teldrive mount.
type Addition struct {
	driver.RootPath
	// Address is the base URL of the Teldrive server.
	Address string `json:"url" required:"true"`
	// Cookie must carry the access token, e.g. "access_token=xxx".
	Cookie string `json:"cookie" type:"string" required:"true" help:"access_token=xxx"`
	// UseShareLink makes Link return share URLs (no cookie needed) to
	// support 302 redirects.
	UseShareLink bool `json:"use_share_link" type:"bool" default:"false" help:"Create share link when getting link to support 302. If disabled, you need to enable web proxy."`
	// ChunkSize is the upload chunk size in MiB.
	ChunkSize int64 `json:"chunk_size" type:"number" default:"10" help:"Chunk size in MiB"`
	// UploadConcurrency is the number of concurrent upload requests.
	UploadConcurrency int64 `json:"upload_concurrency" type:"number" default:"4" help:"Concurrency upload requests"`
}

// config describes the driver to the registry.
var config = driver.Config{
	Name:        "Teldrive",
	DefaultRoot: "/",
}

// init registers the driver constructor with the global driver registry.
func init() {
	op.RegisterDriver(func() driver.Driver {
		return &Teldrive{}
	})
}

77
drivers/teldrive/types.go Normal file
View File

@ -0,0 +1,77 @@
package teldrive
import (
"context"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/semaphore"
)
// ErrResp is the JSON error body returned by the Teldrive API.
type ErrResp struct {
	Code    int    `json:"code"`
	Message string `json:"message"`
}

// Object is a single file or folder entry as returned by /api/files.
type Object struct {
	ID       string `json:"id"`
	Name     string `json:"name"`
	// Type is "file" or "folder".
	Type      string    `json:"type"`
	MimeType  string    `json:"mimeType"`
	Category  string    `json:"category,omitempty"`
	ParentId  string    `json:"parentId"`
	Size      int64     `json:"size"`
	Encrypted bool      `json:"encrypted"`
	UpdatedAt time.Time `json:"updatedAt"`
}

// ListResp is the paginated response envelope of /api/files listings.
type ListResp struct {
	Items []Object `json:"items"`
	Meta  struct {
		Count       int `json:"count"`
		TotalPages  int `json:"totalPages"`
		CurrentPage int `json:"currentPage"`
	} `json:"meta"`
}

// FilePart describes one uploaded chunk as recorded by the server under
// /api/uploads/{id}.
type FilePart struct {
	Name      string `json:"name"`
	PartId    int    `json:"partId"`
	PartNo    int    `json:"partNo"`
	ChannelId int    `json:"channelId"`
	Size      int    `json:"size"`
	Encrypted bool   `json:"encrypted"`
	Salt      string `json:"salt"`
}
// chunkTask carries one chunk of a multi-part upload to a worker goroutine.
type chunkTask struct {
	// chunkIdx is the 1-based part number (see doMultiUpload).
	chunkIdx  int
	fileName  string
	chunkSize int64
	reader    *stream.SectionReader
	// ss owns reader; it is used to release the section via FreeSectionReader.
	ss *stream.StreamSectionReader
}

// CopyManager coordinates concurrent copy tasks for the driver.
type CopyManager struct {
	TaskChan chan CopyTask
	Sem      *semaphore.Weighted
	G        *errgroup.Group
	Ctx      context.Context
	d        *Teldrive
}

// CopyTask is a single request to copy SrcObj into DstDir.
type CopyTask struct {
	SrcObj model.Obj
	DstDir model.Obj
}

// ShareObj is the share metadata returned by /api/files/{id}/share.
type ShareObj struct {
	Id        string    `json:"id"`
	Protected bool      `json:"protected"`
	UserId    int       `json:"userId"`
	Type      string    `json:"type"`
	Name      string    `json:"name"`
	ExpiresAt time.Time `json:"expiresAt"`
}

373
drivers/teldrive/upload.go Normal file
View File

@ -0,0 +1,373 @@
package teldrive
import (
"fmt"
"io"
"net/http"
"sort"
"strconv"
"sync"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
"github.com/pkg/errors"
"golang.org/x/net/context"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/semaphore"
)
// touch creates an empty file entry named name under path via the files API.
func (d *Teldrive) touch(name, path string) error {
	body := base.Json{
		"name": name,
		"type": "file",
		"path": path,
	}
	return d.request(http.MethodPost, "/api/files", func(req *resty.Request) {
		req.SetBody(body)
	}, nil)
}
// createFileOnUploadSuccess finalizes an upload: it fetches the parts the
// server has recorded for upload id, verifies the count matches what we
// uploaded, and then creates the file entry that references those parts.
//
// name is the target file name, id the upload id, path the destination
// directory, uploadedFileParts the parts this client sent, totalSize the
// file size in bytes.
func (d *Teldrive) createFileOnUploadSuccess(name, id, path string, uploadedFileParts []FilePart, totalSize int64) error {
	remoteFileParts, err := d.getFilePart(id)
	if err != nil {
		return err
	}
	// The server is authoritative: refuse to create the file if it recorded a
	// different number of parts than we uploaded.
	if len(remoteFileParts) != len(uploadedFileParts) {
		return fmt.Errorf("[Teldrive] file parts count mismatch: expected %d, got %d", len(uploadedFileParts), len(remoteFileParts))
	}
	// Pre-size the slice; the part count is known.
	formatParts := make([]base.Json, 0, len(remoteFileParts))
	for _, p := range remoteFileParts {
		formatParts = append(formatParts, base.Json{
			"id":   p.PartId,
			"salt": p.Salt,
		})
	}
	uploadBody := base.Json{
		"name":  name,
		"type":  "file",
		"path":  path,
		"parts": formatParts,
		"size":  totalSize,
	}
	// Create the file entry referencing the uploaded parts.
	return d.request(http.MethodPost, "/api/files", func(req *resty.Request) {
		req.SetBody(uploadBody)
	}, nil)
}
// checkFilePartExist reports whether a part with the given partId has already
// been recorded for fileId; it returns the zero FilePart when it has not.
func (d *Teldrive) checkFilePartExist(fileId string, partId int) (FilePart, error) {
	var empty FilePart
	parts, err := d.getFilePart(fileId)
	if err != nil {
		return empty, err
	}
	for _, p := range parts {
		if p.PartId == partId {
			return p, nil
		}
	}
	return empty, nil
}
// getFilePart returns every part currently recorded for the given upload id.
func (d *Teldrive) getFilePart(fileId string) ([]FilePart, error) {
	var parts []FilePart
	err := d.request(http.MethodGet, "/api/uploads/{id}", func(req *resty.Request) {
		req.SetPathParam("id", fileId)
	}, &parts)
	if err != nil {
		return nil, err
	}
	return parts, nil
}
// singleUploadRequest POSTs one chunk body to /api/uploads/{fileId}.
// It uses a dedicated resty client with no timeout because chunk uploads can
// legitimately run for a long time, and disables resty's built-in retries so
// that retry policy stays with the callers (doSingleUpload/uploadSingleChunk).
//
// NOTE(review): a fresh client is created per call; consider reusing a single
// client if profiling shows this path is hot.
func (d *Teldrive) singleUploadRequest(fileId string, callback base.ReqCallback, resp interface{}) error {
	url := d.Address + "/api/uploads/" + fileId
	client := resty.New().SetTimeout(0)
	ctx := context.Background()
	req := client.R().
		SetContext(ctx)
	req.SetHeader("Cookie", d.Cookie)
	req.SetHeader("Content-Type", "application/octet-stream")
	req.SetContentLength(true)
	// Never retry at the resty layer; callers implement their own backoff.
	req.AddRetryCondition(func(r *resty.Response, err error) bool {
		return false
	})
	if callback != nil {
		callback(req)
	}
	if resp != nil {
		req.SetResult(resp)
	}
	var e ErrResp
	req.SetError(&e)
	res, err := req.Execute(http.MethodPost, url)
	if err != nil {
		return err
	}
	if res.IsError() {
		return &e
	}
	return nil
}
// doSingleUpload uploads the stream sequentially as single-numbered parts and
// then creates the file entry. Each chunk is retried up to 3 times with
// exponential backoff, rewinding the section reader before every attempt.
//
// Fixes over the previous version: the section reader is now released on every
// path (it leaked on failure), and an upload response with an empty part name
// is an error instead of silently re-entering the loop without advancing
// `uploaded` (which spun forever).
func (d *Teldrive) doSingleUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up model.UpdateProgress,
	totalParts int, chunkSize int64, fileId string) error {
	totalSize := file.GetSize()
	var fileParts []FilePart
	var uploaded int64 = 0
	ss, err := stream.NewStreamSectionReader(file, int(totalSize), &up)
	if err != nil {
		return err
	}
	for uploaded < totalSize {
		if utils.IsCanceled(ctx) {
			return ctx.Err()
		}
		curChunkSize := min(totalSize-uploaded, chunkSize)
		rd, err := ss.GetSectionReader(uploaded, curChunkSize)
		if err != nil {
			return err
		}
		filePart := &FilePart{}
		err = retry.Do(func() error {
			// Rewind so a failed partial send restarts from the chunk start.
			if _, err := rd.Seek(0, io.SeekStart); err != nil {
				return err
			}
			return d.singleUploadRequest(fileId, func(req *resty.Request) {
				uploadParams := map[string]string{
					"partName": func() string {
						digits := len(fmt.Sprintf("%d", totalParts))
						return file.GetName() + fmt.Sprintf(".%0*d", digits, 1)
					}(),
					"partNo":   strconv.Itoa(1),
					"fileName": file.GetName(),
				}
				req.SetQueryParams(uploadParams)
				req.SetBody(driver.NewLimitedUploadStream(ctx, rd))
				req.SetHeader("Content-Length", strconv.FormatInt(curChunkSize, 10))
			}, filePart)
		},
			retry.Attempts(3),
			retry.DelayType(retry.BackOffDelay),
			retry.Delay(time.Second))
		// Release the section reader on every path, not only on success.
		ss.FreeSectionReader(rd)
		if err != nil {
			return err
		}
		if filePart.Name == "" {
			return fmt.Errorf("[Teldrive] upload returned empty part name for %s", file.GetName())
		}
		fileParts = append(fileParts, *filePart)
		uploaded += curChunkSize
		up(float64(uploaded) / float64(totalSize))
	}
	return d.createFileOnUploadSuccess(file.GetName(), fileId, dstDir.GetPath(), fileParts, totalSize)
}
// doMultiUpload uploads the stream as totalParts chunks using a bounded worker
// pool (d.UploadConcurrency workers), collects the uploaded parts, sorts them
// by part number, and creates the file entry referencing them.
//
// Pipeline: one producer goroutine slices the stream into chunkTasks and feeds
// chunkChan; worker goroutines upload each task via uploadSingleChunk (which
// also frees the section reader) and push results into resultChan; a collector
// goroutine drains resultChan until the errgroup finishes.
func (d *Teldrive) doMultiUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up model.UpdateProgress,
	maxRetried, totalParts int, chunkSize int64, fileId string) error {
	concurrent := d.UploadConcurrency
	g, ctx := errgroup.WithContext(ctx)
	sem := semaphore.NewWeighted(int64(concurrent))
	chunkChan := make(chan chunkTask, concurrent*2)
	resultChan := make(chan FilePart, concurrent)
	totalSize := file.GetSize()
	ss, err := stream.NewStreamSectionReader(file, int(totalSize), &up)
	if err != nil {
		return err
	}
	// Serializes GetSectionReader calls; only the producer uses it here.
	ssLock := sync.Mutex{}
	// Producer: slice the stream into 1-based numbered chunk tasks.
	g.Go(func() error {
		defer close(chunkChan)
		chunkIdx := 0
		for chunkIdx < totalParts {
			select {
			case <-ctx.Done():
				return ctx.Err()
			default:
			}
			offset := int64(chunkIdx) * chunkSize
			curChunkSize := min(totalSize-offset, chunkSize)
			ssLock.Lock()
			reader, err := ss.GetSectionReader(offset, curChunkSize)
			ssLock.Unlock()
			if err != nil {
				return err
			}
			task := chunkTask{
				chunkIdx:  chunkIdx + 1,
				chunkSize: curChunkSize,
				fileName:  file.GetName(),
				reader:    reader,
				ss:        ss,
			}
			// freeSectionReader will be called in d.uploadSingleChunk
			select {
			case chunkChan <- task:
				chunkIdx++
			case <-ctx.Done():
				return ctx.Err()
			}
		}
		return nil
	})
	// Workers: upload tasks under the semaphore and publish results.
	for i := 0; i < int(concurrent); i++ {
		g.Go(func() error {
			for task := range chunkChan {
				if err := sem.Acquire(ctx, 1); err != nil {
					return err
				}
				filePart, err := d.uploadSingleChunk(ctx, fileId, task, totalParts, maxRetried)
				sem.Release(1)
				if err != nil {
					return fmt.Errorf("upload chunk %d failed: %w", task.chunkIdx, err)
				}
				select {
				case resultChan <- *filePart:
				case <-ctx.Done():
					return ctx.Err()
				}
			}
			return nil
		})
	}
	var fileParts []FilePart
	var collectErr error
	collectDone := make(chan struct{})
	// Collector: drain resultChan until the errgroup is done.
	go func() {
		defer close(collectDone)
		fileParts = make([]FilePart, 0, totalParts)
		done := make(chan error, 1)
		go func() {
			done <- g.Wait()
			close(resultChan)
		}()
		for {
			select {
			case filePart, ok := <-resultChan:
				if !ok {
					collectErr = <-done
					return
				}
				fileParts = append(fileParts, filePart)
			// NOTE(review): if this branch wins while results are still
			// buffered in resultChan, those parts are dropped and the later
			// count check fails — presumably rare since resultChan is closed
			// only after g.Wait(); confirm and consider draining before return.
			case err := <-done:
				collectErr = err
				return
			}
		}
	}()
	<-collectDone
	if collectErr != nil {
		return fmt.Errorf("multi-upload failed: %w", collectErr)
	}
	// Server ordering is not guaranteed; sort by part number before finalize.
	sort.Slice(fileParts, func(i, j int) bool {
		return fileParts[i].PartNo < fileParts[j].PartNo
	})
	return d.createFileOnUploadSuccess(file.GetName(), fileId, dstDir.GetPath(), fileParts, totalSize)
}
// uploadSingleChunk uploads one chunk of a multi-part upload, retrying with
// quadratic backoff (capped at 30s) up to maxRetried times. If the server
// already has this part (resume case) the recorded part is returned without
// re-uploading. The section reader is always released on return.
func (d *Teldrive) uploadSingleChunk(ctx context.Context, fileId string, task chunkTask, totalParts, maxRetried int) (*FilePart, error) {
	filePart := &FilePart{}
	retryCount := 0
	defer task.ss.FreeSectionReader(task.reader)
	for {
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		default:
		}
		// Skip the upload entirely if the server already recorded this part.
		if existingPart, err := d.checkFilePartExist(fileId, task.chunkIdx); err == nil && existingPart.Name != "" {
			return &existingPart, nil
		}
		// Rewind before every attempt: a failed attempt may have consumed part
		// of the reader, and re-sending from the middle would corrupt the part
		// (doSingleUpload already rewinds; this path previously did not).
		if _, err := task.reader.Seek(0, io.SeekStart); err != nil {
			return nil, err
		}
		err := d.singleUploadRequest(fileId, func(req *resty.Request) {
			uploadParams := map[string]string{
				"partName": func() string {
					digits := len(fmt.Sprintf("%d", totalParts))
					return task.fileName + fmt.Sprintf(".%0*d", digits, task.chunkIdx)
				}(),
				"partNo":   strconv.Itoa(task.chunkIdx),
				"fileName": task.fileName,
			}
			req.SetQueryParams(uploadParams)
			req.SetBody(driver.NewLimitedUploadStream(ctx, task.reader))
			req.SetHeader("Content-Length", strconv.Itoa(int(task.chunkSize)))
		}, filePart)
		if err == nil {
			return filePart, nil
		}
		if retryCount >= maxRetried {
			return nil, fmt.Errorf("upload failed after %d retries: %w", maxRetried, err)
		}
		// Context errors are re-checked at the top of the loop.
		if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
			continue
		}
		retryCount++
		utils.Log.Errorf("[Teldrive] upload error: %v, retrying %d times", err, retryCount)
		// Quadratic backoff, capped at 30 seconds.
		backoffDuration := time.Duration(retryCount*retryCount) * time.Second
		if backoffDuration > 30*time.Second {
			backoffDuration = 30 * time.Second
		}
		select {
		case <-time.After(backoffDuration):
		case <-ctx.Done():
			return nil, ctx.Err()
		}
	}
}

109
drivers/teldrive/util.go Normal file
View File

@ -0,0 +1,109 @@
package teldrive
import (
"fmt"
"net/http"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/go-resty/resty/v2"
)
// do others that not defined in Driver interface
// request performs an authenticated call against d.Address+pathname, decoding
// a successful body into resp and a non-2xx JSON body into an ErrResp error.
func (d *Teldrive) request(method string, pathname string, callback base.ReqCallback, resp interface{}) error {
	req := base.RestyClient.R()
	req.SetHeader("Cookie", d.Cookie)
	if callback != nil {
		callback(req)
	}
	if resp != nil {
		req.SetResult(resp)
	}
	var apiErr ErrResp
	req.SetError(&apiErr)
	res, err := req.Execute(method, d.Address+pathname)
	if err != nil {
		return err
	}
	if res.IsError() {
		return &apiErr
	}
	return nil
}
// getFile looks up a single file or folder by directory path and name using
// the API's "find" operation and converts the first match into a model.Obj.
// It returns an error when no entry matches.
func (d *Teldrive) getFile(path, name string, isFolder bool) (model.Obj, error) {
	resp := &ListResp{}
	fileType := "file"
	if isFolder {
		fileType = "folder"
	}
	err := d.request(http.MethodGet, "/api/files", func(req *resty.Request) {
		req.SetQueryParams(map[string]string{
			"path":      path,
			"name":      name,
			"type":      fileType,
			"operation": "find",
		})
	}, resp)
	if err != nil {
		return nil, err
	}
	if len(resp.Items) == 0 {
		return nil, fmt.Errorf("file not found: %s/%s", path, name)
	}
	obj := resp.Items[0]
	// Return an explicit nil error (the previous code returned the stale err
	// variable, which was misleading even though it was always nil here).
	return &model.Object{
		ID:       obj.ID,
		Name:     obj.Name,
		Size:     obj.Size,
		IsFolder: obj.Type == "folder",
	}, nil
}
// Error implements the error interface for Teldrive API error responses.
// A nil receiver yields an empty message.
func (e *ErrResp) Error() string {
	if e == nil {
		return ""
	}
	return fmt.Sprintf("[Teldrive] message:%s Error code:%d", e.Message, e.Code)
}
// createShareFile enables sharing for the given file id, expiring per
// getDateTime (one hour from now).
func (d *Teldrive) createShareFile(fileId string) error {
	var errResp ErrResp
	err := d.request(http.MethodPost, "/api/files/{id}/share", func(req *resty.Request) {
		req.SetPathParam("id", fileId)
		req.SetBody(base.Json{"expiresAt": getDateTime()})
	}, &errResp)
	if err != nil {
		return err
	}
	// The endpoint can answer 2xx with an error payload; surface it.
	if errResp.Message != "" {
		return &errResp
	}
	return nil
}
// getShareFileById fetches the share metadata for the given file id.
func (d *Teldrive) getShareFileById(fileId string) (*ShareObj, error) {
	share := &ShareObj{}
	err := d.request(http.MethodGet, "/api/files/{id}/share", func(req *resty.Request) {
		req.SetPathParam("id", fileId)
	}, share)
	if err != nil {
		return nil, err
	}
	return share, nil
}
func getDateTime() string {
now := time.Now().UTC()
formattedWithMs := now.Add(time.Hour * 1).Format("2006-01-02T15:04:05.000Z")
return formattedWithMs
}

View File

@ -132,7 +132,7 @@ func (d *Terabox) Remove(ctx context.Context, obj model.Obj) error {
func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
resp, err := base.RestyClient.R(). resp, err := base.RestyClient.R().
SetContext(ctx). SetContext(ctx).
Get("https://d.terabox.com/rest/2.0/pcs/file?method=locateupload") Get("https://" + d.url_domain_prefix + "-data.terabox.com/rest/2.0/pcs/file?method=locateupload")
if err != nil { if err != nil {
return err return err
} }

View File

@ -36,5 +36,6 @@ func (d *Wopan) getSpaceType() string {
// 20230607214351 // 20230607214351
func getTime(str string) (time.Time, error) { func getTime(str string) (time.Time, error) {
return time.Parse("20060102150405", str) loc := time.FixedZone("UTC+8", 8*60*60)
return time.ParseInLocation("20060102150405", str, loc)
} }

View File

@ -5,26 +5,35 @@ umask ${UMASK}
if [ "$1" = "version" ]; then if [ "$1" = "version" ]; then
./openlist version ./openlist version
else else
# Define the target directory path for openlist service # Check file of /opt/openlist/data permissions for current user
OPENLIST_DIR="/opt/service/start/openlist" # 检查当前用户是否有当前目录的写和执行权限
if [ ! -d "$OPENLIST_DIR" ]; then if [ -d ./data ]; then
cp -r /opt/service/stop/openlist "$OPENLIST_DIR" 2>/dev/null if ! [ -w ./data ] || ! [ -x ./data ]; then
cat <<EOF
Error: Current user does not have write and/or execute permissions for the ./data directory: $(pwd)/data
Please visit https://doc.oplist.org/guide/installation/docker#for-version-after-v4-1-0 for more information.
错误:当前用户没有 ./data 目录($(pwd)/data的写和/或执行权限。
请访问 https://doc.oplist.org/guide/installation/docker#v4-1-0-%E4%BB%A5%E5%90%8E%E7%89%88%E6%9C%AC 获取更多信息。
Exiting...
EOF
exit 1
fi fi
fi
# Define the target directory path for aria2 service # Define the target directory path for aria2 service
ARIA2_DIR="/opt/service/start/aria2" ARIA2_DIR="/opt/service/start/aria2"
if [ "$RUN_ARIA2" = "true" ]; then if [ "$RUN_ARIA2" = "true" ]; then
# If aria2 should run and target directory doesn't exist, copy it # If aria2 should run and target directory doesn't exist, copy it
if [ ! -d "$ARIA2_DIR" ]; then if [ ! -d "$ARIA2_DIR" ]; then
mkdir -p "$ARIA2_DIR" mkdir -p "$ARIA2_DIR"
cp -r /opt/service/stop/aria2/* "$ARIA2_DIR" 2>/dev/null cp -r /opt/service/stop/aria2/* "$ARIA2_DIR" 2>/dev/null
fi fi
runsvdir /opt/service/start &
else else
# If aria2 should NOT run and target directory exists, remove it # If aria2 should NOT run and target directory exists, remove it
if [ -d "$ARIA2_DIR" ]; then if [ -d "$ARIA2_DIR" ]; then
rm -rf "$ARIA2_DIR" rm -rf "$ARIA2_DIR"
fi fi
fi fi
exec ./openlist server --no-prefix
exec runsvdir /opt/service/start
fi fi

4
go.mod
View File

@ -11,7 +11,7 @@ require (
github.com/OpenListTeam/times v0.1.0 github.com/OpenListTeam/times v0.1.0
github.com/OpenListTeam/wopan-sdk-go v0.1.5 github.com/OpenListTeam/wopan-sdk-go v0.1.5
github.com/ProtonMail/go-crypto v1.3.0 github.com/ProtonMail/go-crypto v1.3.0
github.com/SheltonZhu/115driver v1.1.0 github.com/SheltonZhu/115driver v1.1.1
github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible
github.com/avast/retry-go v3.0.0+incompatible github.com/avast/retry-go v3.0.0+incompatible
github.com/aws/aws-sdk-go v1.55.7 github.com/aws/aws-sdk-go v1.55.7
@ -35,7 +35,6 @@ require (
github.com/go-resty/resty/v2 v2.16.5 github.com/go-resty/resty/v2 v2.16.5
github.com/go-webauthn/webauthn v0.13.4 github.com/go-webauthn/webauthn v0.13.4
github.com/golang-jwt/jwt/v4 v4.5.2 github.com/golang-jwt/jwt/v4 v4.5.2
github.com/golang-jwt/jwt/v5 v5.3.0
github.com/google/uuid v1.6.0 github.com/google/uuid v1.6.0
github.com/gorilla/websocket v1.5.3 github.com/gorilla/websocket v1.5.3
github.com/hekmon/transmissionrpc/v3 v3.0.0 github.com/hekmon/transmissionrpc/v3 v3.0.0
@ -179,6 +178,7 @@ require (
github.com/go-sql-driver/mysql v1.7.0 // indirect github.com/go-sql-driver/mysql v1.7.0 // indirect
github.com/go-webauthn/x v0.1.23 // indirect github.com/go-webauthn/x v0.1.23 // indirect
github.com/goccy/go-json v0.10.5 // indirect github.com/goccy/go-json v0.10.5 // indirect
github.com/golang-jwt/jwt/v5 v5.2.3 // indirect
github.com/golang/protobuf v1.5.4 // indirect github.com/golang/protobuf v1.5.4 // indirect
github.com/golang/snappy v0.0.4 // indirect github.com/golang/snappy v0.0.4 // indirect
github.com/google/go-tpm v0.9.5 // indirect github.com/google/go-tpm v0.9.5 // indirect

6
go.sum
View File

@ -59,8 +59,8 @@ github.com/RoaringBitmap/roaring/v2 v2.4.5 h1:uGrrMreGjvAtTBobc0g5IrW1D5ldxDQYe2
github.com/RoaringBitmap/roaring/v2 v2.4.5/go.mod h1:FiJcsfkGje/nZBZgCu0ZxCPOKD/hVXDS2dXi7/eUFE0= github.com/RoaringBitmap/roaring/v2 v2.4.5/go.mod h1:FiJcsfkGje/nZBZgCu0ZxCPOKD/hVXDS2dXi7/eUFE0=
github.com/STARRY-S/zip v0.2.1 h1:pWBd4tuSGm3wtpoqRZZ2EAwOmcHK6XFf7bU9qcJXyFg= github.com/STARRY-S/zip v0.2.1 h1:pWBd4tuSGm3wtpoqRZZ2EAwOmcHK6XFf7bU9qcJXyFg=
github.com/STARRY-S/zip v0.2.1/go.mod h1:xNvshLODWtC4EJ702g7cTYn13G53o1+X9BWnPFpcWV4= github.com/STARRY-S/zip v0.2.1/go.mod h1:xNvshLODWtC4EJ702g7cTYn13G53o1+X9BWnPFpcWV4=
github.com/SheltonZhu/115driver v1.1.0 h1:kA8Vtu5JVWqqJFiTF06+HDb9zVEO6ZSdyjV5HsGx7Wg= github.com/SheltonZhu/115driver v1.1.1 h1:9EMhe2ZJflGiAaZbYInw2jqxTcqZNF+DtVDsEy70aFU=
github.com/SheltonZhu/115driver v1.1.0/go.mod h1:rKvNd4Y4OkXv1TMbr/SKjGdcvMQxh6AW5Tw9w0CJb7E= github.com/SheltonZhu/115driver v1.1.1/go.mod h1:rKvNd4Y4OkXv1TMbr/SKjGdcvMQxh6AW5Tw9w0CJb7E=
github.com/abbot/go-http-auth v0.4.0 h1:QjmvZ5gSC7jm3Zg54DqWE/T5m1t2AfDu6QlXJT0EVT0= github.com/abbot/go-http-auth v0.4.0 h1:QjmvZ5gSC7jm3Zg54DqWE/T5m1t2AfDu6QlXJT0EVT0=
github.com/abbot/go-http-auth v0.4.0/go.mod h1:Cz6ARTIzApMJDzh5bRMSUou6UMSp0IEXg9km/ci7TJM= github.com/abbot/go-http-auth v0.4.0/go.mod h1:Cz6ARTIzApMJDzh5bRMSUou6UMSp0IEXg9km/ci7TJM=
github.com/aead/ecdh v0.2.0 h1:pYop54xVaq/CEREFEcukHRZfTdjiWvYIsZDXXrBapQQ= github.com/aead/ecdh v0.2.0 h1:pYop54xVaq/CEREFEcukHRZfTdjiWvYIsZDXXrBapQQ=
@ -306,8 +306,6 @@ github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXe
github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v5 v5.2.3 h1:kkGXqQOBSDDWRhWNXTFpqGSCMyh/PLnqUvMGJPDJDs0= github.com/golang-jwt/jwt/v5 v5.2.3 h1:kkGXqQOBSDDWRhWNXTFpqGSCMyh/PLnqUvMGJPDJDs0=
github.com/golang-jwt/jwt/v5 v5.2.3/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang-jwt/jwt/v5 v5.2.3/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=

View File

@ -77,6 +77,10 @@ func InitConfig() {
log.Fatalf("update config struct error: %+v", err) log.Fatalf("update config struct error: %+v", err)
} }
} }
if !conf.Conf.Force {
confFromEnv()
}
if conf.Conf.MaxConcurrency > 0 { if conf.Conf.MaxConcurrency > 0 {
net.DefaultConcurrencyLimit = &net.ConcurrencyLimit{Limit: conf.Conf.MaxConcurrency} net.DefaultConcurrencyLimit = &net.ConcurrencyLimit{Limit: conf.Conf.MaxConcurrency}
} }
@ -92,25 +96,31 @@ func InitConfig() {
conf.MaxBufferLimit = conf.Conf.MaxBufferLimit * utils.MB conf.MaxBufferLimit = conf.Conf.MaxBufferLimit * utils.MB
} }
log.Infof("max buffer limit: %dMB", conf.MaxBufferLimit/utils.MB) log.Infof("max buffer limit: %dMB", conf.MaxBufferLimit/utils.MB)
if !conf.Conf.Force { if conf.Conf.MmapThreshold > 0 {
confFromEnv() conf.MmapThreshold = conf.Conf.MmapThreshold * utils.MB
} else {
conf.MmapThreshold = 0
} }
log.Infof("mmap threshold: %dMB", conf.Conf.MmapThreshold)
if len(conf.Conf.Log.Filter.Filters) == 0 { if len(conf.Conf.Log.Filter.Filters) == 0 {
conf.Conf.Log.Filter.Enable = false conf.Conf.Log.Filter.Enable = false
} }
// convert abs path // convert abs path
convertAbsPath := func(path *string) { convertAbsPath := func(path *string) {
if !filepath.IsAbs(*path) { if *path != "" && !filepath.IsAbs(*path) {
*path = filepath.Join(pwd, *path) *path = filepath.Join(pwd, *path)
} }
} }
convertAbsPath(&conf.Conf.Database.DBFile)
convertAbsPath(&conf.Conf.Scheme.CertFile)
convertAbsPath(&conf.Conf.Scheme.KeyFile)
convertAbsPath(&conf.Conf.Scheme.UnixFile)
convertAbsPath(&conf.Conf.Log.Name)
convertAbsPath(&conf.Conf.TempDir) convertAbsPath(&conf.Conf.TempDir)
convertAbsPath(&conf.Conf.BleveDir) convertAbsPath(&conf.Conf.BleveDir)
convertAbsPath(&conf.Conf.Log.Name)
convertAbsPath(&conf.Conf.Database.DBFile)
if conf.Conf.DistDir != "" {
convertAbsPath(&conf.Conf.DistDir) convertAbsPath(&conf.Conf.DistDir)
}
err := os.MkdirAll(conf.Conf.TempDir, 0o777) err := os.MkdirAll(conf.Conf.TempDir, 0o777)
if err != nil { if err != nil {
log.Fatalf("create temp dir error: %+v", err) log.Fatalf("create temp dir error: %+v", err)

View File

@ -111,6 +111,7 @@ func InitialSettings() []model.SettingItem {
{Key: conf.Favicon, Value: "https://res.oplist.org/logo/logo.svg", MigrationValue: "https://cdn.oplist.org/gh/OpenListTeam/Logo@main/logo.svg", Type: conf.TypeString, Group: model.STYLE}, {Key: conf.Favicon, Value: "https://res.oplist.org/logo/logo.svg", MigrationValue: "https://cdn.oplist.org/gh/OpenListTeam/Logo@main/logo.svg", Type: conf.TypeString, Group: model.STYLE},
{Key: conf.MainColor, Value: "#1890ff", Type: conf.TypeString, Group: model.STYLE}, {Key: conf.MainColor, Value: "#1890ff", Type: conf.TypeString, Group: model.STYLE},
{Key: "home_icon", Value: "🏠", Type: conf.TypeString, Group: model.STYLE}, {Key: "home_icon", Value: "🏠", Type: conf.TypeString, Group: model.STYLE},
{Key: "share_icon", Value: "🎁", Type: conf.TypeString, Group: model.STYLE},
{Key: "home_container", Value: "max_980px", Type: conf.TypeSelect, Options: "max_980px,hope_container", Group: model.STYLE}, {Key: "home_container", Value: "max_980px", Type: conf.TypeSelect, Options: "max_980px,hope_container", Group: model.STYLE},
{Key: "settings_layout", Value: "list", Type: conf.TypeSelect, Options: "list,responsive", Group: model.STYLE}, {Key: "settings_layout", Value: "list", Type: conf.TypeSelect, Options: "list,responsive", Group: model.STYLE},
// preview settings // preview settings
@ -161,8 +162,12 @@ func InitialSettings() []model.SettingItem {
{Key: conf.OcrApi, Value: "https://openlistteam-ocr-api-server.hf.space/ocr/file/json", MigrationValue: "https://api.example.com/ocr/file/json", Type: conf.TypeString, Group: model.GLOBAL}, // TODO: This can be replace by a community-hosted endpoint, see https://github.com/OpenListTeam/ocr_api_server {Key: conf.OcrApi, Value: "https://openlistteam-ocr-api-server.hf.space/ocr/file/json", MigrationValue: "https://api.example.com/ocr/file/json", Type: conf.TypeString, Group: model.GLOBAL}, // TODO: This can be replace by a community-hosted endpoint, see https://github.com/OpenListTeam/ocr_api_server
{Key: conf.FilenameCharMapping, Value: `{"/": "|"}`, Type: conf.TypeText, Group: model.GLOBAL}, {Key: conf.FilenameCharMapping, Value: `{"/": "|"}`, Type: conf.TypeText, Group: model.GLOBAL},
{Key: conf.ForwardDirectLinkParams, Value: "false", Type: conf.TypeBool, Group: model.GLOBAL}, {Key: conf.ForwardDirectLinkParams, Value: "false", Type: conf.TypeBool, Group: model.GLOBAL},
{Key: conf.IgnoreDirectLinkParams, Value: "sign,openlist_ts", Type: conf.TypeString, Group: model.GLOBAL}, {Key: conf.IgnoreDirectLinkParams, Value: "sign,openlist_ts,raw", Type: conf.TypeString, Group: model.GLOBAL},
{Key: conf.WebauthnLoginEnabled, Value: "false", Type: conf.TypeBool, Group: model.GLOBAL, Flag: model.PUBLIC}, {Key: conf.WebauthnLoginEnabled, Value: "false", Type: conf.TypeBool, Group: model.GLOBAL, Flag: model.PUBLIC},
{Key: conf.SharePreview, Value: "false", Type: conf.TypeBool, Group: model.GLOBAL, Flag: model.PUBLIC},
{Key: conf.ShareArchivePreview, Value: "false", Type: conf.TypeBool, Group: model.GLOBAL, Flag: model.PUBLIC},
{Key: conf.ShareForceProxy, Value: "true", Type: conf.TypeBool, Group: model.GLOBAL, Flag: model.PRIVATE},
{Key: conf.ShareSummaryContent, Value: "@{{creator}} shared {{#each files}}{{#if @first}}\"{{filename this}}\"{{/if}}{{#if @last}}{{#unless (eq @index 0)}} and {{@index}} more files{{/unless}}{{/if}}{{/each}} from {{site_title}}: {{base_url}}/@s/{{id}}{{#if pwd}} , the share code is {{pwd}}{{/if}}{{#if expires}}, please access before {{dateLocaleString expires}}.{{/if}}", Type: conf.TypeText, Group: model.GLOBAL, Flag: model.PUBLIC},
// single settings // single settings
{Key: conf.Token, Value: token, Type: conf.TypeString, Group: model.SINGLE, Flag: model.PRIVATE}, {Key: conf.Token, Value: token, Type: conf.TypeString, Group: model.SINGLE, Flag: model.PRIVATE},

View File

@ -33,8 +33,8 @@ func initUser() {
Role: model.ADMIN, Role: model.ADMIN,
BasePath: "/", BasePath: "/",
Authn: "[]", Authn: "[]",
// 0(can see hidden) - 7(can remove) & 12(can read archives) - 13(can decompress archives) // 0(can see hidden) - 8(webdav read) & 12(can read archives) - 14(can share)
Permission: 0x31FF, Permission: 0x71FF,
} }
if err := op.CreateUser(admin); err != nil { if err := op.CreateUser(admin); err != nil {
panic(err) panic(err)

View File

@ -120,6 +120,7 @@ type Config struct {
Log LogConfig `json:"log" envPrefix:"LOG_"` Log LogConfig `json:"log" envPrefix:"LOG_"`
DelayedStart int `json:"delayed_start" env:"DELAYED_START"` DelayedStart int `json:"delayed_start" env:"DELAYED_START"`
MaxBufferLimit int `json:"max_buffer_limitMB" env:"MAX_BUFFER_LIMIT_MB"` MaxBufferLimit int `json:"max_buffer_limitMB" env:"MAX_BUFFER_LIMIT_MB"`
MmapThreshold int `json:"mmap_thresholdMB" env:"MMAP_THRESHOLD_MB"`
MaxConnections int `json:"max_connections" env:"MAX_CONNECTIONS"` MaxConnections int `json:"max_connections" env:"MAX_CONNECTIONS"`
MaxConcurrency int `json:"max_concurrency" env:"MAX_CONCURRENCY"` MaxConcurrency int `json:"max_concurrency" env:"MAX_CONCURRENCY"`
TlsInsecureSkipVerify bool `json:"tls_insecure_skip_verify" env:"TLS_INSECURE_SKIP_VERIFY"` TlsInsecureSkipVerify bool `json:"tls_insecure_skip_verify" env:"TLS_INSECURE_SKIP_VERIFY"`
@ -176,6 +177,7 @@ func DefaultConfig(dataDir string) *Config {
}, },
}, },
MaxBufferLimit: -1, MaxBufferLimit: -1,
MmapThreshold: 4,
MaxConnections: 0, MaxConnections: 0,
MaxConcurrency: 64, MaxConcurrency: 64,
TlsInsecureSkipVerify: true, TlsInsecureSkipVerify: true,

View File

@ -33,6 +33,7 @@ const (
PreviewArchivesByDefault = "preview_archives_by_default" PreviewArchivesByDefault = "preview_archives_by_default"
ReadMeAutoRender = "readme_autorender" ReadMeAutoRender = "readme_autorender"
FilterReadMeScripts = "filter_readme_scripts" FilterReadMeScripts = "filter_readme_scripts"
// global // global
HideFiles = "hide_files" HideFiles = "hide_files"
CustomizeHead = "customize_head" CustomizeHead = "customize_head"
@ -45,6 +46,10 @@ const (
ForwardDirectLinkParams = "forward_direct_link_params" ForwardDirectLinkParams = "forward_direct_link_params"
IgnoreDirectLinkParams = "ignore_direct_link_params" IgnoreDirectLinkParams = "ignore_direct_link_params"
WebauthnLoginEnabled = "webauthn_login_enabled" WebauthnLoginEnabled = "webauthn_login_enabled"
SharePreview = "share_preview"
ShareArchivePreview = "share_archive_preview"
ShareForceProxy = "share_force_proxy"
ShareSummaryContent = "share_summary_content"
// index // index
SearchIndex = "search_index" SearchIndex = "search_index"
@ -167,4 +172,5 @@ const (
RequestHeaderKey RequestHeaderKey
UserAgentKey UserAgentKey
PathKey PathKey
SharingIDKey
) )

View File

@ -25,7 +25,10 @@ var PrivacyReg []*regexp.Regexp
var ( var (
// StoragesLoaded loaded success if empty // StoragesLoaded loaded success if empty
StoragesLoaded = false StoragesLoaded = false
// 单个Buffer最大限制
MaxBufferLimit = 16 * 1024 * 1024 MaxBufferLimit = 16 * 1024 * 1024
// 超过该阈值的Buffer将使用 mmap 分配,可主动释放内存
MmapThreshold = 4 * 1024 * 1024
) )
var ( var (
RawIndexHtml string RawIndexHtml string

View File

@ -12,7 +12,7 @@ var db *gorm.DB
func Init(d *gorm.DB) { func Init(d *gorm.DB) {
db = d db = d
err := AutoMigrate(new(model.Storage), new(model.User), new(model.Meta), new(model.SettingItem), new(model.SearchNode), new(model.TaskItem), new(model.SSHPublicKey)) err := AutoMigrate(new(model.Storage), new(model.User), new(model.Meta), new(model.SettingItem), new(model.SearchNode), new(model.TaskItem), new(model.SSHPublicKey), new(model.SharingDB))
if err != nil { if err != nil {
log.Fatalf("failed migrate database: %s", err.Error()) log.Fatalf("failed migrate database: %s", err.Error())
} }

62
internal/db/sharing.go Normal file
View File

@ -0,0 +1,62 @@
package db
import (
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils/random"
"github.com/pkg/errors"
)
// GetSharingById loads a single sharing record by its id.
func GetSharingById(id string) (*model.SharingDB, error) {
	s := model.SharingDB{ID: id}
	err := db.Where(s).First(&s).Error
	if err != nil {
		return nil, errors.Wrapf(err, "failed get sharing")
	}
	return &s, nil
}
// GetSharings returns one page of sharing records plus the total count.
// pageIndex is 1-based.
func GetSharings(pageIndex, pageSize int) (sharings []model.SharingDB, count int64, err error) {
	// Build a fresh query for each finisher: GORM documents reusing a *gorm.DB
	// after a finisher method (Count) as unsafe because statement state leaks
	// into the next query.
	if err := db.Model(&model.SharingDB{}).Count(&count).Error; err != nil {
		return nil, 0, errors.Wrapf(err, "failed get sharings count")
	}
	query := db.Model(&model.SharingDB{}).Order(columnName("id")).Offset((pageIndex - 1) * pageSize).Limit(pageSize)
	if err := query.Find(&sharings).Error; err != nil {
		return nil, 0, errors.Wrapf(err, "failed get find sharings")
	}
	return sharings, count, nil
}
// GetSharingsByCreatorId returns one page of sharing records created by the
// given user, plus the total count of that user's sharings. pageIndex is
// 1-based.
func GetSharingsByCreatorId(creator uint, pageIndex, pageSize int) (sharings []model.SharingDB, count int64, err error) {
	cond := model.SharingDB{CreatorId: creator}
	// Use a fresh *gorm.DB per finisher (Count/Find): reusing one after a
	// finisher leaks statement state into the next query per GORM's docs.
	if err := db.Model(&model.SharingDB{}).Where(cond).Count(&count).Error; err != nil {
		return nil, 0, errors.Wrapf(err, "failed get sharings count")
	}
	query := db.Model(&model.SharingDB{}).Where(cond).Order(columnName("id")).Offset((pageIndex - 1) * pageSize).Limit(pageSize)
	if err := query.Find(&sharings).Error; err != nil {
		return nil, 0, errors.Wrapf(err, "failed get find sharings")
	}
	return sharings, count, nil
}
// CreateSharing persists s under a newly generated random id and returns the
// id. It starts with an 8-character id and, on collision, appends one random
// character at a time, giving up before the id reaches 12 characters.
func CreateSharing(s *model.SharingDB) (string, error) {
	id := random.String(8)
	for len(id) < 12 {
		old := model.SharingDB{
			ID: id,
		}
		// NOTE(review): any error from First is treated as "id is free",
		// including real DB failures — presumably only ErrRecordNotFound is
		// intended; confirm and consider an errors.Is check.
		if err := db.Where(old).First(&old).Error; err != nil {
			s.ID = id
			return id, errors.WithStack(db.Create(s).Error)
		}
		id += random.String(1)
	}
	return "", errors.New("failed find valid id")
}
// UpdateSharing saves all fields of an existing sharing record.
func UpdateSharing(s *model.SharingDB) error {
	return errors.WithStack(db.Save(s).Error)
}

// DeleteSharingById removes the sharing record with the given id.
func DeleteSharingById(id string) error {
	s := model.SharingDB{ID: id}
	return errors.WithStack(db.Where(s).Delete(&s).Error)
}

View File

@ -23,6 +23,10 @@ var (
UnknownArchiveFormat = errors.New("unknown archive format") UnknownArchiveFormat = errors.New("unknown archive format")
WrongArchivePassword = errors.New("wrong archive password") WrongArchivePassword = errors.New("wrong archive password")
DriverExtractNotSupported = errors.New("driver extraction not supported") DriverExtractNotSupported = errors.New("driver extraction not supported")
WrongShareCode = errors.New("wrong share code")
InvalidSharing = errors.New("invalid sharing")
SharingNotFound = errors.New("sharing not found")
) )
// NewErr wrap constant error with an extra message // NewErr wrap constant error with an extra message

View File

@ -168,7 +168,7 @@ func GetStorage(path string, args *GetStoragesArgs) (driver.Driver, error) {
func Other(ctx context.Context, args model.FsOtherArgs) (interface{}, error) { func Other(ctx context.Context, args model.FsOtherArgs) (interface{}, error) {
res, err := other(ctx, args) res, err := other(ctx, args)
if err != nil { if err != nil {
log.Errorf("failed remove %s: %+v", args.Path, err) log.Errorf("failed get other %s: %+v", args.Path, err)
} }
return res, err return res, err
} }

View File

@ -77,6 +77,26 @@ type ArchiveDecompressArgs struct {
PutIntoNewDir bool PutIntoNewDir bool
} }
// SharingListArgs carries the options for listing a sharing's contents.
type SharingListArgs struct {
	Refresh bool   // skip the sharing cache and reload from the database
	Pwd     string // share code supplied by the visitor
}

// SharingArchiveMetaArgs extends ArchiveMetaArgs with the share code.
type SharingArchiveMetaArgs struct {
	ArchiveMetaArgs
	Pwd string
}

// SharingArchiveListArgs extends ArchiveListArgs with the share code.
type SharingArchiveListArgs struct {
	ArchiveListArgs
	Pwd string
}

// SharingLinkArgs extends LinkArgs with the share code.
type SharingLinkArgs struct {
	Pwd string
	LinkArgs
}
type RangeReaderIF interface { type RangeReaderIF interface {
RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error)
} }

47
internal/model/sharing.go Normal file
View File

@ -0,0 +1,47 @@
package model
import "time"
// SharingDB is the persisted form of a sharing. The shared paths are
// stored serialized in FilesRaw; see the Sharing wrapper for the
// decoded view.
type SharingDB struct {
	ID          string     `json:"id" gorm:"type:char(12);primaryKey"` // random id, 8-11 chars (see db.CreateSharing)
	FilesRaw    string     `json:"-" gorm:"type:text"`                 // JSON-encoded list of shared paths
	Expires     *time.Time `json:"expires"`                            // nil or zero time means never expires
	Pwd         string     `json:"pwd"`                                // share code; empty means no password required
	Accessed    int        `json:"accessed"`                           // number of accesses so far
	MaxAccessed int        `json:"max_accessed"`                       // 0 means unlimited
	CreatorId   uint       `json:"-"`
	Disabled    bool       `json:"disabled"`
	Remark      string     `json:"remark"`
	Readme      string     `json:"readme" gorm:"type:text"`
	Header      string     `json:"header" gorm:"type:text"`
	Sort
}
// Sharing is a SharingDB joined with its decoded file list and creator.
// Creator may be nil when the creator user could not be loaded.
type Sharing struct {
	*SharingDB
	Files   []string `json:"files"` // decoded FilesRaw
	Creator *User    `json:"-"`
}
// Valid reports whether the sharing can currently be accessed: it must
// not be disabled, not be over its access limit, have at least one
// shared file, have a creator that is allowed to share, and not be
// expired.
func (s *Sharing) Valid() bool {
	if s.Disabled {
		return false
	}
	if s.MaxAccessed > 0 && s.Accessed >= s.MaxAccessed {
		return false
	}
	if len(s.Files) == 0 {
		return false
	}
	// Creator can be nil when the creator lookup failed (op.makeJoined
	// leaves it nil); treat such sharings as invalid instead of
	// dereferencing a nil *User and panicking.
	if s.Creator == nil || !s.Creator.CanShare() {
		return false
	}
	if s.Expires != nil && !s.Expires.IsZero() && s.Expires.Before(time.Now()) {
		return false
	}
	return true
}
// Verify reports whether pwd unlocks this sharing: an empty Pwd means
// the sharing is public, otherwise pwd must match exactly.
func (s *Sharing) Verify(pwd string) bool {
	if s.Pwd == "" {
		return true
	}
	return pwd == s.Pwd
}

View File

@ -54,6 +54,7 @@ type User struct {
// 11: ftp/sftp write // 11: ftp/sftp write
// 12: can read archives // 12: can read archives
// 13: can decompress archives // 13: can decompress archives
// 14: can share
Permission int32 `json:"permission"` Permission int32 `json:"permission"`
OtpSecret string `json:"-"` OtpSecret string `json:"-"`
SsoID string `json:"sso_id"` // unique by sso platform SsoID string `json:"sso_id"` // unique by sso platform
@ -145,6 +146,10 @@ func (u *User) CanDecompress() bool {
return (u.Permission>>13)&1 == 1 return (u.Permission>>13)&1 == 1
} }
// CanShare reports whether the user may create sharings
// (permission bit 14).
func (u *User) CanShare() bool {
	return (u.Permission>>14)&1 == 1
}
func (u *User) JoinPath(reqPath string) (string, error) { func (u *User) JoinPath(reqPath string) (string, error) {
return utils.JoinBasePath(u.BasePath, reqPath) return utils.JoinBasePath(u.BasePath, reqPath)
} }

View File

@ -1,7 +1,6 @@
package net package net
import ( import (
"bytes"
"context" "context"
"errors" "errors"
"fmt" "fmt"
@ -15,6 +14,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/conf" "github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/rclone/rclone/lib/mmap"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range" "github.com/OpenListTeam/OpenList/v4/pkg/http_range"
"github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/awsutil"
@ -255,7 +255,10 @@ func (d *downloader) sendChunkTask(newConcurrency bool) error {
finalSize += firstSize - minSize finalSize += firstSize - minSize
} }
} }
buf.Reset(int(finalSize)) err := buf.Reset(int(finalSize))
if err != nil {
return err
}
ch := chunk{ ch := chunk{
start: d.pos, start: d.pos,
size: finalSize, size: finalSize,
@ -645,11 +648,13 @@ func (mr MultiReadCloser) Close() error {
} }
type Buf struct { type Buf struct {
buffer *bytes.Buffer
size int //expected size size int //expected size
ctx context.Context ctx context.Context
off int offR int
offW int
rw sync.Mutex rw sync.Mutex
buf []byte
mmap bool
readSignal chan struct{} readSignal chan struct{}
readPending bool readPending bool
@ -658,54 +663,62 @@ type Buf struct {
// NewBuf is a buffer that can have 1 read & 1 write at the same time. // NewBuf is a buffer that can have 1 read & 1 write at the same time.
// when read is faster write, immediately feed data to read after written // when read is faster write, immediately feed data to read after written
func NewBuf(ctx context.Context, maxSize int) *Buf { func NewBuf(ctx context.Context, maxSize int) *Buf {
return &Buf{ br := &Buf{
ctx: ctx, ctx: ctx,
buffer: bytes.NewBuffer(make([]byte, 0, maxSize)),
size: maxSize, size: maxSize,
readSignal: make(chan struct{}, 1), readSignal: make(chan struct{}, 1),
} }
if conf.MmapThreshold > 0 && maxSize >= conf.MmapThreshold {
m, err := mmap.Alloc(maxSize)
if err == nil {
br.buf = m
br.mmap = true
return br
} }
func (br *Buf) Reset(size int) {
br.rw.Lock()
defer br.rw.Unlock()
if br.buffer == nil {
return
} }
br.buffer.Reset() br.buf = make([]byte, maxSize)
br.size = size return br
br.off = 0
} }
func (br *Buf) Read(p []byte) (n int, err error) { func (br *Buf) Reset(size int) error {
br.rw.Lock()
defer br.rw.Unlock()
if br.buf == nil {
return io.ErrClosedPipe
}
if size > cap(br.buf) {
return fmt.Errorf("reset size %d exceeds max size %d", size, cap(br.buf))
}
br.size = size
br.offR = 0
br.offW = 0
return nil
}
func (br *Buf) Read(p []byte) (int, error) {
if err := br.ctx.Err(); err != nil { if err := br.ctx.Err(); err != nil {
return 0, err return 0, err
} }
if len(p) == 0 { if len(p) == 0 {
return 0, nil return 0, nil
} }
if br.off >= br.size { if br.offR >= br.size {
return 0, io.EOF return 0, io.EOF
} }
for { for {
br.rw.Lock() br.rw.Lock()
if br.buffer != nil { if br.buf == nil {
n, err = br.buffer.Read(p)
} else {
err = io.ErrClosedPipe
}
if err != nil && err != io.EOF {
br.rw.Unlock() br.rw.Unlock()
return return 0, io.ErrClosedPipe
} }
if n > 0 {
br.off += n if br.offW < br.offR {
br.rw.Unlock() br.rw.Unlock()
return n, nil return 0, io.ErrUnexpectedEOF
} }
if br.offW == br.offR {
br.readPending = true br.readPending = true
br.rw.Unlock() br.rw.Unlock()
// n==0, err==io.EOF
select { select {
case <-br.ctx.Done(): case <-br.ctx.Done():
return 0, br.ctx.Err() return 0, br.ctx.Err()
@ -716,18 +729,34 @@ func (br *Buf) Read(p []byte) (n int, err error) {
continue continue
} }
} }
n := copy(p, br.buf[br.offR:br.offW])
br.offR += n
br.rw.Unlock()
if n < len(p) && br.offR >= br.size {
return n, io.EOF
}
return n, nil
}
} }
func (br *Buf) Write(p []byte) (n int, err error) { func (br *Buf) Write(p []byte) (int, error) {
if err := br.ctx.Err(); err != nil { if err := br.ctx.Err(); err != nil {
return 0, err return 0, err
} }
if len(p) == 0 {
return 0, nil
}
br.rw.Lock() br.rw.Lock()
defer br.rw.Unlock() defer br.rw.Unlock()
if br.buffer == nil { if br.buf == nil {
return 0, io.ErrClosedPipe return 0, io.ErrClosedPipe
} }
n, err = br.buffer.Write(p) if br.offW >= br.size {
return 0, io.ErrShortWrite
}
n := copy(br.buf[br.offW:], p[:min(br.size-br.offW, len(p))])
br.offW += n
if br.readPending { if br.readPending {
br.readPending = false br.readPending = false
select { select {
@ -735,12 +764,21 @@ func (br *Buf) Write(p []byte) (n int, err error) {
default: default:
} }
} }
return if n < len(p) {
return n, io.ErrShortWrite
}
return n, nil
} }
func (br *Buf) Close() { func (br *Buf) Close() error {
br.rw.Lock() br.rw.Lock()
defer br.rw.Unlock() defer br.rw.Unlock()
br.buffer = nil var err error
close(br.readSignal) if br.mmap {
err = mmap.Free(br.buf)
br.mmap = false
}
br.buf = nil
close(br.readSignal)
return err
} }

View File

@ -486,12 +486,18 @@ func Rename(ctx context.Context, storage driver.Driver, srcPath, dstName string,
updateCacheObj(storage, srcDirPath, srcRawObj, model.WrapObjName(newObj)) updateCacheObj(storage, srcDirPath, srcRawObj, model.WrapObjName(newObj))
} else if !utils.IsBool(lazyCache...) { } else if !utils.IsBool(lazyCache...) {
DeleteCache(storage, srcDirPath) DeleteCache(storage, srcDirPath)
if srcRawObj.IsDir() {
ClearCache(storage, srcPath)
}
} }
} }
case driver.Rename: case driver.Rename:
err = s.Rename(ctx, srcObj, dstName) err = s.Rename(ctx, srcObj, dstName)
if err == nil && !utils.IsBool(lazyCache...) { if err == nil && !utils.IsBool(lazyCache...) {
DeleteCache(storage, srcDirPath) DeleteCache(storage, srcDirPath)
if srcRawObj.IsDir() {
ClearCache(storage, srcPath)
}
} }
default: default:
return errs.NotImplement return errs.NotImplement

139
internal/op/sharing.go Normal file
View File

@ -0,0 +1,139 @@
package op
import (
"fmt"
stdpath "path"
"strings"
"github.com/OpenListTeam/OpenList/v4/internal/db"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/go-cache"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
// makeJoined converts raw SharingDB rows into Sharing values, decoding
// FilesRaw and attaching the creator user. Creator lookups are memoized
// per call; a failed lookup leaves Creator nil (and is retried for the
// next row), and an undecodable FilesRaw yields an empty file list.
func makeJoined(sdb []model.SharingDB) []model.Sharing {
	creators := make(map[uint]*model.User)
	return utils.MustSliceConvert(sdb, func(s model.SharingDB) model.Sharing {
		creator, cached := creators[s.CreatorId]
		if !cached {
			if u, err := GetUserById(s.CreatorId); err == nil {
				creator = u
				creators[s.CreatorId] = u
			}
		}
		var files []string
		if err := utils.Json.UnmarshalFromString(s.FilesRaw, &files); err != nil {
			files = make([]string, 0)
		}
		return model.Sharing{
			SharingDB: &s,
			Files:     files,
			Creator:   creator,
		}
	})
}
// sharingCache caches sharings by id; entries are invalidated in
// UpdateSharing and DeleteSharing.
var sharingCache = cache.NewMemCache(cache.WithShards[*model.Sharing](8))

// sharingG deduplicates concurrent database loads of the same sharing id.
var sharingG singleflight.Group[*model.Sharing]
// GetSharingById returns the sharing with the given id, joined with its
// decoded file list and creator. Unless refresh is true, the in-memory
// cache is consulted first; on a miss the database load is deduplicated
// via singleflight.
//
// NOTE(review): nothing in this file ever stores into sharingCache, so
// the lookup below can only hit if entries are inserted elsewhere —
// verify; otherwise every call goes to the database.
func GetSharingById(id string, refresh ...bool) (*model.Sharing, error) {
	if !utils.IsBool(refresh...) {
		if sharing, ok := sharingCache.Get(id); ok {
			log.Debugf("use cache when get sharing %s", id)
			return sharing, nil
		}
	}
	sharing, err, _ := sharingG.Do(id, func() (*model.Sharing, error) {
		s, err := db.GetSharingById(id)
		if err != nil {
			return nil, errors.WithMessagef(err, "failed get sharing [%s]", id)
		}
		creator, err := GetUserById(s.CreatorId)
		if err != nil {
			return nil, errors.WithMessagef(err, "failed get sharing creator [%s]", id)
		}
		// An undecodable FilesRaw degrades to an empty file list rather
		// than failing the whole lookup.
		var files []string
		if err = utils.Json.UnmarshalFromString(s.FilesRaw, &files); err != nil {
			files = make([]string, 0)
		}
		return &model.Sharing{
			SharingDB: s,
			Files:     files,
			Creator:   creator,
		}, nil
	})
	return sharing, err
}
// GetSharings returns one page of sharings (with creators joined) and
// the total record count.
func GetSharings(pageIndex, pageSize int) ([]model.Sharing, int64, error) {
	rows, total, err := db.GetSharings(pageIndex, pageSize)
	if err != nil {
		return nil, 0, errors.WithStack(err)
	}
	return makeJoined(rows), total, nil
}

// GetSharingsByCreatorId is like GetSharings but restricted to sharings
// created by the given user.
func GetSharingsByCreatorId(userId uint, pageIndex, pageSize int) ([]model.Sharing, int64, error) {
	rows, total, err := db.GetSharingsByCreatorId(userId, pageIndex, pageSize)
	if err != nil {
		return nil, 0, errors.WithStack(err)
	}
	return makeJoined(rows), total, nil
}
// GetSharingUnwrapPath maps a path inside a sharing onto the actual
// filesystem path. A single-file sharing is rooted directly at that
// file; a multi-file sharing uses the first path segment to select one
// of the shared roots by base name.
func GetSharingUnwrapPath(sharing *model.Sharing, path string) (string, error) {
	switch len(sharing.Files) {
	case 0:
		return "", errors.New("cannot get actual path of an invalid sharing")
	case 1:
		return stdpath.Join(sharing.Files[0], path), nil
	}
	relative := utils.FixAndCleanPath(path)[1:]
	if len(relative) == 0 {
		return "", errors.New("cannot get actual path of a sharing root path")
	}
	child, rest, _ := strings.Cut(relative, "/")
	for _, root := range sharing.Files {
		if stdpath.Base(root) == child {
			return stdpath.Join(root, rest), nil
		}
	}
	return "", fmt.Errorf("failed find child [%s] of sharing [%s]", child, sharing.ID)
}
// CreateSharing normalizes the shared paths, serializes them and
// inserts a new sharing row, returning the generated id.
func CreateSharing(sharing *model.Sharing) (string, error) {
	sharing.CreatorId = sharing.Creator.ID
	cleaned := utils.MustSliceConvert(sharing.Files, utils.FixAndCleanPath)
	raw, err := utils.Json.MarshalToString(cleaned)
	if err != nil {
		return "", errors.WithStack(err)
	}
	sharing.FilesRaw = raw
	return db.CreateSharing(sharing.SharingDB)
}
// UpdateSharing saves a sharing back to the database and drops its
// cache entry. Unless skipMarshal is true, CreatorId and FilesRaw are
// refreshed from Creator/Files first.
func UpdateSharing(sharing *model.Sharing, skipMarshal ...bool) error {
	if !utils.IsBool(skipMarshal...) {
		sharing.CreatorId = sharing.Creator.ID
		raw, err := utils.Json.MarshalToString(utils.MustSliceConvert(sharing.Files, utils.FixAndCleanPath))
		if err != nil {
			return errors.WithStack(err)
		}
		sharing.FilesRaw = raw
	}
	sharingCache.Del(sharing.ID)
	return db.UpdateSharing(sharing.SharingDB)
}

// DeleteSharing removes a sharing by id and drops its cache entry.
func DeleteSharing(sid string) error {
	sharingCache.Del(sid)
	return db.DeleteSharingById(sid)
}

View File

@ -0,0 +1,65 @@
package sharing
import (
"context"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/pkg/errors"
)
// archiveMeta resolves a sharing, checks validity and password, and
// returns archive metadata for the file at path inside the sharing.
// The root of a multi-file sharing has no archive meta.
func archiveMeta(ctx context.Context, sid, path string, args model.SharingArchiveMetaArgs) (*model.Sharing, *model.ArchiveMetaProvider, error) {
	sharing, err := op.GetSharingById(sid, args.Refresh)
	if err != nil {
		return nil, nil, errors.WithStack(errs.SharingNotFound)
	}
	if !sharing.Valid() {
		return sharing, nil, errors.WithStack(errs.InvalidSharing)
	}
	if !sharing.Verify(args.Pwd) {
		return sharing, nil, errors.WithStack(errs.WrongShareCode)
	}
	path = utils.FixAndCleanPath(path)
	if len(sharing.Files) != 1 && path == "/" {
		return nil, nil, errors.New("cannot get sharing root archive meta")
	}
	unwrapPath, err := op.GetSharingUnwrapPath(sharing, path)
	if err != nil {
		return nil, nil, errors.WithMessage(err, "failed get sharing unwrap path")
	}
	storage, actualPath, err := op.GetStorageAndActualPath(unwrapPath)
	if err != nil {
		return nil, nil, errors.WithMessage(err, "failed get sharing file")
	}
	meta, err := op.GetArchiveMeta(ctx, storage, actualPath, args.ArchiveMetaArgs)
	return sharing, meta, err
}
// archiveList resolves a sharing, checks validity and password, and
// lists the entries of the archive at path inside the sharing.
// The root of a multi-file sharing cannot be listed as an archive.
func archiveList(ctx context.Context, sid, path string, args model.SharingArchiveListArgs) (*model.Sharing, []model.Obj, error) {
	sharing, err := op.GetSharingById(sid, args.Refresh)
	if err != nil {
		return nil, nil, errors.WithStack(errs.SharingNotFound)
	}
	if !sharing.Valid() {
		return sharing, nil, errors.WithStack(errs.InvalidSharing)
	}
	if !sharing.Verify(args.Pwd) {
		return sharing, nil, errors.WithStack(errs.WrongShareCode)
	}
	path = utils.FixAndCleanPath(path)
	if len(sharing.Files) != 1 && path == "/" {
		return nil, nil, errors.New("cannot get sharing root archive list")
	}
	unwrapPath, err := op.GetSharingUnwrapPath(sharing, path)
	if err != nil {
		return nil, nil, errors.WithMessage(err, "failed get sharing unwrap path")
	}
	storage, actualPath, err := op.GetStorageAndActualPath(unwrapPath)
	if err != nil {
		return nil, nil, errors.WithMessage(err, "failed get sharing file")
	}
	objs, err := op.ListArchive(ctx, storage, actualPath, args.ArchiveListArgs)
	return sharing, objs, err
}

60
internal/sharing/get.go Normal file
View File

@ -0,0 +1,60 @@
package sharing
import (
"context"
stdpath "path"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/pkg/errors"
)
// get resolves a single object inside a sharing. The sharing root (for
// multi-file sharings, or when the unwrapped path is "/") is reported
// as a synthetic folder named after the sharing id.
func get(ctx context.Context, sid, path string, args model.SharingListArgs) (*model.Sharing, model.Obj, error) {
	sharing, err := op.GetSharingById(sid, args.Refresh)
	if err != nil {
		return nil, nil, errors.WithStack(errs.SharingNotFound)
	}
	if !sharing.Valid() {
		return sharing, nil, errors.WithStack(errs.InvalidSharing)
	}
	if !sharing.Verify(args.Pwd) {
		return sharing, nil, errors.WithStack(errs.WrongShareCode)
	}
	rootObj := &model.Object{
		Name:     sid,
		Size:     0,
		Modified: time.Time{},
		IsFolder: true,
	}
	path = utils.FixAndCleanPath(path)
	if len(sharing.Files) != 1 && path == "/" {
		return sharing, rootObj, nil
	}
	unwrapPath, err := op.GetSharingUnwrapPath(sharing, path)
	if err != nil {
		return nil, nil, errors.WithMessage(err, "failed get sharing unwrap path")
	}
	if unwrapPath == "/" {
		return sharing, rootObj, nil
	}
	// A virtual (storage-mount) entry shadows the real object lookup.
	for _, vf := range op.GetStorageVirtualFilesByPath(stdpath.Dir(unwrapPath)) {
		if vf.GetName() == stdpath.Base(unwrapPath) {
			return sharing, vf, nil
		}
	}
	storage, actualPath, err := op.GetStorageAndActualPath(unwrapPath)
	if err != nil {
		return nil, nil, errors.WithMessage(err, "failed get sharing file")
	}
	obj, err := op.Get(ctx, storage, actualPath)
	return sharing, obj, err
}

46
internal/sharing/link.go Normal file
View File

@ -0,0 +1,46 @@
package sharing
import (
"context"
"strings"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/OpenList/v4/server/common"
"github.com/pkg/errors"
)
// link resolves a download link for a file inside a sharing. Relative
// URLs returned by the storage are made absolute against the API url.
// The root of a multi-file sharing has no link.
func link(ctx context.Context, sid, path string, args *LinkArgs) (*model.Sharing, *model.Link, model.Obj, error) {
	sharing, err := op.GetSharingById(sid, args.SharingListArgs.Refresh)
	if err != nil {
		return nil, nil, nil, errors.WithStack(errs.SharingNotFound)
	}
	if !sharing.Valid() {
		return sharing, nil, nil, errors.WithStack(errs.InvalidSharing)
	}
	if !sharing.Verify(args.Pwd) {
		return sharing, nil, nil, errors.WithStack(errs.WrongShareCode)
	}
	path = utils.FixAndCleanPath(path)
	if len(sharing.Files) != 1 && path == "/" {
		return nil, nil, nil, errors.New("cannot get sharing root link")
	}
	unwrapPath, err := op.GetSharingUnwrapPath(sharing, path)
	if err != nil {
		return nil, nil, nil, errors.WithMessage(err, "failed get sharing unwrap path")
	}
	storage, actualPath, err := op.GetStorageAndActualPath(unwrapPath)
	if err != nil {
		return nil, nil, nil, errors.WithMessage(err, "failed get sharing link")
	}
	l, obj, err := op.Link(ctx, storage, actualPath, args.LinkArgs)
	if err != nil {
		return nil, nil, nil, errors.WithMessage(err, "failed get sharing link")
	}
	if l.URL != "" && !strings.HasPrefix(l.URL, "http://") && !strings.HasPrefix(l.URL, "https://") {
		l.URL = common.GetApiUrl(ctx) + l.URL
	}
	return sharing, l, obj, nil
}

83
internal/sharing/list.go Normal file
View File

@ -0,0 +1,83 @@
package sharing
import (
"context"
stdpath "path"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/pkg/errors"
)
// list enumerates the objects visible at path inside a sharing.
//
// For a single-file sharing, or any non-root path, the path is
// unwrapped onto the underlying storage and listed there, merged with
// any virtual (mount-point) entries. For the root of a multi-file
// sharing, each shared path is resolved to one object (virtual entries
// take precedence; unresolvable or "/" entries are silently skipped).
// Results are ordered and folder-extracted per the sharing's settings.
func list(ctx context.Context, sid, path string, args model.SharingListArgs) (*model.Sharing, []model.Obj, error) {
	sharing, err := op.GetSharingById(sid, args.Refresh)
	if err != nil {
		return nil, nil, errors.WithStack(errs.SharingNotFound)
	}
	if !sharing.Valid() {
		return sharing, nil, errors.WithStack(errs.InvalidSharing)
	}
	if !sharing.Verify(args.Pwd) {
		return sharing, nil, errors.WithStack(errs.WrongShareCode)
	}
	path = utils.FixAndCleanPath(path)
	if len(sharing.Files) == 1 || path != "/" {
		unwrapPath, err := op.GetSharingUnwrapPath(sharing, path)
		if err != nil {
			return nil, nil, errors.WithMessage(err, "failed get sharing unwrap path")
		}
		virtualFiles := op.GetStorageVirtualFilesByPath(unwrapPath)
		// A missing storage is tolerated as long as virtual entries exist.
		storage, actualPath, err := op.GetStorageAndActualPath(unwrapPath)
		if err != nil && len(virtualFiles) == 0 {
			return nil, nil, errors.WithMessage(err, "failed list sharing")
		}
		var objs []model.Obj
		if storage != nil {
			objs, err = op.List(ctx, storage, actualPath, model.ListArgs{
				Refresh: args.Refresh,
				ReqPath: stdpath.Join(sid, path),
			})
			// Likewise tolerate a failed listing when virtual entries exist.
			if err != nil && len(virtualFiles) == 0 {
				return nil, nil, errors.WithMessage(err, "failed list sharing")
			}
		}
		om := model.NewObjMerge()
		objs = om.Merge(objs, virtualFiles...)
		model.SortFiles(objs, sharing.OrderBy, sharing.OrderDirection)
		model.ExtractFolder(objs, sharing.ExtractFolder)
		return sharing, objs, nil
	}
	// Root of a multi-file sharing: resolve each shared path to one object.
	objs := make([]model.Obj, 0, len(sharing.Files))
	for _, f := range sharing.Files {
		if f != "/" {
			// Prefer a virtual (mount-point) entry with the same base name.
			isVf := false
			virtualFiles := op.GetStorageVirtualFilesByPath(stdpath.Dir(f))
			for _, vf := range virtualFiles {
				if vf.GetName() == stdpath.Base(f) {
					objs = append(objs, vf)
					isVf = true
					break
				}
			}
			if isVf {
				continue
			}
		} else {
			// "/" cannot be represented as a single child entry; skip it.
			continue
		}
		storage, actualPath, err := op.GetStorageAndActualPath(f)
		if err != nil {
			continue
		}
		obj, err := op.Get(ctx, storage, actualPath)
		if err != nil {
			continue
		}
		objs = append(objs, obj)
	}
	model.SortFiles(objs, sharing.OrderBy, sharing.OrderDirection)
	model.ExtractFolder(objs, sharing.ExtractFolder)
	return sharing, objs, nil
}

View File

@ -0,0 +1,58 @@
package sharing
import (
"context"
"github.com/OpenListTeam/OpenList/v4/internal/model"
log "github.com/sirupsen/logrus"
)
// List lists objects in a sharing, logging any failure.
func List(ctx context.Context, sid, path string, args model.SharingListArgs) (*model.Sharing, []model.Obj, error) {
	s, objs, err := list(ctx, sid, path, args)
	if err == nil {
		return s, objs, nil
	}
	log.Errorf("failed list sharing %s/%s: %+v", sid, path, err)
	return nil, nil, err
}

// Get fetches a single object in a sharing, logging any failure.
func Get(ctx context.Context, sid, path string, args model.SharingListArgs) (*model.Sharing, model.Obj, error) {
	s, obj, err := get(ctx, sid, path, args)
	if err == nil {
		return s, obj, nil
	}
	log.Warnf("failed get sharing %s/%s: %s", sid, path, err)
	return nil, nil, err
}

// ArchiveMeta fetches archive metadata in a sharing, logging any failure.
func ArchiveMeta(ctx context.Context, sid, path string, args model.SharingArchiveMetaArgs) (*model.Sharing, *model.ArchiveMetaProvider, error) {
	s, meta, err := archiveMeta(ctx, sid, path, args)
	if err == nil {
		return s, meta, nil
	}
	log.Warnf("failed get sharing archive meta %s/%s: %s", sid, path, err)
	return nil, nil, err
}

// ArchiveList lists archive entries in a sharing, logging any failure.
func ArchiveList(ctx context.Context, sid, path string, args model.SharingArchiveListArgs) (*model.Sharing, []model.Obj, error) {
	s, objs, err := archiveList(ctx, sid, path, args)
	if err == nil {
		return s, objs, nil
	}
	log.Warnf("failed list sharing archive %s/%s: %s", sid, path, err)
	return nil, nil, err
}

// LinkArgs combines sharing list options with the usual link options.
type LinkArgs struct {
	model.SharingListArgs
	model.LinkArgs
}

// Link resolves a download link in a sharing, logging any failure.
func Link(ctx context.Context, sid, path string, args *LinkArgs) (*model.Sharing, *model.Link, model.Obj, error) {
	s, l, obj, err := link(ctx, sid, path, args)
	if err == nil {
		return s, l, obj, nil
	}
	log.Errorf("failed get sharing link %s/%s: %+v", sid, path, err)
	return nil, nil, nil, err
}

View File

@ -15,6 +15,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/pkg/buffer" "github.com/OpenListTeam/OpenList/v4/pkg/buffer"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range" "github.com/OpenListTeam/OpenList/v4/pkg/http_range"
"github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/rclone/rclone/lib/mmap"
"go4.org/readerutil" "go4.org/readerutil"
) )
@ -60,8 +61,12 @@ func (f *FileStream) IsForceStreamUpload() bool {
} }
func (f *FileStream) Close() error { func (f *FileStream) Close() error {
var err1, err2 error if f.peekBuff != nil {
f.peekBuff.Reset()
f.peekBuff = nil
}
var err1, err2 error
err1 = f.Closers.Close() err1 = f.Closers.Close()
if errors.Is(err1, os.ErrClosed) { if errors.Is(err1, os.ErrClosed) {
err1 = nil err1 = nil
@ -74,10 +79,6 @@ func (f *FileStream) Close() error {
f.tmpFile = nil f.tmpFile = nil
} }
} }
if f.peekBuff != nil {
f.peekBuff.Reset()
f.peekBuff = nil
}
return errors.Join(err1, err2) return errors.Join(err1, err2)
} }
@ -194,7 +195,19 @@ func (f *FileStream) cache(maxCacheSize int64) (model.File, error) {
f.oriReader = f.Reader f.oriReader = f.Reader
} }
bufSize := maxCacheSize - int64(f.peekBuff.Len()) bufSize := maxCacheSize - int64(f.peekBuff.Len())
buf := make([]byte, bufSize) var buf []byte
if conf.MmapThreshold > 0 && bufSize >= int64(conf.MmapThreshold) {
m, err := mmap.Alloc(int(bufSize))
if err == nil {
f.Add(utils.CloseFunc(func() error {
return mmap.Free(m)
}))
buf = m
}
}
if buf == nil {
buf = make([]byte, bufSize)
}
n, err := io.ReadFull(f.oriReader, buf) n, err := io.ReadFull(f.oriReader, buf)
if bufSize != int64(n) { if bufSize != int64(n) {
return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", bufSize, n, err) return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", bufSize, n, err)

View File

@ -7,11 +7,13 @@ import (
"io" "io"
"testing" "testing"
"github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range" "github.com/OpenListTeam/OpenList/v4/pkg/http_range"
) )
func TestFileStream_RangeRead(t *testing.T) { func TestFileStream_RangeRead(t *testing.T) {
conf.MaxBufferLimit = 16 * 1024 * 1024
type args struct { type args struct {
httpRange http_range.Range httpRange http_range.Range
} }
@ -71,7 +73,7 @@ func TestFileStream_RangeRead(t *testing.T) {
} }
}) })
} }
t.Run("after check", func(t *testing.T) { t.Run("after", func(t *testing.T) {
if f.GetFile() == nil { if f.GetFile() == nil {
t.Error("not cached") t.Error("not cached")
} }

View File

@ -8,13 +8,14 @@ import (
"fmt" "fmt"
"io" "io"
"net/http" "net/http"
"sync"
"github.com/OpenListTeam/OpenList/v4/internal/conf" "github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/net" "github.com/OpenListTeam/OpenList/v4/internal/net"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range" "github.com/OpenListTeam/OpenList/v4/pkg/http_range"
"github.com/OpenListTeam/OpenList/v4/pkg/pool"
"github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/rclone/rclone/lib/mmap"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
@ -153,26 +154,49 @@ func CacheFullAndHash(stream model.FileStreamer, up *model.UpdateProgress, hashT
type StreamSectionReader struct { type StreamSectionReader struct {
file model.FileStreamer file model.FileStreamer
off int64 off int64
bufPool *sync.Pool bufPool *pool.Pool[[]byte]
} }
func NewStreamSectionReader(file model.FileStreamer, maxBufferSize int, up *model.UpdateProgress) (*StreamSectionReader, error) { func NewStreamSectionReader(file model.FileStreamer, maxBufferSize int, up *model.UpdateProgress) (*StreamSectionReader, error) {
ss := &StreamSectionReader{file: file} ss := &StreamSectionReader{file: file}
if file.GetFile() == nil { if file.GetFile() != nil {
return ss, nil
}
maxBufferSize = min(maxBufferSize, int(file.GetSize())) maxBufferSize = min(maxBufferSize, int(file.GetSize()))
if maxBufferSize > conf.MaxBufferLimit { if maxBufferSize > conf.MaxBufferLimit {
_, err := file.CacheFullAndWriter(up, nil) _, err := file.CacheFullAndWriter(up, nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return ss, nil
}
if conf.MmapThreshold > 0 && maxBufferSize >= conf.MmapThreshold {
ss.bufPool = &pool.Pool[[]byte]{
New: func() []byte {
buf, err := mmap.Alloc(maxBufferSize)
if err == nil {
file.Add(utils.CloseFunc(func() error {
return mmap.Free(buf)
}))
} else { } else {
ss.bufPool = &sync.Pool{ buf = make([]byte, maxBufferSize)
New: func() any { }
return buf
},
}
} else {
ss.bufPool = &pool.Pool[[]byte]{
New: func() []byte {
return make([]byte, maxBufferSize) return make([]byte, maxBufferSize)
}, },
} }
} }
}
file.Add(utils.CloseFunc(func() error {
ss.bufPool.Reset()
return nil
}))
return ss, nil return ss, nil
} }
@ -184,7 +208,7 @@ func (ss *StreamSectionReader) GetSectionReader(off, length int64) (*SectionRead
if off != ss.off { if off != ss.off {
return nil, fmt.Errorf("stream not cached: request offset %d != current offset %d", off, ss.off) return nil, fmt.Errorf("stream not cached: request offset %d != current offset %d", off, ss.off)
} }
tempBuf := ss.bufPool.Get().([]byte) tempBuf := ss.bufPool.Get()
buf = tempBuf[:length] buf = tempBuf[:length]
n, err := io.ReadFull(ss.file, buf) n, err := io.ReadFull(ss.file, buf)
if int64(n) != length { if int64(n) != length {

37
pkg/pool/pool.go Normal file
View File

@ -0,0 +1,37 @@
package pool
import "sync"
// Pool is a minimal generic free-list guarded by a mutex.
//
// New produces a fresh item when the pool is empty and must be set
// before the first Get. MaxCap bounds how many idle items are retained
// (0 means unbounded). The zero value (plus New) is ready to use.
// Pool must not be copied after first use.
type Pool[T any] struct {
	New    func() T
	MaxCap int

	cache []T
	mu    sync.Mutex
}

// Get returns a cached item (LIFO order) or, when the pool is empty, a
// new one from New. New is invoked outside the lock so that slow
// constructors do not serialize concurrent Get/Put callers.
func (p *Pool[T]) Get() T {
	p.mu.Lock()
	if n := len(p.cache); n > 0 {
		item := p.cache[n-1]
		// Zero the vacated slot so the pool does not retain a hidden
		// reference to the handed-out item.
		var zero T
		p.cache[n-1] = zero
		p.cache = p.cache[:n-1]
		p.mu.Unlock()
		return item
	}
	p.mu.Unlock()
	return p.New()
}

// Put returns an item to the pool. The item is silently dropped when
// MaxCap is set and the pool is already full.
func (p *Pool[T]) Put(item T) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.MaxCap == 0 || len(p.cache) < p.MaxCap {
		p.cache = append(p.cache, item)
	}
}

// Reset discards all cached items; subsequent Gets fall back to New.
func (p *Pool[T]) Reset() {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.cache = nil
}

View File

@ -2,6 +2,9 @@ package common
import ( import (
"context" "context"
"fmt"
"html"
"net/http"
"strings" "strings"
"github.com/OpenListTeam/OpenList/v4/cmd/flags" "github.com/OpenListTeam/OpenList/v4/cmd/flags"
@ -38,6 +41,41 @@ func ErrorResp(c *gin.Context, err error, code int, l ...bool) {
//c.Abort() //c.Abort()
} }
// ErrorPage renders a minimal HTML error page carrying the standard
// HTTP status text and the (privacy-filtered, HTML-escaped) error
// message, then aborts the request. It also returns the standard HTTP
// status code.
// @param l: if true, log error
func ErrorPage(c *gin.Context, err error, code int, l ...bool) {
	if len(l) > 0 && l[0] {
		if flags.Debug || flags.Dev {
			log.Errorf("%+v", err)
		} else {
			log.Errorf("%v", err)
		}
	}
	status := fmt.Sprintf("%d %s", code, http.StatusText(code))
	// Local renamed from "html" to "page": the original name shadowed
	// the imported html package used for EscapeString below.
	page := fmt.Sprintf(`<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<meta name="color-scheme" content="dark light" />
<meta name="robots" content="noindex" />
<title>%s</title>
</head>
<body>
<h1>%s</h1>
<hr>
<p>%s</p>
</body>
</html>`,
		status, status, html.EscapeString(hidePrivacy(err.Error())))
	c.Data(code, "text/html; charset=utf-8", []byte(page))
	c.Abort()
}
func ErrorWithDataResp(c *gin.Context, err error, code int, data interface{}, l ...bool) { func ErrorWithDataResp(c *gin.Context, err error, code int, data interface{}, l ...bool) {
if len(l) > 0 && l[0] { if len(l) > 0 && l[0] {
if flags.Debug || flags.Dev { if flags.Debug || flags.Dev {

View File

@ -3,9 +3,9 @@ package handles
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io"
stdpath "path" stdpath "path"
"strings"
"github.com/OpenListTeam/OpenList/v4/internal/task"
"github.com/OpenListTeam/OpenList/v4/internal/archive/tool" "github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
"github.com/OpenListTeam/OpenList/v4/internal/conf" "github.com/OpenListTeam/OpenList/v4/internal/conf"
@ -15,6 +15,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/op" "github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/setting" "github.com/OpenListTeam/OpenList/v4/internal/setting"
"github.com/OpenListTeam/OpenList/v4/internal/sign" "github.com/OpenListTeam/OpenList/v4/internal/sign"
"github.com/OpenListTeam/OpenList/v4/internal/task"
"github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/OpenList/v4/server/common" "github.com/OpenListTeam/OpenList/v4/server/common"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
@ -71,13 +72,26 @@ func toContentResp(objs []model.ObjTree) []ArchiveContentResp {
return ret return ret
} }
func FsArchiveMeta(c *gin.Context) { func FsArchiveMetaSplit(c *gin.Context) {
var req ArchiveMetaReq var req ArchiveMetaReq
if err := c.ShouldBind(&req); err != nil { if err := c.ShouldBind(&req); err != nil {
common.ErrorResp(c, err, 400) common.ErrorResp(c, err, 400)
return return
} }
if strings.HasPrefix(req.Path, "/@s") {
req.Path = strings.TrimPrefix(req.Path, "/@s")
SharingArchiveMeta(c, &req)
return
}
user := c.Request.Context().Value(conf.UserKey).(*model.User) user := c.Request.Context().Value(conf.UserKey).(*model.User)
if user.IsGuest() && user.Disabled {
common.ErrorStrResp(c, "Guest user is disabled, login please", 401)
return
}
FsArchiveMeta(c, &req, user)
}
func FsArchiveMeta(c *gin.Context, req *ArchiveMetaReq, user *model.User) {
if !user.CanReadArchives() { if !user.CanReadArchives() {
common.ErrorResp(c, errs.PermissionDenied, 403) common.ErrorResp(c, errs.PermissionDenied, 403)
return return
@ -142,19 +156,27 @@ type ArchiveListReq struct {
InnerPath string `json:"inner_path" form:"inner_path"` InnerPath string `json:"inner_path" form:"inner_path"`
} }
type ArchiveListResp struct { func FsArchiveListSplit(c *gin.Context) {
Content []ObjResp `json:"content"`
Total int64 `json:"total"`
}
func FsArchiveList(c *gin.Context) {
var req ArchiveListReq var req ArchiveListReq
if err := c.ShouldBind(&req); err != nil { if err := c.ShouldBind(&req); err != nil {
common.ErrorResp(c, err, 400) common.ErrorResp(c, err, 400)
return return
} }
req.Validate() req.Validate()
if strings.HasPrefix(req.Path, "/@s") {
req.Path = strings.TrimPrefix(req.Path, "/@s")
SharingArchiveList(c, &req)
return
}
user := c.Request.Context().Value(conf.UserKey).(*model.User) user := c.Request.Context().Value(conf.UserKey).(*model.User)
if user.IsGuest() && user.Disabled {
common.ErrorStrResp(c, "Guest user is disabled, login please", 401)
return
}
FsArchiveList(c, &req, user)
}
func FsArchiveList(c *gin.Context, req *ArchiveListReq, user *model.User) {
if !user.CanReadArchives() { if !user.CanReadArchives() {
common.ErrorResp(c, errs.PermissionDenied, 403) common.ErrorResp(c, errs.PermissionDenied, 403)
return return
@ -201,7 +223,7 @@ func FsArchiveList(c *gin.Context) {
ret, _ := utils.SliceConvert(objs, func(src model.Obj) (ObjResp, error) { ret, _ := utils.SliceConvert(objs, func(src model.Obj) (ObjResp, error) {
return toObjsRespWithoutSignAndThumb(src), nil return toObjsRespWithoutSignAndThumb(src), nil
}) })
common.SuccessResp(c, ArchiveListResp{ common.SuccessResp(c, common.PageResp{
Content: ret, Content: ret,
Total: int64(total), Total: int64(total),
}) })
@ -298,7 +320,7 @@ func ArchiveDown(c *gin.Context) {
filename := stdpath.Base(innerPath) filename := stdpath.Base(innerPath)
storage, err := fs.GetStorage(archiveRawPath, &fs.GetStoragesArgs{}) storage, err := fs.GetStorage(archiveRawPath, &fs.GetStoragesArgs{})
if err != nil { if err != nil {
common.ErrorResp(c, err, 500) common.ErrorPage(c, err, 500)
return return
} }
if common.ShouldProxy(storage, filename) { if common.ShouldProxy(storage, filename) {
@ -318,7 +340,7 @@ func ArchiveDown(c *gin.Context) {
InnerPath: innerPath, InnerPath: innerPath,
}) })
if err != nil { if err != nil {
common.ErrorResp(c, err, 500) common.ErrorPage(c, err, 500)
return return
} }
redirect(c, link) redirect(c, link)
@ -332,7 +354,7 @@ func ArchiveProxy(c *gin.Context) {
filename := stdpath.Base(innerPath) filename := stdpath.Base(innerPath)
storage, err := fs.GetStorage(archiveRawPath, &fs.GetStoragesArgs{}) storage, err := fs.GetStorage(archiveRawPath, &fs.GetStoragesArgs{})
if err != nil { if err != nil {
common.ErrorResp(c, err, 500) common.ErrorPage(c, err, 500)
return return
} }
if canProxy(storage, filename) { if canProxy(storage, filename) {
@ -348,16 +370,34 @@ func ArchiveProxy(c *gin.Context) {
InnerPath: innerPath, InnerPath: innerPath,
}) })
if err != nil { if err != nil {
common.ErrorResp(c, err, 500) common.ErrorPage(c, err, 500)
return return
} }
proxy(c, link, file, storage.GetStorage().ProxyRange) proxy(c, link, file, storage.GetStorage().ProxyRange)
} else { } else {
common.ErrorStrResp(c, "proxy not allowed", 403) common.ErrorPage(c, errors.New("proxy not allowed"), 403)
return return
} }
} }
// proxyInternalExtract streams the content of an archive entry (already
// extracted server-side) to the client and closes the reader when done.
// rc is the entry's content stream, size its length in bytes, and fileName
// the name used for the Content-Disposition header and MIME-type lookup.
func proxyInternalExtract(c *gin.Context, rc io.ReadCloser, size int64, fileName string) {
	// Always close the streamer; a close failure is only logged because the
	// response may already have been (partially) written at that point.
	defer func() {
		if err := rc.Close(); err != nil {
			log.Errorf("failed to close file streamer, %v", err)
		}
	}()
	headers := map[string]string{
		"Referrer-Policy": "no-referrer",
		"Cache-Control":   "max-age=0, no-cache, no-store, must-revalidate",
	}
	headers["Content-Disposition"] = utils.GenerateContentDisposition(fileName)
	// Honor an explicit Content-Type from the request header; otherwise
	// derive the MIME type from the file name.
	contentType := c.Request.Header.Get("Content-Type")
	if contentType == "" {
		contentType = utils.GetMimeType(fileName)
	}
	c.DataFromReader(200, size, contentType, rc, headers)
}
func ArchiveInternalExtract(c *gin.Context) { func ArchiveInternalExtract(c *gin.Context) {
archiveRawPath := c.Request.Context().Value(conf.PathKey).(string) archiveRawPath := c.Request.Context().Value(conf.PathKey).(string)
innerPath := utils.FixAndCleanPath(c.Query("inner")) innerPath := utils.FixAndCleanPath(c.Query("inner"))
@ -373,25 +413,11 @@ func ArchiveInternalExtract(c *gin.Context) {
InnerPath: innerPath, InnerPath: innerPath,
}) })
if err != nil { if err != nil {
common.ErrorResp(c, err, 500) common.ErrorPage(c, err, 500)
return return
} }
defer func() {
if err := rc.Close(); err != nil {
log.Errorf("failed to close file streamer, %v", err)
}
}()
headers := map[string]string{
"Referrer-Policy": "no-referrer",
"Cache-Control": "max-age=0, no-cache, no-store, must-revalidate",
}
fileName := stdpath.Base(innerPath) fileName := stdpath.Base(innerPath)
headers["Content-Disposition"] = utils.GenerateContentDisposition(fileName) proxyInternalExtract(c, rc, size, fileName)
contentType := c.Request.Header.Get("Content-Type")
if contentType == "" {
contentType = utils.GetMimeType(fileName)
}
c.DataFromReader(200, size, contentType, rc, headers)
} }
func ArchiveExtensions(c *gin.Context) { func ArchiveExtensions(c *gin.Context) {

View File

@ -26,7 +26,7 @@ func Down(c *gin.Context) {
filename := stdpath.Base(rawPath) filename := stdpath.Base(rawPath)
storage, err := fs.GetStorage(rawPath, &fs.GetStoragesArgs{}) storage, err := fs.GetStorage(rawPath, &fs.GetStoragesArgs{})
if err != nil { if err != nil {
common.ErrorResp(c, err, 500) common.ErrorPage(c, err, 500)
return return
} }
if common.ShouldProxy(storage, filename) { if common.ShouldProxy(storage, filename) {
@ -40,7 +40,7 @@ func Down(c *gin.Context) {
Redirect: true, Redirect: true,
}) })
if err != nil { if err != nil {
common.ErrorResp(c, err, 500) common.ErrorPage(c, err, 500)
return return
} }
redirect(c, link) redirect(c, link)
@ -52,7 +52,7 @@ func Proxy(c *gin.Context) {
filename := stdpath.Base(rawPath) filename := stdpath.Base(rawPath)
storage, err := fs.GetStorage(rawPath, &fs.GetStoragesArgs{}) storage, err := fs.GetStorage(rawPath, &fs.GetStoragesArgs{})
if err != nil { if err != nil {
common.ErrorResp(c, err, 500) common.ErrorPage(c, err, 500)
return return
} }
if canProxy(storage, filename) { if canProxy(storage, filename) {
@ -67,12 +67,12 @@ func Proxy(c *gin.Context) {
Type: c.Query("type"), Type: c.Query("type"),
}) })
if err != nil { if err != nil {
common.ErrorResp(c, err, 500) common.ErrorPage(c, err, 500)
return return
} }
proxy(c, link, file, storage.GetStorage().ProxyRange) proxy(c, link, file, storage.GetStorage().ProxyRange)
} else { } else {
common.ErrorStrResp(c, "proxy not allowed", 403) common.ErrorPage(c, errors.New("proxy not allowed"), 403)
return return
} }
} }
@ -89,7 +89,7 @@ func redirect(c *gin.Context, link *model.Link) {
} }
link.URL, err = utils.InjectQuery(link.URL, query) link.URL, err = utils.InjectQuery(link.URL, query)
if err != nil { if err != nil {
common.ErrorResp(c, err, 500) common.ErrorPage(c, err, 500)
return return
} }
} }
@ -106,7 +106,7 @@ func proxy(c *gin.Context, link *model.Link, file model.Obj, proxyRange bool) {
} }
link.URL, err = utils.InjectQuery(link.URL, query) link.URL, err = utils.InjectQuery(link.URL, query)
if err != nil { if err != nil {
common.ErrorResp(c, err, 500) common.ErrorPage(c, err, 500)
return return
} }
} }
@ -114,9 +114,8 @@ func proxy(c *gin.Context, link *model.Link, file model.Obj, proxyRange bool) {
link = common.ProxyRange(c, link, file.GetSize()) link = common.ProxyRange(c, link, file.GetSize())
} }
Writer := &common.WrittenResponseWriter{ResponseWriter: c.Writer} Writer := &common.WrittenResponseWriter{ResponseWriter: c.Writer}
raw, _ := strconv.ParseBool(c.DefaultQuery("raw", "false"))
//优先处理md文件 if utils.Ext(file.GetName()) == "md" && setting.GetBool(conf.FilterReadMeScripts) && !raw {
if utils.Ext(file.GetName()) == "md" && setting.GetBool(conf.FilterReadMeScripts) {
buf := bytes.NewBuffer(make([]byte, 0, file.GetSize())) buf := bytes.NewBuffer(make([]byte, 0, file.GetSize()))
w := &common.InterceptResponseWriter{ResponseWriter: Writer, Writer: buf} w := &common.InterceptResponseWriter{ResponseWriter: Writer, Writer: buf}
err = common.Proxy(w, c.Request, link, file) err = common.Proxy(w, c.Request, link, file)
@ -149,9 +148,9 @@ func proxy(c *gin.Context, link *model.Link, file model.Obj, proxyRange bool) {
log.Errorf("%s %s local proxy error: %+v", c.Request.Method, c.Request.URL.Path, err) log.Errorf("%s %s local proxy error: %+v", c.Request.Method, c.Request.URL.Path, err)
} else { } else {
if statusCode, ok := errors.Unwrap(err).(net.ErrorHttpStatusCode); ok { if statusCode, ok := errors.Unwrap(err).(net.ErrorHttpStatusCode); ok {
common.ErrorResp(c, err, int(statusCode), true) common.ErrorPage(c, err, int(statusCode), true)
} else { } else {
common.ErrorResp(c, err, 500, true) common.ErrorPage(c, err, 500, true)
} }
} }
} }

View File

@ -56,14 +56,27 @@ type FsListResp struct {
Provider string `json:"provider"` Provider string `json:"provider"`
} }
func FsList(c *gin.Context) { func FsListSplit(c *gin.Context) {
var req ListReq var req ListReq
if err := c.ShouldBind(&req); err != nil { if err := c.ShouldBind(&req); err != nil {
common.ErrorResp(c, err, 400) common.ErrorResp(c, err, 400)
return return
} }
req.Validate() req.Validate()
if strings.HasPrefix(req.Path, "/@s") {
req.Path = strings.TrimPrefix(req.Path, "/@s")
SharingList(c, &req)
return
}
user := c.Request.Context().Value(conf.UserKey).(*model.User) user := c.Request.Context().Value(conf.UserKey).(*model.User)
if user.IsGuest() && user.Disabled {
common.ErrorStrResp(c, "Guest user is disabled, login please", 401)
return
}
FsList(c, &req, user)
}
func FsList(c *gin.Context, req *ListReq, user *model.User) {
reqPath, err := user.JoinPath(req.Path) reqPath, err := user.JoinPath(req.Path)
if err != nil { if err != nil {
common.ErrorResp(c, err, 403) common.ErrorResp(c, err, 403)
@ -243,13 +256,26 @@ type FsGetResp struct {
Related []ObjResp `json:"related"` Related []ObjResp `json:"related"`
} }
func FsGet(c *gin.Context) { func FsGetSplit(c *gin.Context) {
var req FsGetReq var req FsGetReq
if err := c.ShouldBind(&req); err != nil { if err := c.ShouldBind(&req); err != nil {
common.ErrorResp(c, err, 400) common.ErrorResp(c, err, 400)
return return
} }
if strings.HasPrefix(req.Path, "/@s") {
req.Path = strings.TrimPrefix(req.Path, "/@s")
SharingGet(c, &req)
return
}
user := c.Request.Context().Value(conf.UserKey).(*model.User) user := c.Request.Context().Value(conf.UserKey).(*model.User)
if user.IsGuest() && user.Disabled {
common.ErrorStrResp(c, "Guest user is disabled, login please", 401)
return
}
FsGet(c, &req, user)
}
func FsGet(c *gin.Context, req *FsGetReq, user *model.User) {
reqPath, err := user.JoinPath(req.Path) reqPath, err := user.JoinPath(req.Path)
if err != nil { if err != nil {
common.ErrorResp(c, err, 403) common.ErrorResp(c, err, 403)

577
server/handles/sharing.go Normal file
View File

@ -0,0 +1,577 @@
package handles
import (
"fmt"
stdpath "path"
"strings"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/setting"
"github.com/OpenListTeam/OpenList/v4/internal/sharing"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/OpenList/v4/server/common"
"github.com/OpenListTeam/go-cache"
"github.com/gin-gonic/gin"
"github.com/pkg/errors"
)
// SharingGet resolves a single object inside a share and returns its
// metadata. req.Path has the form "/<share id>/<path inside share>";
// req.Password carries the share password (if any).
func SharingGet(c *gin.Context, req *FsGetReq) {
	// Split "/<sid>/<path>" into the share id and the in-share path.
	sid, path, _ := strings.Cut(strings.TrimPrefix(req.Path, "/"), "/")
	if sid == "" {
		common.ErrorStrResp(c, "invalid share id", 400)
		return
	}
	s, obj, err := sharing.Get(c.Request.Context(), sid, path, model.SharingListArgs{
		Refresh: false,
		Pwd:     req.Password,
	})
	if dealError(c, err) {
		return
	}
	// Best-effort access counting; failures are deliberately ignored.
	_ = countAccess(c.ClientIP(), s)
	fakePath := fmt.Sprintf("/%s/%s", sid, path)
	// Only files get a raw download URL (served under the /sd route);
	// directories have none.
	url := ""
	if !obj.IsDir() {
		url = fmt.Sprintf("%s/sd%s", common.GetApiUrl(c), utils.EncodePath(fakePath, true))
		if s.Pwd != "" {
			url += "?pwd=" + s.Pwd
		}
	}
	thumb, _ := model.GetThumb(obj)
	// Id and Sign are intentionally blank: share responses expose neither
	// internal ids nor signing tokens.
	common.SuccessResp(c, FsGetResp{
		ObjResp: ObjResp{
			Id:          "",
			Path:        fakePath,
			Name:        obj.GetName(),
			Size:        obj.GetSize(),
			IsDir:       obj.IsDir(),
			Modified:    obj.ModTime(),
			Created:     obj.CreateTime(),
			HashInfoStr: obj.GetHash().String(),
			HashInfo:    obj.GetHash().Export(),
			Sign:        "",
			Type:        utils.GetFileType(obj.GetName()),
			Thumb:       thumb,
		},
		RawURL:   url,
		Readme:   s.Readme,
		Header:   s.Header,
		Provider: "unknown",
		Related:  nil,
	})
}
// SharingList lists the objects under a directory inside a share.
// req.Path has the form "/<share id>/<path inside share>"; results are
// paginated via req.PageReq.
func SharingList(c *gin.Context, req *ListReq) {
	sid, path, _ := strings.Cut(strings.TrimPrefix(req.Path, "/"), "/")
	if sid == "" {
		common.ErrorStrResp(c, "invalid share id", 400)
		return
	}
	s, objs, err := sharing.List(c.Request.Context(), sid, path, model.SharingListArgs{
		Refresh: req.Refresh,
		Pwd:     req.Password,
	})
	if dealError(c, err) {
		return
	}
	// Best-effort access counting; failures are deliberately ignored.
	_ = countAccess(c.ClientIP(), s)
	fakePath := fmt.Sprintf("/%s/%s", sid, path)
	total, objs := pagination(objs, &req.PageReq)
	// Id and Sign stay blank and Write is always false: shares are
	// anonymous, unsigned, read-only views.
	common.SuccessResp(c, FsListResp{
		Content: utils.MustSliceConvert(objs, func(obj model.Obj) ObjResp {
			thumb, _ := model.GetThumb(obj)
			return ObjResp{
				Id:          "",
				Path:        stdpath.Join(fakePath, obj.GetName()),
				Name:        obj.GetName(),
				Size:        obj.GetSize(),
				IsDir:       obj.IsDir(),
				Modified:    obj.ModTime(),
				Created:     obj.CreateTime(),
				HashInfoStr: obj.GetHash().String(),
				HashInfo:    obj.GetHash().Export(),
				Sign:        "",
				Thumb:       thumb,
				Type:        utils.GetObjType(obj.GetName(), obj.IsDir()),
			}
		}),
		Total:    int64(total),
		Readme:   s.Readme,
		Header:   s.Header,
		Write:    false,
		Provider: "unknown",
	})
}
// SharingArchiveMeta returns the metadata (comment, encryption flag, file
// tree) of an archive file inside a share. Gated by the global
// ShareArchivePreview setting.
func SharingArchiveMeta(c *gin.Context, req *ArchiveMetaReq) {
	if !setting.GetBool(conf.ShareArchivePreview) {
		common.ErrorStrResp(c, "sharing archives previewing is not allowed", 403)
		return
	}
	sid, path, _ := strings.Cut(strings.TrimPrefix(req.Path, "/"), "/")
	if sid == "" {
		common.ErrorStrResp(c, "invalid share id", 400)
		return
	}
	// ArchivePass is the password of the archive itself; req.Password
	// (below) is the password of the share.
	archiveArgs := model.ArchiveArgs{
		LinkArgs: model.LinkArgs{
			Header: c.Request.Header,
			Type:   c.Query("type"),
		},
		Password: req.ArchivePass,
	}
	s, ret, err := sharing.ArchiveMeta(c.Request.Context(), sid, path, model.SharingArchiveMetaArgs{
		ArchiveMetaArgs: model.ArchiveMetaArgs{
			ArchiveArgs: archiveArgs,
			Refresh:     req.Refresh,
		},
		Pwd: req.Password,
	})
	if dealError(c, err) {
		return
	}
	// Best-effort access counting; failures are deliberately ignored.
	_ = countAccess(c.ClientIP(), s)
	fakePath := fmt.Sprintf("/%s/%s", sid, path)
	// Raw archive downloads for shares are served under the /sad route.
	url := fmt.Sprintf("%s/sad%s", common.GetApiUrl(c), utils.EncodePath(fakePath, true))
	if s.Pwd != "" {
		url += "?pwd=" + s.Pwd
	}
	common.SuccessResp(c, ArchiveMetaResp{
		Comment:     ret.GetComment(),
		IsEncrypted: ret.IsEncrypted(),
		Content:     toContentResp(ret.GetTree()),
		Sort:        ret.Sort,
		RawURL:      url,
		Sign:        "",
	})
}
// SharingArchiveList lists entries under an inner path of an archive file
// inside a share, paginated. Gated by the global ShareArchivePreview
// setting.
func SharingArchiveList(c *gin.Context, req *ArchiveListReq) {
	if !setting.GetBool(conf.ShareArchivePreview) {
		common.ErrorStrResp(c, "sharing archives previewing is not allowed", 403)
		return
	}
	sid, path, _ := strings.Cut(strings.TrimPrefix(req.Path, "/"), "/")
	if sid == "" {
		common.ErrorStrResp(c, "invalid share id", 400)
		return
	}
	// ArchivePass unlocks the archive itself; req.Password (below) is the
	// share password.
	innerArgs := model.ArchiveInnerArgs{
		ArchiveArgs: model.ArchiveArgs{
			LinkArgs: model.LinkArgs{
				Header: c.Request.Header,
				Type:   c.Query("type"),
			},
			Password: req.ArchivePass,
		},
		InnerPath: utils.FixAndCleanPath(req.InnerPath),
	}
	s, objs, err := sharing.ArchiveList(c.Request.Context(), sid, path, model.SharingArchiveListArgs{
		ArchiveListArgs: model.ArchiveListArgs{
			ArchiveInnerArgs: innerArgs,
			Refresh:          req.Refresh,
		},
		Pwd: req.Password,
	})
	if dealError(c, err) {
		return
	}
	// Best-effort access counting; failures are deliberately ignored.
	_ = countAccess(c.ClientIP(), s)
	total, objs := pagination(objs, &req.PageReq)
	ret, _ := utils.SliceConvert(objs, func(src model.Obj) (ObjResp, error) {
		return toObjsRespWithoutSignAndThumb(src), nil
	})
	common.SuccessResp(c, common.PageResp{
		Content: ret,
		Total:   int64(total),
	})
}
// SharingDown serves a file download from a share. The share id and
// in-share path come from the request context (set by middleware); the
// share password is taken from the "pwd" query parameter. Depending on
// settings and storage configuration the file is either proxied through
// this server or the client is redirected to the storage's own link.
func SharingDown(c *gin.Context) {
	sid := c.Request.Context().Value(conf.SharingIDKey).(string)
	path := c.Request.Context().Value(conf.PathKey).(string)
	path = utils.FixAndCleanPath(path)
	pwd := c.Query("pwd")
	s, err := op.GetSharingById(sid)
	// Validate the share only when the lookup succeeded; the checks are
	// ordered so the first failure wins.
	if err == nil {
		if !s.Valid() {
			err = errs.InvalidSharing
		} else if !s.Verify(pwd) {
			err = errs.WrongShareCode
		} else if len(s.Files) != 1 && path == "/" {
			// A multi-file share root is a virtual directory, not a file.
			err = errors.New("cannot get sharing root link")
		}
	}
	if dealErrorPage(c, err) {
		return
	}
	unwrapPath, err := op.GetSharingUnwrapPath(s, path)
	if err != nil {
		common.ErrorPage(c, errors.New("failed get sharing unwrap path"), 500)
		return
	}
	storage, actualPath, err := op.GetStorageAndActualPath(unwrapPath)
	if dealErrorPage(c, err) {
		return
	}
	if setting.GetBool(conf.ShareForceProxy) || common.ShouldProxy(storage, stdpath.Base(actualPath)) {
		// Without an explicit "d" (direct) query flag, prefer redirecting
		// to a configured down-proxy URL when one is available.
		if _, ok := c.GetQuery("d"); !ok {
			if url := common.GenerateDownProxyURL(storage.GetStorage(), unwrapPath); url != "" {
				c.Redirect(302, url)
				_ = countAccess(c.ClientIP(), s)
				return
			}
		}
		link, obj, err := op.Link(c.Request.Context(), storage, actualPath, model.LinkArgs{
			Header: c.Request.Header,
			Type:   c.Query("type"),
		})
		if err != nil {
			common.ErrorPage(c, errors.WithMessage(err, "failed get sharing link"), 500)
			return
		}
		// Count the access only after the link was obtained successfully.
		_ = countAccess(c.ClientIP(), s)
		proxy(c, link, obj, storage.GetStorage().ProxyRange)
	} else {
		link, _, err := op.Link(c.Request.Context(), storage, actualPath, model.LinkArgs{
			IP:       c.ClientIP(),
			Header:   c.Request.Header,
			Type:     c.Query("type"),
			Redirect: true,
		})
		if err != nil {
			common.ErrorPage(c, errors.WithMessage(err, "failed get sharing link"), 500)
			return
		}
		_ = countAccess(c.ClientIP(), s)
		redirect(c, link)
	}
}
// SharingArchiveExtract serves a single entry extracted from an archive file
// inside a share. Gated by the global ShareArchivePreview setting. The
// share id and in-share path come from the request context; "pwd" is the
// share password, "inner" the path inside the archive, and "pass" the
// archive's own password.
func SharingArchiveExtract(c *gin.Context) {
	if !setting.GetBool(conf.ShareArchivePreview) {
		common.ErrorPage(c, errors.New("sharing archives previewing is not allowed"), 403)
		return
	}
	sid := c.Request.Context().Value(conf.SharingIDKey).(string)
	path := c.Request.Context().Value(conf.PathKey).(string)
	path = utils.FixAndCleanPath(path)
	pwd := c.Query("pwd")
	innerPath := utils.FixAndCleanPath(c.Query("inner"))
	archivePass := c.Query("pass")
	s, err := op.GetSharingById(sid)
	// Validate the share only when the lookup succeeded; the first failing
	// check wins.
	if err == nil {
		if !s.Valid() {
			err = errs.InvalidSharing
		} else if !s.Verify(pwd) {
			err = errs.WrongShareCode
		} else if len(s.Files) != 1 && path == "/" {
			// A multi-file share root is a virtual directory, not an archive.
			err = errors.New("cannot extract sharing root")
		}
	}
	if dealErrorPage(c, err) {
		return
	}
	unwrapPath, err := op.GetSharingUnwrapPath(s, path)
	if err != nil {
		common.ErrorPage(c, errors.New("failed get sharing unwrap path"), 500)
		return
	}
	storage, actualPath, err := op.GetStorageAndActualPath(unwrapPath)
	if dealErrorPage(c, err) {
		return
	}
	args := model.ArchiveInnerArgs{
		ArchiveArgs: model.ArchiveArgs{
			LinkArgs: model.LinkArgs{
				Header: c.Request.Header,
				Type:   c.Query("type"),
			},
			Password: archivePass,
		},
		InnerPath: innerPath,
	}
	// Prefer the driver's native extraction when supported; otherwise fall
	// back to extracting the entry server-side and streaming it.
	if _, ok := storage.(driver.ArchiveReader); ok {
		if setting.GetBool(conf.ShareForceProxy) || common.ShouldProxy(storage, stdpath.Base(actualPath)) {
			link, obj, err := op.DriverExtract(c.Request.Context(), storage, actualPath, args)
			if dealErrorPage(c, err) {
				return
			}
			proxy(c, link, obj, storage.GetStorage().ProxyRange)
		} else {
			args.Redirect = true
			link, _, err := op.DriverExtract(c.Request.Context(), storage, actualPath, args)
			if dealErrorPage(c, err) {
				return
			}
			redirect(c, link)
		}
	} else {
		rc, size, err := op.InternalExtract(c.Request.Context(), storage, actualPath, args)
		if dealErrorPage(c, err) {
			return
		}
		fileName := stdpath.Base(innerPath)
		// proxyInternalExtract takes ownership of rc and closes it.
		proxyInternalExtract(c, rc, size, fileName)
	}
}
// dealError maps a sharing-related error to a JSON error response and
// reports whether anything was written. It returns false for a nil error,
// so callers can use it as a guard: if dealError(c, err) { return }.
func dealError(c *gin.Context, err error) bool {
	if err == nil {
		return false
	}
	switch {
	case errors.Is(err, errs.SharingNotFound):
		common.ErrorStrResp(c, "the share does not exist", 500)
	case errors.Is(err, errs.InvalidSharing):
		common.ErrorStrResp(c, "the share has expired or is no longer valid", 500)
	case errors.Is(err, errs.WrongShareCode):
		common.ErrorResp(c, err, 403)
	case errors.Is(err, errs.WrongArchivePassword):
		common.ErrorResp(c, err, 202)
	default:
		common.ErrorResp(c, err, 500)
	}
	return true
}
// dealErrorPage is the HTML-page counterpart of dealError: it maps a
// sharing-related error to an error page and reports whether anything was
// written. A nil error yields false so it can be used as a guard.
func dealErrorPage(c *gin.Context, err error) bool {
	if err == nil {
		return false
	}
	switch {
	case errors.Is(err, errs.SharingNotFound):
		common.ErrorPage(c, errors.New("the share does not exist"), 500)
	case errors.Is(err, errs.InvalidSharing):
		common.ErrorPage(c, errors.New("the share has expired or is no longer valid"), 500)
	case errors.Is(err, errs.WrongShareCode):
		common.ErrorPage(c, err, 403)
	case errors.Is(err, errs.WrongArchivePassword):
		common.ErrorPage(c, err, 202)
	default:
		common.ErrorPage(c, err, 500)
	}
	return true
}
// SharingResp is the API representation of a share, embedding the stored
// model and adding the creator's display name and role.
type SharingResp struct {
	*model.Sharing
	CreatorName string `json:"creator"`
	CreatorRole int    `json:"creator_role"`
}
// GetSharing returns one share by id (query parameter "id"). Only an admin
// or the share's creator may view it; all other cases — including lookup
// failures — report "not found" so the share's existence is not revealed.
func GetSharing(c *gin.Context) {
	sid := c.Query("id")
	user := c.Request.Context().Value(conf.UserKey).(*model.User)
	s, err := op.GetSharingById(sid)
	// NOTE(review): this compares s.Creator.ID while the sibling handlers
	// (UpdateSharing, DeleteSharing, SetEnableSharing) compare s.CreatorId —
	// presumably equivalent, but worth confirming.
	if err != nil || (!user.IsAdmin() && s.Creator.ID != user.ID) {
		common.ErrorStrResp(c, "sharing not found", 404)
		return
	}
	common.SuccessResp(c, SharingResp{
		Sharing:     s,
		CreatorName: s.Creator.Username,
		CreatorRole: s.Creator.Role,
	})
}
// ListSharings returns a paginated list of shares. Admins see every share;
// other users see only the shares they created.
func ListSharings(c *gin.Context) {
	var req model.PageReq
	if err := c.ShouldBind(&req); err != nil {
		common.ErrorResp(c, err, 400)
		return
	}
	// Normalize page/per-page values.
	req.Validate()
	user := c.Request.Context().Value(conf.UserKey).(*model.User)
	var sharings []model.Sharing
	var total int64
	var err error
	if user.IsAdmin() {
		sharings, total, err = op.GetSharings(req.Page, req.PerPage)
	} else {
		sharings, total, err = op.GetSharingsByCreatorId(user.ID, req.Page, req.PerPage)
	}
	if err != nil {
		common.ErrorResp(c, err, 500, true)
		return
	}
	common.SuccessResp(c, common.PageResp{
		Content: utils.MustSliceConvert(sharings, func(s model.Sharing) SharingResp {
			return SharingResp{
				Sharing:     &s,
				CreatorName: s.Creator.Username,
				CreatorRole: s.Creator.Role,
			}
		}),
		Total: total,
	})
}
// CreateSharingReq is the request body for creating a share.
type CreateSharingReq struct {
	// Files is the list of absolute paths included in the share.
	Files []string `json:"files"`
	// Expires is the optional expiry time; nil means the share never expires.
	Expires *time.Time `json:"expires"`
	// Pwd is the optional access password for the share.
	Pwd string `json:"pwd"`
	// MaxAccessed caps how often the share may be accessed.
	MaxAccessed int `json:"max_accessed"`
	// Disabled creates the share in a disabled state.
	Disabled bool `json:"disabled"`
	Remark   string `json:"remark"`
	Readme   string `json:"readme"`
	Header   string `json:"header"`
	model.Sort
}

// UpdateSharingReq is the request body for updating an existing share,
// extending CreateSharingReq with the share id and its current access count.
type UpdateSharingReq struct {
	ID       string `json:"id"`
	Accessed int    `json:"accessed"`
	CreateSharingReq
}
// UpdateSharing replaces the mutable fields of an existing share. The caller
// must have the share permission, every shared path must lie inside the
// caller's base path (admins excepted), and only an admin or the share's
// creator may update it.
func UpdateSharing(c *gin.Context) {
	var req UpdateSharingReq
	if err := c.ShouldBind(&req); err != nil {
		common.ErrorResp(c, err, 400)
		return
	}
	if len(req.Files) == 0 || (len(req.Files) == 1 && req.Files[0] == "") {
		common.ErrorStrResp(c, "must add at least 1 object", 400)
		return
	}
	user := c.Request.Context().Value(conf.UserKey).(*model.User)
	if !user.CanShare() {
		common.ErrorStrResp(c, "permission denied", 403)
		return
	}
	// Normalize each path in place and confine non-admins to their base path.
	for i, s := range req.Files {
		s = utils.FixAndCleanPath(s)
		req.Files[i] = s
		if !user.IsAdmin() && !strings.HasPrefix(s, user.BasePath) {
			// NOTE(review): 500 for a permission failure looks unintended —
			// 403 would match the check above; confirm before changing.
			common.ErrorStrResp(c, fmt.Sprintf("permission denied to share path [%s]", s), 500)
			return
		}
	}
	s, err := op.GetSharingById(req.ID)
	if err != nil || (!user.IsAdmin() && s.CreatorId != user.ID) {
		common.ErrorStrResp(c, "sharing not found", 404)
		return
	}
	s.Files = req.Files
	s.Expires = req.Expires
	s.Pwd = req.Pwd
	s.Accessed = req.Accessed
	s.MaxAccessed = req.MaxAccessed
	s.Disabled = req.Disabled
	s.Sort = req.Sort
	s.Header = req.Header
	s.Readme = req.Readme
	s.Remark = req.Remark
	if err = op.UpdateSharing(s); err != nil {
		common.ErrorResp(c, err, 500)
	} else {
		common.SuccessResp(c, SharingResp{
			Sharing:     s,
			CreatorName: s.Creator.Username,
			CreatorRole: s.Creator.Role,
		})
	}
}
// CreateSharing creates a new share from the request body. The caller must
// have the share permission and, unless an admin, every shared path must lie
// inside the caller's base path. On success the generated share id is
// returned in the response.
func CreateSharing(c *gin.Context) {
	var req CreateSharingReq
	var err error
	if err = c.ShouldBind(&req); err != nil {
		common.ErrorResp(c, err, 400)
		return
	}
	if len(req.Files) == 0 || (len(req.Files) == 1 && req.Files[0] == "") {
		common.ErrorStrResp(c, "must add at least 1 object", 400)
		return
	}
	user := c.Request.Context().Value(conf.UserKey).(*model.User)
	if !user.CanShare() {
		common.ErrorStrResp(c, "permission denied", 403)
		return
	}
	// Normalize each path in place and confine non-admins to their base path.
	for i, s := range req.Files {
		s = utils.FixAndCleanPath(s)
		req.Files[i] = s
		if !user.IsAdmin() && !strings.HasPrefix(s, user.BasePath) {
			// NOTE(review): 500 for a permission failure looks unintended —
			// 403 would match the check above; confirm before changing.
			common.ErrorStrResp(c, fmt.Sprintf("permission denied to share path [%s]", s), 500)
			return
		}
	}
	s := &model.Sharing{
		SharingDB: &model.SharingDB{
			Expires:     req.Expires,
			Pwd:         req.Pwd,
			Accessed:    0,
			MaxAccessed: req.MaxAccessed,
			Disabled:    req.Disabled,
			Sort:        req.Sort,
			Remark:      req.Remark,
			Readme:      req.Readme,
			Header:      req.Header,
		},
		Files:   req.Files,
		Creator: user,
	}
	var id string
	if id, err = op.CreateSharing(s); err != nil {
		common.ErrorResp(c, err, 500)
	} else {
		// Attach the generated id before echoing the share back.
		s.ID = id
		common.SuccessResp(c, SharingResp{
			Sharing:     s,
			CreatorName: s.Creator.Username,
			CreatorRole: s.Creator.Role,
		})
	}
}
// DeleteSharing removes a share by id (query parameter "id"). Only an admin
// or the share's creator may delete it; all other cases — including lookup
// failures — report "not found" so the share's existence is not revealed.
func DeleteSharing(c *gin.Context) {
	sid := c.Query("id")
	user := c.Request.Context().Value(conf.UserKey).(*model.User)
	s, err := op.GetSharingById(sid)
	if err != nil || (!user.IsAdmin() && s.CreatorId != user.ID) {
		// Use a fixed message instead of ErrorResp(c, err, 404): on the
		// permission-denied branch err is nil, so formatting it would be
		// wrong, and this matches GetSharing/UpdateSharing/SetEnableSharing.
		common.ErrorStrResp(c, "sharing not found", 404)
		return
	}
	if err = op.DeleteSharing(sid); err != nil {
		common.ErrorResp(c, err, 500)
	} else {
		common.SuccessResp(c)
	}
}
// SetEnableSharing returns a handler that sets a share's disabled flag to
// the given value (disable=true disables, disable=false re-enables). Only an
// admin or the share's creator may toggle it.
func SetEnableSharing(disable bool) func(ctx *gin.Context) {
	return func(c *gin.Context) {
		sid := c.Query("id")
		user := c.Request.Context().Value(conf.UserKey).(*model.User)
		s, err := op.GetSharingById(sid)
		if err != nil || (!user.IsAdmin() && s.CreatorId != user.ID) {
			common.ErrorStrResp(c, "sharing not found", 404)
			return
		}
		s.Disabled = disable
		// The second argument to UpdateSharing skips full revalidation —
		// presumably a "flags only" update; confirm against op.UpdateSharing.
		if err = op.UpdateSharing(s, true); err != nil {
			common.ErrorResp(c, err, 500)
		} else {
			common.SuccessResp(c)
		}
	}
}
var (
	// AccessCache remembers (share id, client IP) pairs whose access was
	// recently counted, so repeat hits are deduplicated.
	AccessCache = cache.NewMemCache[interface{}]()
	// AccessCountDelay is how long an access from the same IP to the same
	// share is considered already counted.
	AccessCountDelay = 30 * time.Minute
)
// countAccess increments the share's access counter at most once per client
// IP within AccessCountDelay, using AccessCache for deduplication. It
// returns the error from persisting the updated counter, or nil when the
// access was already counted recently.
func countAccess(ip string, s *model.Sharing) error {
	key := fmt.Sprintf("%s:%s", s.ID, ip)
	_, ok := AccessCache.Get(key)
	if !ok {
		// NOTE(review): the Get/Set pair is not atomic, so two concurrent
		// requests from one IP could both count — acceptable for a
		// best-effort counter, but worth confirming.
		AccessCache.Set(key, struct{}{}, cache.WithEx[interface{}](AccessCountDelay))
		s.Accessed += 1
		return op.UpdateSharing(s, true)
	}
	return nil
}

View File

@ -14,7 +14,8 @@ import (
// Auth is a middleware that checks if the user is logged in. // Auth is a middleware that checks if the user is logged in.
// if token is empty, set user to guest // if token is empty, set user to guest
func Auth(c *gin.Context) { func Auth(allowDisabledGuest bool) func(c *gin.Context) {
return func(c *gin.Context) {
token := c.GetHeader("Authorization") token := c.GetHeader("Authorization")
if subtle.ConstantTimeCompare([]byte(token), []byte(setting.GetStr(conf.Token))) == 1 { if subtle.ConstantTimeCompare([]byte(token), []byte(setting.GetStr(conf.Token))) == 1 {
admin, err := op.GetAdmin() admin, err := op.GetAdmin()
@ -35,7 +36,7 @@ func Auth(c *gin.Context) {
c.Abort() c.Abort()
return return
} }
if guest.Disabled { if !allowDisabledGuest && guest.Disabled {
common.ErrorStrResp(c, "Guest user is disabled, login please", 401) common.ErrorStrResp(c, "Guest user is disabled, login please", 401)
c.Abort() c.Abort()
return return
@ -72,6 +73,7 @@ func Auth(c *gin.Context) {
log.Debugf("use login token: %+v", user) log.Debugf("use login token: %+v", user)
c.Next() c.Next()
} }
}
func Authn(c *gin.Context) { func Authn(c *gin.Context) {
token := c.GetHeader("Authorization") token := c.GetHeader("Authorization")

View File

@ -15,14 +15,19 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
) )
func Down(verifyFunc func(string, string) error) func(c *gin.Context) { func PathParse(c *gin.Context) {
return func(c *gin.Context) {
rawPath := parsePath(c.Param("path")) rawPath := parsePath(c.Param("path"))
common.GinWithValue(c, conf.PathKey, rawPath) common.GinWithValue(c, conf.PathKey, rawPath)
c.Next()
}
func Down(verifyFunc func(string, string) error) func(c *gin.Context) {
return func(c *gin.Context) {
rawPath := c.Request.Context().Value(conf.PathKey).(string)
meta, err := op.GetNearestMeta(rawPath) meta, err := op.GetNearestMeta(rawPath)
if err != nil { if err != nil {
if !errors.Is(errors.Cause(err), errs.MetaNotFound) { if !errors.Is(errors.Cause(err), errs.MetaNotFound) {
common.ErrorResp(c, err, 500, true) common.ErrorPage(c, err, 500, true)
return return
} }
} }
@ -32,7 +37,7 @@ func Down(verifyFunc func(string, string) error) func(c *gin.Context) {
s := c.Query("sign") s := c.Query("sign")
err = verifyFunc(rawPath, strings.TrimSuffix(s, "/")) err = verifyFunc(rawPath, strings.TrimSuffix(s, "/"))
if err != nil { if err != nil {
common.ErrorResp(c, err, 401) common.ErrorPage(c, err, 401)
c.Abort() c.Abort()
return return
} }

View File

@ -0,0 +1,18 @@
package middlewares
import (
"github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/server/common"
"github.com/gin-gonic/gin"
)
// SharingIdParse is middleware that copies the ":sid" route parameter into
// the request context under conf.SharingIDKey for downstream handlers.
func SharingIdParse(c *gin.Context) {
	sid := c.Param("sid")
	common.GinWithValue(c, conf.SharingIDKey, sid)
	c.Next()
}
// EmptyPathParse is middleware for routes without a path parameter: it
// stores "/" under conf.PathKey so handlers that read the path always find
// a value.
func EmptyPathParse(c *gin.Context) {
	common.GinWithValue(c, conf.PathKey, "/")
	c.Next()
}

Some files were not shown because too many files have changed in this diff Show More