Mirror of https://github.com/OpenListTeam/OpenList.git, synced 2025-09-20 04:36:09 +08:00
Compare commits
177 Commits
| SHA1 |
|---|
| 3936e736e6 |
| 68433d4f5b |
| cc16cb35bf |
| d3bc6321f4 |
| cbbb5ad231 |
| c1d03c5bcc |
| 61a8ed515f |
| bbb7c06504 |
| 8bbdb272d4 |
| c15ae94307 |
| f1a5048558 |
| 1fe26bff9a |
| 433dcd156b |
| e97f0a289e |
| 89f35170b3 |
| 8188fb2d7d |
| 87cf95f50b |
| 8ab26cb823 |
| 5880c8e1af |
| 14bf4ecb4c |
| 04a5e58781 |
| bbd4389345 |
| f350ccdf95 |
| 4f2de9395e |
| b0dbbebfb0 |
| 0c27b4bd47 |
| 736cd9e5f2 |
| c7a603c926 |
| a28d6d5693 |
| e59d2233e2 |
| 01914a06ef |
| 6499374d1c |
| b054919d5c |
| 048ee9c2e5 |
| 23394548ca |
| b04677b806 |
| e4c902dd93 |
| 5d8bd258c0 |
| 08c5283c8c |
| 10a14f10cd |
| f86ebc52a0 |
| 016ed90efa |
| d76407b201 |
| 5de6b660f2 |
| 71ada3b656 |
| dc42f0e226 |
| 74bf9f6467 |
| d0c22a1ecb |
| 57fceabcf4 |
| 8c244a984d |
| df479ba806 |
| 5ae8e96237 |
| aa0ced47b0 |
| ab747d9052 |
| 93c06213d4 |
| b9b8eed285 |
| 317d190b77 |
| 52d7d819ad |
| 0483e0f868 |
| 08dae4f55f |
| 9ac0484bc0 |
| 8cf15183a0 |
| c8f2aaaa55 |
| 1208bd0a83 |
| 6b096bcad4 |
| 58dbf088f9 |
| 05ff7908f2 |
| a703b736c9 |
| e458f2ab53 |
| a5a22e7085 |
| 9469c95b14 |
| cf912dcf7a |
| ccd4af26e5 |
| 1682e873d6 |
| 54ae7e6d9b |
| 991da7d87f |
| a498091aef |
| 976c82bb2b |
| 5b41a3bdff |
| 19d1a3b785 |
| 3c7b0c4999 |
| d6867b4ab6 |
| 11cf561307 |
| 239b58f63e |
| 7da06655cb |
| e0b3a611ba |
| be1ad08a83 |
| 4e9c30f49d |
| 0ee31a3f36 |
| 23bddf991e |
| da8d6607cf |
| 6134574dac |
| b273232f87 |
| 358e4d851e |
| e8a1ed638a |
| 4106e2a996 |
| c2271df64e |
| d4b8570eb8 |
| bd297e8ccc |
| 923d282c8a |
| 4d8c4d7089 |
| e93ab76036 |
| a9f02ecdac |
| 93849a3b5b |
| c2e0d0c9ce |
| 4a713363ee |
| 3da8ccb7a7 |
| 676b8cff0b |
| 57cf28fc90 |
| 8cf90e074d |
| 74c2ed8306 |
| 5f03edd683 |
| 8b65c918d4 |
| b5f0e3e5ee |
| 179894ff37 |
| e2fc89c637 |
| cacf67b181 |
| afb043e1d6 |
| d9debb81ad |
| 4c069fddd6 |
| b450a2104d |
| 7d0de17daf |
| bba4fb2203 |
| a20c2020f8 |
| a92b5eb929 |
| 6817494a41 |
| 5a0d8ee1b8 |
| 012e51c551 |
| 59ec1dbc9b |
| 6bb28d13f9 |
| 811a862288 |
| 74d32fd4d7 |
| cedb3d488d |
| 86324d2d6b |
| 648079ae24 |
| e8d45398d6 |
| 0c461991f9 |
| 2a4c546a8b |
| 750d4eb3f6 |
| cc01b410a4 |
| e5fbe72581 |
| 283f3723d1 |
| ad8c7b37a1 |
| a84ffb96e9 |
| 19c6b6f930 |
| eed3c0533c |
| c72ba9828a |
| 4965a1b909 |
| 1bba550469 |
| d678322b18 |
| efd8897bdf |
| 7c7cec0993 |
| 3838ef0663 |
| 9e610af114 |
| 0177177238 |
| a77e515c9b |
| 4af16ab009 |
| da35423198 |
| 9612d61e60 |
| 92f396df10 |
| 9557834342 |
| 288ba2fcda |
| f3920b02f7 |
| 2ec9dad3db |
| e11227fe2d |
| 859931b78c |
| b591524ac3 |
| dc26b4fce5 |
| f8cf02a2da |
| a214e794f4 |
| 54d761b371 |
| bea7a9b0e4 |
| a46f4cff18 |
| 8eb2d600c7 |
| ffb6c2a180 |
| 8e19a0fb07 |
| 79f4f96217 |
.github/ISSUE_TEMPLATE/00-bug_report_zh.yml (vendored) · 6 changes

```diff
@@ -25,11 +25,11 @@ body:
       - label: |
           我确认我的描述清晰,语法礼貌,能帮助开发者快速定位问题,并符合社区规则。
       - label: |
-          我已确认阅读了[OpenList文档](https://docs.oplist.org)。
+          我已确认阅读了[OpenList文档](https://doc.oplist.org)。
       - label: |
           我已确认没有重复的问题或讨论。
       - label: |
-          我已确认是`OpenList`的问题,而不是其他原因(例如 [网络](https://docs.oplist.org/zh/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host) ,`依赖`或`操作`)。
+          我已确认是`OpenList`的问题,而不是其他原因(例如 [网络](https://doc.oplist.org/faq/howto#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host-1) ,`依赖`或`操作`)。
       - label: |
           我认为此问题必须由`OpenList`处理,而非第三方。
       - label: |
@@ -72,7 +72,7 @@ body:
     attributes:
       label: 日志(可选)
       description: |
-        请复制粘贴错误日志,或者截图。(可隐藏隐私字段)
+        请复制粘贴错误日志,或者截图。(可隐藏隐私字段) [查看方法](https://doc.oplist.org/faq/howto#%E5%A6%82%E4%BD%95%E5%BF%AB%E9%80%9F%E5%AE%9A%E4%BD%8Dbug)
   - type: textarea
     id: reproduction
     attributes:
```
.github/ISSUE_TEMPLATE/01-bug_report_en.yml (vendored) · 6 changes

```diff
@@ -25,11 +25,11 @@ body:
       - label: |
           I confirm my description is clear, polite, helps developers quickly locate the issue, and complies with community rules.
       - label: |
-          I have read the [OpenList documentation](https://docs.oplist.org).
+          I have read the [OpenList documentation](https://doc.oplist.org).
       - label: |
           I confirm there are no duplicate issues or discussions.
       - label: |
-          I confirm this is an `OpenList` issue, not caused by other reasons (such as [network](https://docs.oplist.org/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host), dependencies, or operation).
+          I confirm this is an `OpenList` issue, not caused by other reasons (such as [network](https://doc.oplist.org/faq/howto#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host-1), dependencies, or operation).
       - label: |
           I believe this issue must be handled by `OpenList` and not by a third party.
       - label: |
@@ -72,7 +72,7 @@ body:
     attributes:
       label: Logs (optional)
       description: |
-        Please copy and paste any relevant log output or screenshots. (You may mask sensitive fields)
+        Please copy and paste any relevant log output or screenshots. (You may mask sensitive fields) [Guide](https://doc.oplist.org/faq/howto#how-to-quickly-locate-bugs)
   - type: textarea
     id: reproduction
     attributes:
```
```diff
@@ -19,7 +19,7 @@ body:
       - label: |
           我确认我的描述清晰,语法礼貌,能帮助开发者快速定位问题,并符合社区规则。
       - label: |
-          我已确认阅读了[OpenList文档](https://docs.oplist.org)。
+          我已确认阅读了[OpenList文档](https://doc.oplist.org)。
       - label: |
           我已确认没有重复的问题或讨论。
       - label: |
```
```diff
@@ -19,7 +19,7 @@ body:
       - label: |
           I confirm my description is clear, polite, helps developers quickly locate the issue, and complies with community rules.
       - label: |
-          I have read the [OpenList documentation](https://docs.oplist.org).
+          I have read the [OpenList documentation](https://doc.oplist.org).
       - label: |
           I confirm there are no duplicate issues or discussions.
       - label: |
```
.github/PULL_REQUEST_TEMPLATE.md (vendored, new file) · 56 lines

```diff
@@ -0,0 +1,56 @@
+<!--
+Provide a general summary of your changes in the Title above.
+The PR title must start with `feat(): `, `docs(): `, `fix(): `, `style(): `, or `refactor(): `, `chore(): `. For example: `feat(component): add new feature`.
+If it spans multiple components, use the main component as the prefix and enumerate in the title, describe in the body.
+-->
+<!--
+在上方标题中提供您更改的总体摘要。
+PR 标题需以 `feat(): `, `docs(): `, `fix(): `, `style(): `, `refactor(): `, `chore(): ` 其中之一开头,例如:`feat(component): 新增功能`。
+如果跨多个组件,请使用主要组件作为前缀,并在标题中枚举、描述中说明。
+-->
+
+## Description / 描述
+
+<!-- Describe your changes in detail -->
+<!-- 详细描述您的更改 -->
+
+## Motivation and Context / 背景
+
+<!-- Why is this change required? What problem does it solve? -->
+<!-- 为什么需要此更改?它解决了什么问题? -->
+
+<!-- If it fixes an open issue, please link to the issue here. -->
+<!-- 如果修复了一个打开的issue,请在此处链接到该issue -->
+
+Closes #XXXX
+
+<!-- or -->
+<!-- 或者 -->
+
+Relates to #XXXX
+
+## How Has This Been Tested? / 测试
+
+<!-- Please describe in detail how you tested your changes. -->
+<!-- 请详细描述您如何测试更改 -->
+
+## Checklist / 检查清单
+
+<!-- Go over all the following points, and put an `x` in all the boxes that apply. -->
+<!-- 检查以下所有要点,并在所有适用的框中打`x` -->
+
+<!-- If you're unsure about any of these, don't hesitate to ask. We're here to help! -->
+<!-- 如果您对其中任何一项不确定,请不要犹豫提问。我们会帮助您! -->
+
+- [ ] I have read the [CONTRIBUTING](https://github.com/OpenListTeam/OpenList/blob/main/CONTRIBUTING.md) document.
+      我已阅读 [CONTRIBUTING](https://github.com/OpenListTeam/OpenList/blob/main/CONTRIBUTING.md) 文档。
+- [ ] I have formatted my code with `go fmt` or [prettier](https://prettier.io/).
+      我已使用 `go fmt` 或 [prettier](https://prettier.io/) 格式化提交的代码。
+- [ ] I have added appropriate labels to this PR (or mentioned needed labels in the description if lacking permissions).
+      我已为此 PR 添加了适当的标签(如无权限或需要的标签不存在,请在描述中说明,管理员将后续处理)。
+- [ ] I have requested review from relevant code authors using the "Request review" feature when applicable.
+      我已在适当情况下使用"Request review"功能请求相关代码作者进行审查。
+- [ ] I have updated the repository accordingly (If it’s needed).
+      我已相应更新了相关仓库(若适用)。
+  - [ ] [OpenList-Frontend](https://github.com/OpenListTeam/OpenList-Frontend) #XXXX
+  - [ ] [OpenList-Docs](https://github.com/OpenListTeam/OpenList-Docs) #XXXX
```
.github/config.yml (vendored, deleted) · 21 lines

```diff
@@ -1,21 +0,0 @@
-# Configuration for welcome - https://github.com/behaviorbot/welcome
-
-# Configuration for new-issue-welcome - https://github.com/behaviorbot/new-issue-welcome
-
-# Comment to be posted to on first time issues
-newIssueWelcomeComment: >
-  Thanks for opening your first issue here! Be sure to follow the issue template!
-
-# Configuration for new-pr-welcome - https://github.com/behaviorbot/new-pr-welcome
-
-# Comment to be posted to on PRs from first time contributors in your repository
-newPRWelcomeComment: >
-  Thanks for opening this pull request! Please check out our contributing guidelines.
-
-# Configuration for first-pr-merge - https://github.com/behaviorbot/first-pr-merge
-
-# Comment to be posted to on pull requests merged by a first time user
-firstPRMergeComment: >
-  Congrats on merging your first pull request! We here at behavior bot are proud of you!
-
-# It is recommend to include as many gifs and emojis as possible
```
.github/stale.yml (vendored, deleted) · 21 lines

```diff
@@ -1,21 +0,0 @@
-# Number of days of inactivity before an issue becomes stale
-daysUntilStale: 44
-# Number of days of inactivity before a stale issue is closed
-daysUntilClose: 20
-# Issues with these labels will never be considered stale
-exemptLabels:
-  - accepted
-  - security
-  - working
-  - pr-welcome
-# Label to use when marking an issue as stale
-staleLabel: stale
-# Comment to post when marking an issue as stale. Set to `false` to disable
-markComment: >
-  This issue has been automatically marked as stale because it has not had
-  recent activity. It will be closed if no further activity occurs. Thank you
-  for your contributions.
-# Comment to post when closing a stale issue. Set to `false` to disable
-closeComment: >
-  This issue was closed due to inactive more than 52 days. You can reopen or
-  recreate it if you think it should continue. Thank you for your contributions again.
```
.github/workflows/beta_release.yml (vendored) · 20 changes

```diff
@@ -14,12 +14,8 @@ permissions:

 jobs:
   changelog:
-    strategy:
-      matrix:
-        platform: [ubuntu-latest]
-        go-version: ["1.21"]
     name: Beta Release Changelog
-    runs-on: ${{ matrix.platform }}
+    runs-on: ubuntu-latest
     steps:
       - name: Checkout
         uses: actions/checkout@v4
@@ -65,7 +61,7 @@ jobs:
     strategy:
       matrix:
         include:
-          - target: "!(*musl*|*windows-arm64*|*android*|*freebsd*)" # xgo
+          - target: "!(*musl*|*windows-arm64*|*windows7-*|*android*|*freebsd*)" # xgo and loongarch
            hash: "md5"
          - target: "linux-!(arm*)-musl*" #musl-not-arm
            hash: "md5-linux-musl"
@@ -73,6 +69,8 @@ jobs:
            hash: "md5-linux-musl-arm"
          - target: "windows-arm64" #win-arm64
            hash: "md5-windows-arm64"
+         - target: "windows7-*" #win7
+           hash: "md5-windows7"
          - target: "android-*" #android
            hash: "md5-android"
          - target: "freebsd-*" #freebsd
@@ -89,27 +87,29 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: "1.22"
+          go-version: "1.25.0"

       - name: Setup web
         run: bash build.sh dev web
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}

       - name: Build
-        uses: OpenListTeam/cgo-actions@v1.1.2
+        uses: OpenListTeam/cgo-actions@v1.2.2
         with:
           targets: ${{ matrix.target }}
+          musl-target-format: $os-$musl-$arch
           github-token: ${{ secrets.GITHUB_TOKEN }}
           out-dir: build
           output: openlist-$target$ext
           musl-base-url: "https://github.com/OpenListTeam/musl-compilers/releases/latest/download/"
           x-flags: |
             github.com/OpenListTeam/OpenList/v4/internal/conf.BuiltAt=$built_at
-            github.com/OpenListTeam/OpenList/v4/internal/conf.GitAuthor=OpenList
+            github.com/OpenListTeam/OpenList/v4/internal/conf.GitAuthor=The OpenList Projects Contributors <noreply@openlist.team>
             github.com/OpenListTeam/OpenList/v4/internal/conf.GitCommit=$git_commit
             github.com/OpenListTeam/OpenList/v4/internal/conf.Version=$tag
-            github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=dev
+            github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=rolling

       - name: Compress
         run: |
```
.github/workflows/build.yml (vendored) · 20 changes

```diff
@@ -1,8 +1,6 @@
 name: Test Build

 on:
   push:
     branches: ["main"]
   pull_request:
     branches: ["main"]
   workflow_dispatch:
@@ -15,7 +13,6 @@ jobs:
   build:
     strategy:
       matrix:
-        platform: [ubuntu-latest]
         target:
           - darwin-amd64
           - darwin-arm64
@@ -24,8 +21,8 @@ jobs:
           - linux-amd64-musl
           - windows-arm64
           - android-arm64
-    name: Build
-    runs-on: ${{ matrix.platform }}
+    name: Build ${{ matrix.target }}
+    runs-on: ubuntu-latest
     steps:
       - name: Checkout
         uses: actions/checkout@v4
@@ -36,28 +33,31 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: "1.22"
+          go-version: "1.25.0"

       - name: Setup web
         run: bash build.sh dev web
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}

       - name: Build
-        uses: OpenListTeam/cgo-actions@v1.1.2
+        uses: OpenListTeam/cgo-actions@v1.2.2
         with:
           targets: ${{ matrix.target }}
+          musl-target-format: $os-$musl-$arch
           github-token: ${{ secrets.GITHUB_TOKEN }}
           out-dir: build
           x-flags: |
             github.com/OpenListTeam/OpenList/v4/internal/conf.BuiltAt=$built_at
-            github.com/OpenListTeam/OpenList/v4/internal/conf.GitAuthor=OpenList
+            github.com/OpenListTeam/OpenList/v4/internal/conf.GitAuthor=The OpenList Projects Contributors <noreply@openlist.team>
             github.com/OpenListTeam/OpenList/v4/internal/conf.GitCommit=$git_commit
             github.com/OpenListTeam/OpenList/v4/internal/conf.Version=$tag
-            github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=dev
+            github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=rolling
           output: openlist$ext

       - name: Upload artifact
         uses: actions/upload-artifact@v4
         with:
-          name: openlist_${{ env.SHA }}_${{ matrix.target }}
+          name: openlist_${{ steps.short-sha.outputs.sha }}_${{ matrix.target }}
           path: build/*
```
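The `x-flags` list above feeds version metadata into `github.com/OpenListTeam/OpenList/v4/internal/conf` at build time. Assuming `cgo-actions` forwards these entries as Go `-X` linker flags (an assumption — this diff does not show the action's internals), a hand-rolled equivalent would look roughly like this, with placeholder values:

```sh
# A sketch only: inject the same conf variables via Go's -X linker flag.
# The values below are placeholders, not taken from any real build.
go build -o openlist \
  -ldflags "-X 'github.com/OpenListTeam/OpenList/v4/internal/conf.BuiltAt=2025-01-01T00:00:00Z' \
            -X 'github.com/OpenListTeam/OpenList/v4/internal/conf.GitAuthor=The OpenList Projects Contributors <noreply@openlist.team>' \
            -X 'github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=rolling'" \
  .
```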
.github/workflows/changelog.yml (vendored) · 2 changes

```diff
@@ -1,4 +1,4 @@
-name: Automatic changelog
+name: Release Automatic changelog

 on:
   push:
```
.github/workflows/issue_pr_comment.yml (vendored, new file) · 61 lines

```diff
@@ -0,0 +1,61 @@
+name: Issue or PR Auto Reply
+
+on:
+  issues:
+    types: [opened]
+  pull_request:
+    types: [opened]
+
+permissions:
+  issues: write
+  pull-requests: write
+
+jobs:
+  auto-reply:
+    runs-on: ubuntu-latest
+    if: github.event_name == 'issues'
+    steps:
+      - name: Check issue for unchecked tasks and reply
+        uses: actions/github-script@v7
+        with:
+          script: |
+            const issueBody = context.payload.issue.body || "";
+            const unchecked = /- \[ \] /.test(issueBody);
+            let comment = "感谢您联系OpenList。我们会尽快回复您。\n";
+            comment += "Thanks for contacting OpenList. We will reply to you as soon as possible.\n\n";
+            if (unchecked) {
+              comment += "由于您提出的 Issue 中包含部分未确认的项目,为了更好地管理项目,在人工审核后可能会直接关闭此问题。\n";
+              comment += "如果您能确认并补充相关未确认项目的信息,欢迎随时重新提交。我们会及时关注并处理。感谢您的理解与支持!\n";
+              comment += "Since your issue contains some unchecked tasks, it may be closed after manual review.\n";
+              comment += "If you can confirm and provide information for the unchecked tasks, feel free to resubmit.\n";
+              comment += "We will pay attention and handle it in a timely manner.\n\n";
+              comment += "感谢您的理解与支持!\n";
+              comment += "Thank you for your understanding and support!\n";
+            }
+            await github.rest.issues.createComment({
+              ...context.repo,
+              issue_number: context.issue.number,
+              body: comment
+            });
+
+  pr-title-check:
+    runs-on: ubuntu-latest
+    if: github.event_name == 'pull_request'
+    steps:
+      - name: Check PR title for required prefix and comment
+        uses: actions/github-script@v7
+        with:
+          script: |
+            const title = context.payload.pull_request.title || "";
+            const ok = /^(feat|docs|fix|style|refactor|chore)\(.+?\): /i.test(title);
+            if (!ok) {
+              let comment = "⚠️ PR 标题需以 `feat(): `, `docs(): `, `fix(): `, `style(): `, `refactor(): `, `chore(): ` 其中之一开头,例如:`feat(component): 新增功能`。\n";
+              comment += "⚠️ The PR title must start with `feat(): `, `docs(): `, `fix(): `, `style(): `, or `refactor(): `, `chore(): `. For example: `feat(component): add new feature`.\n\n";
+              comment += "如果跨多个组件,请使用主要组件作为前缀,并在标题中枚举、描述中说明。\n";
+              comment += "If it spans multiple components, use the main component as the prefix and enumerate in the title, describe in the body.\n\n";
+              await github.rest.issues.createComment({
+                ...context.repo,
+                issue_number: context.issue.number,
+                body: comment
+              });
+            }
```
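The title check above hinges on one regular expression. A quick local sanity check of the same rule might look like this — the pattern is copied from the workflow, with the lazy `.+?` simplified to `.+`, since POSIX ERE as used by `grep -E` has no lazy quantifier and the anchored match is unaffected:

```sh
# Illustrative only: test a PR title against the workflow's prefix rule.
title='feat(component): add new feature'
if printf '%s' "$title" | grep -Eiq '^(feat|docs|fix|style|refactor|chore)\(.+\): '; then
  echo "title OK"
else
  echo "title missing required prefix" >&2
fi
```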
.github/workflows/release.yml (vendored) · 103 changes

```diff
@@ -8,24 +8,34 @@ permissions:
   contents: write

 jobs:
+  # Set release to prerelease first
+  prerelease:
+    name: Set Prerelease
+    runs-on: ubuntu-latest
+    steps:
+      - name: Prerelease
+        uses: irongut/EditRelease@v1.2.0
+        with:
+          token: ${{ secrets.GITHUB_TOKEN }}
+          id: ${{ github.event.release.id }}
+          prerelease: true
+
+  # Main release job for all platforms
   release:
+    needs: prerelease
     strategy:
       matrix:
-        platform: [ ubuntu-latest ]
-        go-version: [ '1.21' ]
-    name: Release
-    runs-on: ${{ matrix.platform }}
+        build-type: [ 'standard', 'lite' ]
+        target-platform: [ '', 'android', 'freebsd', 'linux_musl', 'linux_musl_arm' ]
+    name: Release ${{ matrix.target-platform && format('{0} ', matrix.target-platform) || '' }}${{ matrix.build-type == 'lite' && 'Lite' || '' }}
+    runs-on: ubuntu-latest
     steps:

       - name: Free Disk Space (Ubuntu)
+        if: matrix.target-platform == ''
         uses: jlumbroso/free-disk-space@main
         with:
           # this might remove tools that are actually needed,
           # if set to "true" but frees about 6 GB
           tool-cache: false

           # all of these default to true, but feel free to set to
           # "false" if necessary for your workflow
           android: true
           dotnet: true
           haskell: true
@@ -33,17 +43,10 @@ jobs:
           docker-images: true
           swap-storage: true

-      - name: Prerelease
-        uses: irongut/EditRelease@v1.2.0
-        with:
-          token: ${{ secrets.GITHUB_TOKEN }}
-          id: ${{ github.event.release.id }}
-          prerelease: true
-
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: ${{ matrix.go-version }}
+          go-version: '1.25.0'

       - name: Checkout
         uses: actions/checkout@v4
@@ -51,6 +54,7 @@ jobs:
           fetch-depth: 0

       - name: Install dependencies
+        if: matrix.target-platform == ''
         run: |
           sudo snap install zig --classic --beta
           docker pull crazymax/xgo:latest
@@ -59,74 +63,15 @@ jobs:

       - name: Build
         run: |
-          bash build.sh release
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

-      - name: Upload assets
-        uses: softprops/action-gh-release@v2
-        with:
-          files: build/compress/*
-          prerelease: false
-
-  release-lite:
-    strategy:
-      matrix:
-        platform: [ ubuntu-latest ]
-        go-version: [ '1.21' ]
-    name: Release Lite
-    runs-on: ${{ matrix.platform }}
-    steps:
-
-      - name: Free Disk Space (Ubuntu)
-        uses: jlumbroso/free-disk-space@main
-        with:
-          # this might remove tools that are actually needed,
-          # if set to "true" but frees about 6 GB
-          tool-cache: false
-
-          # all of these default to true, but feel free to set to
-          # "false" if necessary for your workflow
-          android: true
-          dotnet: true
-          haskell: true
-          large-packages: true
-          docker-images: true
-          swap-storage: true
-
-      - name: Prerelease
-        uses: irongut/EditRelease@v1.2.0
-        with:
-          token: ${{ secrets.GITHUB_TOKEN }}
-          id: ${{ github.event.release.id }}
-          prerelease: true
-
-      - name: Setup Go
-        uses: actions/setup-go@v5
-        with:
-          go-version: ${{ matrix.go-version }}
-
-      - name: Checkout
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-
-      - name: Install dependencies
-        run: |
-          sudo snap install zig --classic --beta
-          docker pull crazymax/xgo:latest
-          go install github.com/crazy-max/xgo@latest
-          sudo apt install upx
-
-      - name: Build
-        run: |
-          bash build.sh release lite
+          bash build.sh release ${{ matrix.build-type == 'lite' && 'lite' || '' }} ${{ matrix.target-platform }}
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}

       - name: Upload assets
         uses: softprops/action-gh-release@v2
         with:
           files: build/compress/*
           prerelease: false
+          tag_name: ${{ github.event.release.tag_name }}
```
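With the consolidated matrix, the single `Build` step expands to the same commands the separate per-platform jobs used to run. A few sample expansions, derived from the expression above and the deleted workflows that follow:

```sh
# What the matrix build step evaluates to for some of the 2 x 5 combinations:
bash build.sh release                       # standard build, default targets
bash build.sh release lite                  # lite build, default targets
bash build.sh release android               # standard build, android
bash build.sh release lite linux_musl_arm   # lite build, linux_musl_arm
```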
.github/workflows/release_android.yml (vendored, deleted) · 69 lines

```diff
@@ -1,69 +0,0 @@
-name: Release builds (Android)
-
-on:
-  release:
-    types: [ published ]
-
-permissions:
-  contents: write
-
-jobs:
-  release_android:
-    strategy:
-      matrix:
-        platform: [ ubuntu-latest ]
-        go-version: [ '1.21' ]
-    name: Release
-    runs-on: ${{ matrix.platform }}
-    steps:
-
-      - name: Setup Go
-        uses: actions/setup-go@v5
-        with:
-          go-version: ${{ matrix.go-version }}
-
-      - name: Checkout
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-
-      - name: Build
-        run: |
-          bash build.sh release android
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
-      - name: Upload assets
-        uses: softprops/action-gh-release@v2
-        with:
-          files: build/compress/*
-
-  release_android_lite:
-    strategy:
-      matrix:
-        platform: [ ubuntu-latest ]
-        go-version: [ '1.21' ]
-    name: Release
-    runs-on: ${{ matrix.platform }}
-    steps:
-
-      - name: Setup Go
-        uses: actions/setup-go@v5
-        with:
-          go-version: ${{ matrix.go-version }}
-
-      - name: Checkout
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-
-      - name: Build
-        run: |
-          bash build.sh release lite android
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
-      - name: Upload assets
-        uses: softprops/action-gh-release@v2
-        with:
-          files: build/compress/*
```
.github/workflows/release_docker.yml (vendored) · 65 changes

```diff
@@ -31,11 +31,8 @@ env:
   REGISTRY: ghcr.io
   ARTIFACT_NAME: 'binaries_docker_release'
-  ARTIFACT_NAME_LITE: 'binaries_docker_release_lite'
-  RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64'
+  RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/ppc64le,linux/riscv64,linux/loong64' ### Temporarily disable Docker builds for linux/s390x architectures for unknown reasons.
   IMAGE_PUSH: ${{ github.event_name == 'push' || github.event_name == 'workflow_dispatch' }}
   IMAGE_IS_PROD: ${{ github.ref_type == 'tag' || github.event.inputs.as_latest == 'true' }}
   IMAGE_TAGS_BETA: |
     type=raw,value=beta,enable={{is_default_branch}}

 permissions:
   packages: write
@@ -50,7 +47,7 @@ jobs:

       - uses: actions/setup-go@v5
         with:
-          go-version: 'stable'
+          go-version: '1.25.0'

       - name: Cache Musl
         id: cache-musl
@@ -65,17 +62,11 @@ jobs:
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

-      - name: Build go binary (beta)
-        if: env.IMAGE_IS_PROD != 'true'
-        run: bash build.sh beta docker-multiplatform
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
       - name: Build go binary (release)
-        if: env.IMAGE_IS_PROD == 'true'
         run: bash build.sh release docker-multiplatform
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}

       - name: Upload artifacts
         uses: actions/upload-artifact@v4
@@ -88,7 +79,7 @@ jobs:
           !build/musl-libs/**

   build_binary_lite:
-    name: Build Binaries for Docker Release
+    name: Build Binaries for Docker Release (Lite)
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
@@ -96,7 +87,7 @@ jobs:

       - uses: actions/setup-go@v5
         with:
-          go-version: 'stable'
+          go-version: '1.25.0'

       - name: Cache Musl
         id: cache-musl
@@ -111,17 +102,11 @@ jobs:
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

-      - name: Build go binary (beta)
-        if: env.IMAGE_IS_PROD != 'true'
-        run: bash build.sh beta lite docker-multiplatform
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
       - name: Build go binary (release)
-        if: env.IMAGE_IS_PROD == 'true'
         run: bash build.sh release lite docker-multiplatform
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}

       - name: Upload artifacts
         uses: actions/upload-artifact@v4
@@ -142,15 +127,19 @@ jobs:
         image: ["latest", "ffmpeg", "aria2", "aio"]
         include:
           - image: "latest"
+            base_image_tag: "base"
             build_arg: ""
             tag_favor: ""
           - image: "ffmpeg"
+            base_image_tag: "ffmpeg"
             build_arg: INSTALL_FFMPEG=true
             tag_favor: "suffix=-ffmpeg,onlatest=true"
           - image: "aria2"
+            base_image_tag: "aria2"
             build_arg: INSTALL_ARIA2=true
             tag_favor: "suffix=-aria2,onlatest=true"
           - image: "aio"
+            base_image_tag: "aio"
             build_arg: |
               INSTALL_FFMPEG=true
               INSTALL_ARIA2=true
@@ -181,7 +170,7 @@ jobs:
         if: env.IMAGE_PUSH == 'true'
         uses: docker/login-action@v3
         with:
-          username: ${{ env.DOCKERHUB_ORG_NAME }}
+          username: ${{ vars.DOCKERHUB_ORG_NAME_BACKUP || env.DOCKERHUB_ORG_NAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}

       - name: Docker meta
@@ -192,13 +181,11 @@ jobs:
           ${{ env.REGISTRY }}/${{ env.GHCR_ORG_NAME }}/${{ env.IMAGE_NAME }}
           ${{ env.DOCKERHUB_ORG_NAME }}/${{ env.IMAGE_NAME_DOCKERHUB }}
         tags: >
-          ${{ env.IMAGE_IS_PROD == 'true' && (
-          github.event_name == 'workflow_dispatch'
+          ${{ github.event_name == 'workflow_dispatch'
           && format('type=raw,value={0}', github.event.inputs.manual_tag)
-          || format('type=raw,value={0}', github.ref_name)
-          ) || env.IMAGE_TAGS_BETA }}
+          || format('type=raw,value={0}', github.ref_name) }}
         flavor: |
-          latest=${{ env.IMAGE_IS_PROD }}
+          latest=${{ github.event_name == 'push' || github.event.inputs.as_latest == 'true' }}
           ${{ matrix.tag_favor }}

       - name: Build and push
@@ -208,29 +195,35 @@ jobs:
         context: .
         file: Dockerfile.ci
         push: ${{ env.IMAGE_PUSH == 'true' }}
-        build-args: ${{ matrix.build_arg }}
+        build-args: |
+          BASE_IMAGE_TAG=${{ matrix.base_image_tag }}
+          ${{ matrix.build_arg }}
         tags: ${{ steps.meta.outputs.tags }}
         labels: ${{ steps.meta.outputs.labels }}
         platforms: ${{ env.RELEASE_PLATFORMS }}

   release_docker_lite:
     needs: build_binary_lite
-    name: Release Docker image
+    name: Release Docker image (Lite)
     runs-on: ubuntu-latest
     strategy:
       matrix:
         image: ["latest", "ffmpeg", "aria2", "aio"]
         include:
           - image: "latest"
+            base_image_tag: "base"
             build_arg: ""
             tag_favor: "suffix=-lite,onlatest=true"
           - image: "ffmpeg"
+            base_image_tag: "ffmpeg"
             build_arg: INSTALL_FFMPEG=true
             tag_favor: "suffix=-lite-ffmpeg,onlatest=true"
           - image: "aria2"
+            base_image_tag: "aria2"
             build_arg: INSTALL_ARIA2=true
             tag_favor: "suffix=-lite-aria2,onlatest=true"
           - image: "aio"
+            base_image_tag: "aio"
             build_arg: |
               INSTALL_FFMPEG=true
               INSTALL_ARIA2=true
@@ -261,7 +254,7 @@ jobs:
         if: env.IMAGE_PUSH == 'true'
         uses: docker/login-action@v3
         with:
-          username: ${{ env.DOCKERHUB_ORG_NAME }}
+          username: ${{ vars.DOCKERHUB_ORG_NAME_BACKUP || env.DOCKERHUB_ORG_NAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}

       - name: Docker meta
@@ -272,13 +265,11 @@ jobs:
           ${{ env.REGISTRY }}/${{ env.GHCR_ORG_NAME }}/${{ env.IMAGE_NAME }}
           ${{ env.DOCKERHUB_ORG_NAME }}/${{ env.IMAGE_NAME_DOCKERHUB }}
         tags: >
-          ${{ env.IMAGE_IS_PROD == 'true' && (
-          github.event_name == 'workflow_dispatch'
+          ${{ github.event_name == 'workflow_dispatch'
           && format('type=raw,value={0}', github.event.inputs.manual_tag)
-          || format('type=raw,value={0}', github.ref_name)
-          ) || env.IMAGE_TAGS_BETA }}
+          || format('type=raw,value={0}', github.ref_name) }}
         flavor: |
-          latest=${{ env.IMAGE_IS_PROD }}
+          latest=${{ github.event_name == 'push' || github.event.inputs.as_latest == 'true' }}
           ${{ matrix.tag_favor }}

       - name: Build and push
@@ -288,7 +279,9 @@ jobs:
         context: .
         file: Dockerfile.ci
         push: ${{ env.IMAGE_PUSH == 'true' }}
-        build-args: ${{ matrix.build_arg }}
+        build-args: |
+          BASE_IMAGE_TAG=${{ matrix.base_image_tag }}
+          ${{ matrix.build_arg }}
         tags: ${{ steps.meta.outputs.tags }}
         labels: ${{ steps.meta.outputs.labels }}
         platforms: ${{ env.RELEASE_PLATFORMS }}
```
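For orientation, the updated `build-args` block corresponds to a manual multi-platform build along these lines. This is a sketch rather than the workflow's literal invocation: the platform list is shortened and the image tag is hypothetical.

```sh
# Hypothetical manual equivalent of the build-and-push step above.
docker buildx build \
  --platform linux/amd64,linux/arm64 \
  --build-arg BASE_IMAGE_TAG=ffmpeg \
  --build-arg INSTALL_FFMPEG=true \
  -f Dockerfile.ci \
  -t example/openlist:ffmpeg \
  .
```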
.github/workflows/release_freebsd.yml (vendored, deleted) · 69 lines

```diff
@@ -1,69 +0,0 @@
-name: Release builds (Freebsd)
-
-on:
-  release:
-    types: [ published ]
-
-permissions:
-  contents: write
-
-jobs:
-  release_freebsd:
-    strategy:
-      matrix:
-        platform: [ ubuntu-latest ]
-        go-version: [ '1.21' ]
-    name: Release
-    runs-on: ${{ matrix.platform }}
-    steps:
-
-      - name: Setup Go
-        uses: actions/setup-go@v5
-        with:
-          go-version: ${{ matrix.go-version }}
-
-      - name: Checkout
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-
-      - name: Build
-        run: |
-          bash build.sh release freebsd
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
-      - name: Upload assets
-        uses: softprops/action-gh-release@v2
-        with:
-          files: build/compress/*
-
-  release_freebsd_lite:
-    strategy:
-      matrix:
-        platform: [ ubuntu-latest ]
-        go-version: [ '1.21' ]
-    name: Release
-    runs-on: ${{ matrix.platform }}
-    steps:
-
-      - name: Setup Go
-        uses: actions/setup-go@v5
-        with:
-          go-version: ${{ matrix.go-version }}
-
-      - name: Checkout
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-
-      - name: Build
-        run: |
-          bash build.sh release lite freebsd
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
-      - name: Upload assets
-        uses: softprops/action-gh-release@v2
-        with:
-          files: build/compress/*
```
.github/workflows/release_linux_musl.yml (vendored, deleted) · 69 lines

```diff
@@ -1,69 +0,0 @@
-name: Release builds (linux_musl)
-
-on:
-  release:
-    types: [ published ]
-
-permissions:
-  contents: write
-
-jobs:
-  release_linux_musl:
-    strategy:
-      matrix:
-        platform: [ ubuntu-latest ]
-        go-version: [ '1.21' ]
-    name: Release
-    runs-on: ${{ matrix.platform }}
-    steps:
-
-      - name: Setup Go
-        uses: actions/setup-go@v5
-        with:
-          go-version: ${{ matrix.go-version }}
-
-      - name: Checkout
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-
-      - name: Build
-        run: |
-          bash build.sh release linux_musl
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
-      - name: Upload assets
-        uses: softprops/action-gh-release@v2
-        with:
-          files: build/compress/*
-
-  release_linux_musl_lite:
-    strategy:
-      matrix:
-        platform: [ ubuntu-latest ]
-        go-version: [ '1.21' ]
-    name: Release
-    runs-on: ${{ matrix.platform }}
-    steps:
-
-      - name: Setup Go
-        uses: actions/setup-go@v5
-        with:
-          go-version: ${{ matrix.go-version }}
-
-      - name: Checkout
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-
-      - name: Build
-        run: |
-          bash build.sh release lite linux_musl
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
-      - name: Upload assets
-        uses: softprops/action-gh-release@v2
-        with:
-          files: build/compress/*
```
.github/workflows/release_linux_musl_arm.yml (vendored, deleted) · 70 lines

```diff
@@ -1,70 +0,0 @@
-name: Release builds (linux_musl_arm)
-
-on:
-  release:
-    types: [ published ]
-
-permissions:
-  contents: write
-
-jobs:
-  release_linux_musl_arm:
-    strategy:
-      matrix:
-        platform: [ ubuntu-latest ]
-        go-version: [ '1.21' ]
-    name: Release
-    runs-on: ${{ matrix.platform }}
-    steps:
-
-      - name: Setup Go
-        uses: actions/setup-go@v5
-        with:
-          go-version: ${{ matrix.go-version }}
-
-      - name: Checkout
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-
-      - name: Build
-        run: |
-          bash build.sh release linux_musl_arm
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
-      - name: Upload assets
-        uses: softprops/action-gh-release@v2
-        with:
-          files: build/compress/*
-
-  release_linux_musl_arm_lite:
-    strategy:
-      matrix:
-        platform: [ ubuntu-latest ]
-        go-version: [ '1.21' ]
-    name: Release
-    runs-on: ${{ matrix.platform }}
-    steps:
-
-      - name: Setup Go
-        uses: actions/setup-go@v5
-        with:
-          go-version: ${{ matrix.go-version }}
-
-      - name: Checkout
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-
-      - name: Build
-        run: |
-          bash build.sh release lite linux_musl_arm
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
-      - name: Upload assets
-        uses: softprops/action-gh-release@v2
-        with:
-          files: build/compress/*
-
```
.github/workflows/sync_repo.yml (vendored, new file) · 38 lines

```diff
@@ -0,0 +1,38 @@
+name: Sync to Gitee
+
+on:
+  push:
+    branches:
+      - main
+  workflow_dispatch:
+
+jobs:
+  sync:
+    runs-on: ubuntu-latest
+    name: Sync GitHub to Gitee
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+
+      - name: Setup SSH
+        run: |
+          mkdir -p ~/.ssh
+          echo "${{ secrets.GITEE_SSH_PRIVATE_KEY }}" > ~/.ssh/id_rsa
+          chmod 600 ~/.ssh/id_rsa
+          ssh-keyscan gitee.com >> ~/.ssh/known_hosts
+
+      - name: Create single commit and push
+        run: |
+          git config user.name "GitHub Actions"
+          git config user.email "actions@github.com"
+
+          # Create a new branch
+          git checkout --orphan new-main
+          git add .
+          git commit -m "Sync from GitHub: $(date)"
+
+          # Add Gitee remote and force push
+          git remote add gitee ${{ vars.GITEE_REPO_URL }}
+          git push --force gitee new-main:main
```
.github/workflows/test_docker.yml (vendored) · 22 changes

```diff
@@ -1,4 +1,4 @@
-name: Docker Beta Release
+name: Beta Release (Docker)

 on:
   workflow_dispatch:
@@ -20,8 +20,7 @@ env:
   IMAGE_NAME_DOCKERHUB: openlist
   REGISTRY: ghcr.io
   ARTIFACT_NAME: 'binaries_docker_release'
-  ARTIFACT_NAME_LITE: 'binaries_docker_release_lite'
-  RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64'
+  RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/ppc64le,linux/riscv64,linux/loong64' ### Temporarily disable Docker builds for linux/s390x architectures for unknown reasons.
   IMAGE_PUSH: ${{ github.event_name == 'push' }}
   IMAGE_TAGS_BETA: |
     type=ref,event=pr
@@ -29,7 +28,7 @@ env:

 jobs:
   build_binary:
-    name: Build Binaries for Docker Release
+    name: Build Binaries for Docker Release (Beta)
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
@@ -37,7 +36,7 @@ jobs:

       - uses: actions/setup-go@v5
         with:
-          go-version: 'stable'
+          go-version: '1.25.0'

       - name: Cache Musl
         id: cache-musl
@@ -56,6 +55,7 @@ jobs:
         run: bash build.sh beta docker-multiplatform
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}

       - name: Upload artifacts
         uses: actions/upload-artifact@v4
@@ -69,7 +69,7 @@ jobs:

   release_docker:
     needs: build_binary
-    name: Release Docker image
+    name: Release Docker image (Beta)
     runs-on: ubuntu-latest
     permissions:
       packages: write
@@ -78,15 +78,19 @@ jobs:
         image: ["latest", "ffmpeg", "aria2", "aio"]
         include:
           - image: "latest"
+            base_image_tag: "base"
             build_arg: ""
             tag_favor: ""
           - image: "ffmpeg"
+            base_image_tag: "ffmpeg"
             build_arg: INSTALL_FFMPEG=true
             tag_favor: "suffix=-ffmpeg,onlatest=true"
           - image: "aria2"
+            base_image_tag: "aria2"
             build_arg: INSTALL_ARIA2=true
             tag_favor: "suffix=-aria2,onlatest=true"
           - image: "aio"
+            base_image_tag: "aio"
             build_arg: |
               INSTALL_FFMPEG=true
               INSTALL_ARIA2=true
@@ -117,7 +121,7 @@ jobs:
         if: env.IMAGE_PUSH == 'true'
         uses: docker/login-action@v3
         with:
-          username: ${{ env.DOCKERHUB_ORG_NAME }}
+          username: ${{ vars.DOCKERHUB_ORG_NAME_BACKUP || env.DOCKERHUB_ORG_NAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}

       - name: Docker meta
@@ -138,7 +142,9 @@ jobs:
         context: .
         file: Dockerfile.ci
         push: ${{ env.IMAGE_PUSH == 'true' }}
-        build-args: ${{ matrix.build_arg }}
+        build-args: |
+          BASE_IMAGE_TAG=${{ matrix.base_image_tag }}
+          ${{ matrix.build_arg }}
         tags: ${{ steps.meta.outputs.tags }}
         labels: ${{ steps.meta.outputs.labels }}
         platforms: ${{ env.RELEASE_PLATFORMS }}
```
```diff
@@ -19,7 +19,7 @@ jobs:
         uses: peter-evans/repository-dispatch@v3
         with:
           token: ${{ secrets.EXTERNAL_REPO_TOKEN_LUCI_APP_OPENLIST }}
-          repository: ${{ vars.HOOK_REPO || 'OpenListTeam/luci-app-openlist' }}
+          repository: ${{ vars.HOOK_REPO || 'OpenListTeam/OpenList-OpenWRT' }}
           event-type: update-hashes
           client-payload: |
             {
```
CONTRIBUTING.md · 110 changes

````diff
@@ -2,106 +2,76 @@

 ## Setup your machine

-`OpenList` is written in [Go](https://golang.org/) and [React](https://reactjs.org/).
+`OpenList` is written in [Go](https://golang.org/) and [SolidJS](https://www.solidjs.com/).

 Prerequisites:

 - [git](https://git-scm.com)
-- [Go 1.20+](https://golang.org/doc/install)
+- [Go 1.24+](https://golang.org/doc/install)
 - [gcc](https://gcc.gnu.org/)
 - [nodejs](https://nodejs.org/)

-Clone `OpenList` and `OpenList-Frontend` anywhere:
+## Cloning a fork
+
+Fork and clone `OpenList` and `OpenList-Frontend` anywhere:

 ```shell
-$ git clone https://github.com/OpenListTeam/OpenList.git
-$ git clone --recurse-submodules https://github.com/OpenListTeam/OpenList-Frontend.git
+$ git clone https://github.com/<your-username>/OpenList.git
+$ git clone --recurse-submodules https://github.com/<your-username>/OpenList-Frontend.git
 ```

 ## Creating a branch

-Create a new branch from the `main` branch, with an appropriate name.
-
-```shell
-$ git checkout -b <branch-name>
-```
+You should switch to the `main` branch for development.

 ## Preview your change

 ### backend

 ```shell
 $ go run main.go
 ```

 ### frontend

 ```shell
 $ pnpm dev
 ```

 ## Add a new driver

 Copy `drivers/template` folder and rename it, and follow the comments in it.

 ## Create a commit

-Commit messages should be well formatted, and to make that "standardized".
-
-### Commit Message Format
-Each commit message consists of a **header**, a **body** and a **footer**. The header has a special
-format that includes a **type**, a **scope** and a **subject**:
+Submit your pull request. For PR titles, follow [Conventional Commits](https://www.conventionalcommits.org).

-```
-<type>(<scope>): <subject>
-<BLANK LINE>
-<body>
-<BLANK LINE>
-<footer>
-```
+https://github.com/OpenListTeam/OpenList/issues/376

-The **header** is mandatory and the **scope** of the header is optional.
-
-Any line of the commit message cannot be longer than 100 characters! This allows the message to be easier
-to read on GitHub as well as in various git tools.
-
-### Revert
-If the commit reverts a previous commit, it should begin with `revert: `, followed by the header
-of the reverted commit.
-In the body it should say: `This reverts commit <hash>.`, where the hash is the SHA of the commit
-being reverted.
-
-### Type
-Must be one of the following:
-
-* **feat**: A new feature
-* **fix**: A bug fix
-* **docs**: Documentation only changes
-* **style**: Changes that do not affect the meaning of the code (white-space, formatting, missing
-  semi-colons, etc)
-* **refactor**: A code change that neither fixes a bug nor adds a feature
-* **perf**: A code change that improves performance
-* **test**: Adding missing or correcting existing tests
-* **build**: Affects project builds or dependency modifications
-* **revert**: Restore the previous commit
-* **ci**: Continuous integration of related file modifications
-* **chore**: Changes to the build process or auxiliary tools and libraries such as documentation
-  generation
-* **release**: Release a new version
-
-### Scope
-The scope could be anything specifying place of the commit change. For example `$location`,
-`$browser`, `$compile`, `$rootScope`, `ngHref`, `ngClick`, `ngView`, etc...
-
-You can use `*` when the change affects more than a single scope.
-
-### Subject
-The subject contains succinct description of the change:
-
-* use the imperative, present tense: "change" not "changed" nor "changes"
-* don't capitalize first letter
-* no dot (.) at the end
-
-### Body
-Just as in the **subject**, use the imperative, present tense: "change" not "changed" nor "changes".
-The body should include the motivation for the change and contrast this with previous behavior.
-
-### Footer
-The footer should contain any information about **Breaking Changes** and is also the place to
-[reference GitHub issues that this commit closes](https://help.github.com/articles/closing-issues-via-commit-messages/).
-
-**Breaking Changes** should start with the word `BREAKING CHANGE:` with a space or two newlines.
-The rest of the commit message is then used for this.
+It's suggested to sign your commits. See: [How to sign commits](https://docs.github.com/en/authentication/managing-commit-signature-verification/signing-commits)

 ## Submit a pull request

-Push your branch to your `openlist` fork and open a pull request against the
-`main` branch.
+Please make sure your code has been formatted with `go fmt` or [prettier](https://prettier.io/) before submitting.
+
+Push your branch to your `openlist` fork and open a pull request against the `main` branch.
+
+## Merge your pull request
+
+Your pull request will be merged after review. Please wait for the maintainer to merge your pull request after review.
+
+At least 1 approving review is required by reviewers with write access. You can also request a review from maintainers.
+
+## Delete your branch
+
+(Optional) After your pull request is merged, you can delete your branch.
+
+---
+
+Thank you for your contribution! Let's make OpenList better together!
````
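The formatting requirement above can be satisfied before pushing. A minimal sketch, assuming Go code lives in this repository and the frontend code in `OpenList-Frontend`:

```sh
go fmt ./...              # format backend Go code (run in OpenList)
npx prettier --write .    # format frontend code (run in OpenList-Frontend)
```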
Dockerfile · 40 changes

```diff
@@ -1,4 +1,7 @@
-FROM docker.io/library/alpine:edge AS builder
+### Default image is base. You can add other support by modifying BASE_IMAGE_TAG. The following parameters are supported: base (default), aria2, ffmpeg, aio
+ARG BASE_IMAGE_TAG=base
+
+FROM alpine:edge AS builder
 LABEL stage=go-builder
 WORKDIR /app/
 RUN apk add --no-cache bash curl jq gcc git go musl-dev
@@ -7,36 +10,27 @@ RUN go mod download
 COPY ./ ./
 RUN bash build.sh release docker

-FROM alpine:edge
-
+FROM openlistteam/openlist-base-image:${BASE_IMAGE_TAG}
+LABEL MAINTAINER="OpenList"
 ARG INSTALL_FFMPEG=false
 ARG INSTALL_ARIA2=false
-LABEL MAINTAINER="OpenList"
+ARG USER=openlist
+ARG UID=1001
+ARG GID=1001

 WORKDIR /opt/openlist/

-RUN apk update && \
-    apk upgrade --no-cache && \
-    apk add --no-cache bash ca-certificates su-exec tzdata; \
-    [ "$INSTALL_FFMPEG" = "true" ] && apk add --no-cache ffmpeg; \
-    [ "$INSTALL_ARIA2" = "true" ] && apk add --no-cache curl aria2 && \
-    mkdir -p /opt/aria2/.aria2 && \
-    wget https://github.com/P3TERX/aria2.conf/archive/refs/heads/master.tar.gz -O /tmp/aria-conf.tar.gz && \
-    tar -zxvf /tmp/aria-conf.tar.gz -C /opt/aria2/.aria2 --strip-components=1 && rm -f /tmp/aria-conf.tar.gz && \
-    sed -i 's|rpc-secret|#rpc-secret|g' /opt/aria2/.aria2/aria2.conf && \
-    sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/aria2.conf && \
-    sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/script.conf && \
-    sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/aria2.conf && \
-    sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/script.conf && \
-    touch /opt/aria2/.aria2/aria2.session && \
-    /opt/aria2/.aria2/tracker.sh ; \
-    rm -rf /var/cache/apk/*
+RUN addgroup -g ${GID} ${USER} && \
+    adduser -D -u ${UID} -G ${USER} ${USER} && \
+    mkdir -p /opt/openlist/data

-COPY --chmod=755 --from=builder /app/bin/openlist ./
-COPY --chmod=755 entrypoint.sh /entrypoint.sh
+COPY --from=builder --chmod=755 --chown=${UID}:${GID} /app/bin/openlist ./
+COPY --chmod=755 --chown=${UID}:${GID} entrypoint.sh /entrypoint.sh

+USER ${USER}
 RUN /entrypoint.sh version

-ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
+ENV UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
 VOLUME /opt/openlist/data/
 EXPOSE 5244 5245
 CMD [ "/entrypoint.sh" ]
```
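The new `BASE_IMAGE_TAG` argument selects the `openlistteam/openlist-base-image` variant named in the Dockerfile comment (base, aria2, ffmpeg, or aio). A usage sketch; the local tag is hypothetical:

```sh
# Build a local image on the aio base with both optional tools enabled.
docker build \
  --build-arg BASE_IMAGE_TAG=aio \
  --build-arg INSTALL_FFMPEG=true \
  --build-arg INSTALL_ARIA2=true \
  -t openlist:aio-local .
```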
```diff
@@ -1,34 +1,26 @@
-FROM docker.io/library/alpine:edge
-
+ARG BASE_IMAGE_TAG=base
+FROM ghcr.io/openlistteam/openlist-base-image:${BASE_IMAGE_TAG}
 LABEL MAINTAINER="OpenList"
 ARG TARGETPLATFORM
 ARG INSTALL_FFMPEG=false
 ARG INSTALL_ARIA2=false
-LABEL MAINTAINER="OpenList"
+ARG USER=openlist
+ARG UID=1001
+ARG GID=1001

 WORKDIR /opt/openlist/

-RUN apk update && \
-    apk upgrade --no-cache && \
-    apk add --no-cache bash ca-certificates su-exec tzdata; \
-    [ "$INSTALL_FFMPEG" = "true" ] && apk add --no-cache ffmpeg; \
-    [ "$INSTALL_ARIA2" = "true" ] && apk add --no-cache curl aria2 && \
-    mkdir -p /opt/aria2/.aria2 && \
-    wget https://github.com/P3TERX/aria2.conf/archive/refs/heads/master.tar.gz -O /tmp/aria-conf.tar.gz && \
-    tar -zxvf /tmp/aria-conf.tar.gz -C /opt/aria2/.aria2 --strip-components=1 && rm -f /tmp/aria-conf.tar.gz && \
-    sed -i 's|rpc-secret|#rpc-secret|g' /opt/aria2/.aria2/aria2.conf && \
-    sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/aria2.conf && \
-    sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/script.conf && \
-    sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/aria2.conf && \
-    sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/script.conf && \
-    touch /opt/aria2/.aria2/aria2.session && \
-    /opt/aria2/.aria2/tracker.sh ; \
-    rm -rf /var/cache/apk/*
+RUN addgroup -g ${GID} ${USER} && \
+    adduser -D -u ${UID} -G ${USER} ${USER} && \
+    mkdir -p /opt/openlist/data

-COPY --chmod=755 /build/${TARGETPLATFORM}/openlist ./
-COPY --chmod=755 entrypoint.sh /entrypoint.sh
+COPY --chmod=755 --chown=${UID}:${GID} /build/${TARGETPLATFORM}/openlist ./
+COPY --chmod=755 --chown=${UID}:${GID} entrypoint.sh /entrypoint.sh

+USER ${USER}
 RUN /entrypoint.sh version

-ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
+ENV UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
 VOLUME /opt/openlist/data/
 EXPOSE 5244 5245
 CMD [ "/entrypoint.sh" ]
```
README.md · 44 changes

```diff
@@ -20,6 +20,34 @@
 - [CODE OF CONDUCT](./CODE_OF_CONDUCT.md)
 - [LICENSE](./LICENSE)

+## Disclaimer
+
+OpenList is an open-source project independently maintained by the OpenList Team, following the AGPL-3.0 license and committed to maintaining complete code openness and modification transparency.
+
+We have noticed the emergence of some third-party projects in the community with names similar to this project, such as OpenListApp/OpenListApp, as well as some paid proprietary software using the same or similar naming. To avoid user confusion, we hereby declare:
+
+- OpenList has no official association with any third-party derivative projects.
+
+- All software, code, and services of this project are maintained by the OpenList Team and are freely available on GitHub.
+
+- Project documentation and API services primarily rely on charitable resources provided by Cloudflare. There are currently no paid plans or commercial deployments, and the use of existing features does not involve any costs.
+
+We respect the community's rights to free use and derivative development, but we also strongly urge downstream projects:
+
+- Should not use the "OpenList" name for impersonation promotion or commercial gain;
+
+- Must not distribute OpenList-based code in a closed-source manner or violate AGPL license terms.
+
+To better maintain healthy ecosystem development, we recommend:
+
+- Clearly indicate the project source and choose appropriate open-source licenses in accordance with the open-source spirit;
+
+- If involving commercial use, please avoid using "OpenList" or any confusing naming as the project name;
+
+- If you need to use materials located under OpenListTeam/Logo, you may modify and use them under compliance with the agreement.
+
+Thank you for your support and understanding of the OpenList project.
+
 ## Features

 - [x] Multiple storages
@@ -46,7 +74,6 @@
 - [x] [Thunder](https://pan.xunlei.com)
 - [x] [Lanzou](https://www.lanzou.com)
 - [x] [ILanzou](https://www.ilanzou.com)
-- [x] [Aliyundrive share](https://www.alipan.com)
 - [x] [Google photo](https://photos.google.com)
 - [x] [Mega.nz](https://mega.nz)
 - [x] [Baidu photo](https://photo.baidu.com)
@@ -57,6 +84,16 @@
 - [x] [FeijiPan](https://www.feijipan.com)
 - [x] [dogecloud](https://www.dogecloud.com/product/oss)
+- [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
+- [x] [Chaoxing](https://www.chaoxing.com)
+- [x] [CNB](https://cnb.cool/)
+- [x] [Degoo](https://degoo.com)
+- [x] [Doubao](https://www.doubao.com)
+- [x] [Febbox](https://www.febbox.com)
+- [x] [GitHub](https://github.com)
+- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
+- [x] [Teldrive](https://github.com/tgdrive/teldrive)
+- [x] [Weiyun](https://www.weiyun.com)

 - [x] Easy to deploy and out-of-the-box
 - [x] File preview (PDF, markdown, code, plain text, ...)
 - [x] Image preview in gallery mode
@@ -78,8 +115,9 @@

 ## Document

-- 📘 [Docs & Install Guide](https://docs.oplist.org)
-- 📚 [Backup Docs Site](https://docs.openlist.team)
+- 📘 [Global Site](https://doc.oplist.org)
+- 📚 [Backup Site](https://doc.openlist.team)
+- 🌏 [CN Site](https://doc.oplist.org.cn)

 ## Demo
```
43 README_cn.md

@@ -20,6 +20,34 @@

- [Code of Conduct](./CODE_OF_CONDUCT.md)
- [License](./LICENSE)

## Disclaimer

OpenList is an open-source project independently maintained by the OpenList Team. It follows the AGPL-3.0 license and is committed to keeping its code fully open and its modifications transparent.

We have noticed some third-party projects in the community with names similar to this project, such as OpenListApp/OpenListApp, as well as some paid proprietary software using the same or similar naming. To avoid user confusion, we hereby declare:

- OpenList has no official association with any third-party derivative projects.

- All software, code, and services of this project are maintained by the OpenList Team and are freely available on GitHub.

- Project documentation and API services rely primarily on free resources provided by Cloudflare. There are currently no paid plans or commercial deployments, and using the existing features does not involve any costs.

We respect the community's right to free use and derivative development, but we also strongly urge downstream projects:

- Do not use the "OpenList" name for misleading promotion or commercial gain;

- Do not distribute OpenList-based code in a closed-source manner or in violation of the AGPL license terms.

To keep the ecosystem healthy, we recommend:

- Clearly indicate the project source and choose an appropriate open-source license in keeping with the open-source spirit;

- For commercial use, avoid "OpenList" or any confusingly similar name as the project name;

- If you need to use materials under OpenListTeam/Logo, you may modify and use them in compliance with the license.

Thank you for your support and understanding of the OpenList project.

## Features

- [x] Multiple storages

@@ -46,7 +74,6 @@

- [x] [Thunder](https://pan.xunlei.com)
- [x] [Lanzou](https://www.lanzou.com)
- [x] [ILanzou](https://www.ilanzou.com)
- [x] [Aliyundrive share](https://www.alipan.com)
- [x] [Google Photos](https://photos.google.com)
- [x] [Mega.nz](https://mega.nz)
- [x] [Baidu photo](https://photo.baidu.com)

@@ -57,6 +84,15 @@

- [x] [FeijiPan](https://www.feijipan.com)
- [x] [DogeCloud](https://www.dogecloud.com/product/oss)
- [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
- [x] [Chaoxing](https://www.chaoxing.com)
- [x] [CNB](https://cnb.cool/)
- [x] [Degoo](https://degoo.com)
- [x] [Doubao](https://www.doubao.com)
- [x] [Febbox](https://www.febbox.com)
- [x] [GitHub](https://github.com)
- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
- [x] [Teldrive](https://github.com/tgdrive/teldrive)
- [x] [Weiyun](https://www.weiyun.com)
- [x] Easy to deploy and out-of-the-box
- [x] File preview (PDF, markdown, code, plain text, etc.)
- [x] Image preview in gallery mode

@@ -78,8 +114,9 @@

## Documentation

- 📘 [Docs & Install Guide](https://docs.oplist.org)
- 📚 [Backup Docs Site](https://docs.openlist.team)
- 🌏 [CN Site](https://doc.oplist.org.cn)
- 📘 [Global Site](https://doc.oplist.org)
- 📚 [Backup Site](https://doc.openlist.team)

## Demo
43 README_ja.md

@@ -20,6 +20,34 @@

- [Code of Conduct](./CODE_OF_CONDUCT.md)
- [License](./LICENSE)

## Disclaimer

OpenList is an open-source project independently maintained by the OpenList Team. It follows the AGPL-3.0 license and is committed to keeping its code fully open and its modifications transparent.

We have confirmed that some third-party projects with names similar to this project, such as OpenListApp/OpenListApp, as well as some paid proprietary software using the same or similar naming, have appeared in the community. To avoid user confusion, we hereby declare:

- OpenList has no official association with any third-party derivative projects.

- All software, code, and services of this project are maintained by the OpenList Team and are freely available on GitHub.

- Project documentation and API services rely primarily on free resources provided by Cloudflare. There are currently no paid plans or commercial deployments, and using the existing features does not involve any costs.

We respect the community's right to free use and derivative development, but we also strongly urge downstream projects:

- Do not use the "OpenList" name for misleading promotion or commercial gain;

- Do not distribute OpenList-based code in a closed-source manner or in violation of the AGPL license terms.

To keep the ecosystem healthy, we recommend:

- Clearly indicate the project source and choose an appropriate open-source license in keeping with the open-source spirit;

- For commercial use, avoid "OpenList" or any confusingly similar name as the project name;

- If you need to use materials under OpenListTeam/Logo, you may modify and use them in compliance with the license.

Thank you for your support and understanding of the OpenList project.

## Features

- [x] Multiple storages

@@ -46,7 +74,6 @@

- [x] [Thunder](https://pan.xunlei.com)
- [x] [Lanzou](https://www.lanzou.com)
- [x] [ILanzou](https://www.ilanzou.com)
- [x] [Aliyundrive share](https://www.alipan.com)
- [x] [Google photo](https://photos.google.com)
- [x] [Mega.nz](https://mega.nz)
- [x] [Baidu photo](https://photo.baidu.com)

@@ -57,6 +84,15 @@

- [x] [FeijiPan](https://www.feijipan.com)
- [x] [dogecloud](https://www.dogecloud.com/product/oss)
- [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
- [x] [Chaoxing](https://www.chaoxing.com)
- [x] [CNB](https://cnb.cool/)
- [x] [Degoo](https://degoo.com)
- [x] [Doubao](https://www.doubao.com)
- [x] [Febbox](https://www.febbox.com)
- [x] [GitHub](https://github.com)
- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
- [x] [Teldrive](https://github.com/tgdrive/teldrive)
- [x] [Weiyun](https://www.weiyun.com)
- [x] Easy to deploy and ready to use out of the box
- [x] File preview (PDF, markdown, code, plain text, etc.)
- [x] Image preview in gallery mode

@@ -78,8 +114,9 @@

## Documentation

- 📘 [Docs & Install Guide](https://docs.oplist.org)
- 📚 [Backup Docs Site](https://docs.openlist.team)
- 📘 [Global Site](https://doc.oplist.org)
- 📚 [Backup Site](https://doc.openlist.team)
- 🌏 [CN Site](https://doc.oplist.org.cn)

## Demo
43 README_nl.md

@@ -20,6 +20,34 @@

- [Code of Conduct](./CODE_OF_CONDUCT.md)
- [License](./LICENSE)

## Disclaimer

OpenList is an open-source project independently maintained by the OpenList Team. It follows the AGPL-3.0 license and is committed to keeping its code fully open and its modifications transparent.

We have noticed some third-party projects in the community with names similar to this project, such as OpenListApp/OpenListApp, as well as some paid proprietary software using the same or similar naming. To avoid user confusion, we hereby declare:

- OpenList has no official association with any third-party derivative projects.

- All software, code, and services of this project are maintained by the OpenList Team and are freely available on GitHub.

- Project documentation and API services rely primarily on free resources provided by Cloudflare. There are currently no paid plans or commercial deployments, and using the existing features does not involve any costs.

We respect the community's right to free use and derivative development, but we also strongly urge downstream projects:

- Do not use the "OpenList" name for misleading promotion or commercial gain;

- Do not distribute OpenList-based code in a closed-source manner or in violation of the AGPL license terms.

To keep the ecosystem healthy, we recommend:

- Clearly indicate the project source and choose an appropriate open-source license in keeping with the open-source spirit;

- For commercial use, avoid "OpenList" or any confusingly similar name as the project name;

- If you need to use materials under OpenListTeam/Logo, you may modify and use them in compliance with the license.

Thank you for your support and understanding.

## Features

- [x] Multiple storages

@@ -46,7 +74,6 @@

- [x] [Thunder](https://pan.xunlei.com)
- [x] [Lanzou](https://www.lanzou.com)
- [x] [ILanzou](https://www.ilanzou.com)
- [x] [Aliyundrive share](https://www.alipan.com)
- [x] [Google photo](https://photos.google.com)
- [x] [Mega.nz](https://mega.nz)
- [x] [Baidu photo](https://photo.baidu.com)

@@ -57,6 +84,15 @@

- [x] [FeijiPan](https://www.feijipan.com)
- [x] [dogecloud](https://www.dogecloud.com/product/oss)
- [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
- [x] [Chaoxing](https://www.chaoxing.com)
- [x] [CNB](https://cnb.cool/)
- [x] [Degoo](https://degoo.com)
- [x] [Doubao](https://www.doubao.com)
- [x] [Febbox](https://www.febbox.com)
- [x] [GitHub](https://github.com)
- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
- [x] [Teldrive](https://github.com/tgdrive/teldrive)
- [x] [Weiyun](https://www.weiyun.com)
- [x] Easy to deploy and ready to use out of the box
- [x] File preview (PDF, markdown, code, plain text, ...)
- [x] Image preview in gallery mode

@@ -78,8 +114,9 @@

## Documentation

- 📘 [Docs & Install Guide](https://docs.oplist.org)
- 📚 [Backup Docs Site](https://docs.openlist.team)
- 📘 [Global Site](https://doc.oplist.org)
- 📚 [Backup Site](https://doc.openlist.team)
- 🌏 [CN Site](https://doc.oplist.org.cn)

## Demo
259 build.sh

@@ -4,6 +4,9 @@ builtAt="$(date +'%F %T %z')"

gitAuthor="The OpenList Projects Contributors <noreply@openlist.team>"
gitCommit=$(git log --pretty=format:"%h" -1)

# Set frontend repository, default to OpenListTeam/OpenList-Frontend
frontendRepo="${FRONTEND_REPO:-OpenListTeam/OpenList-Frontend}"

githubAuthArgs=""
if [ -n "$GITHUB_TOKEN" ]; then
    githubAuthArgs="--header \"Authorization: Bearer $GITHUB_TOKEN\""

@@ -17,15 +20,15 @@ fi

if [ "$1" = "dev" ]; then
    version="dev"
    webVersion="dev"
    webVersion="rolling"
elif [ "$1" = "beta" ]; then
    version="beta"
    webVersion="dev"
    webVersion="rolling"
else
    git tag -d beta || true
    # Always true if there's no tag
    version=$(git describe --abbrev=0 --tags 2>/dev/null || echo "v0.0.0")
    webVersion=$(eval "curl -fsSL --max-time 2 $githubAuthArgs \"https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases/latest\"" | grep "tag_name" | head -n 1 | awk -F ":" '{print $2}' | sed 's/\"//g;s/,//g;s/ //g')
    webVersion=$(eval "curl -fsSL --max-time 2 $githubAuthArgs \"https://api.github.com/repos/$frontendRepo/releases/latest\"" | grep "tag_name" | head -n 1 | awk -F ":" '{print $2}' | sed 's/\"//g;s/,//g;s/ //g')
fi

echo "backend version: $version"
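For reference, the curl | grep | awk pipeline above does nothing more than pull `tag_name` out of the GitHub releases API. A minimal Go sketch of the same lookup, using only the standard library (the `$GITHUB_TOKEN` auth header handling is deliberately omitted here):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

// latestTag fetches the latest release tag for a GitHub repository.
func latestTag(repo string) (string, error) {
	client := &http.Client{Timeout: 2 * time.Second} // mirrors curl --max-time 2
	resp, err := client.Get("https://api.github.com/repos/" + repo + "/releases/latest")
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	var release struct {
		TagName string `json:"tag_name"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&release); err != nil {
		return "", err
	}
	return release.TagName, nil
}

func main() {
	tag, err := latestTag("OpenListTeam/OpenList-Frontend")
	if err != nil {
		panic(err)
	}
	fmt.Println("web version:", tag)
}
```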
@@ -45,30 +48,21 @@ ldflags="\

    -X 'github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=$webVersion' \
"

FetchWebDev() {
    pre_release_tag=$(eval "curl -fsSL --max-time 2 $githubAuthArgs https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases" | jq -r 'map(select(.prerelease)) | first | .tag_name')
    if [ -z "$pre_release_tag" ] || [ "$pre_release_tag" == "null" ]; then
        # fall back to latest release
        pre_release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases/latest\"")
    else
        pre_release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases/tags/$pre_release_tag\"")
    fi
FetchWebRolling() {
    pre_release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/$frontendRepo/releases/tags/rolling\"")
    pre_release_assets=$(echo "$pre_release_json" | jq -r '.assets[].browser_download_url')

    if [ "$useLite" = true ]; then
        pre_release_tar_url=$(echo "$pre_release_assets" | grep "openlist-frontend-dist-lite" | grep "\.tar\.gz$")
    else
        pre_release_tar_url=$(echo "$pre_release_assets" | grep "openlist-frontend-dist" | grep -v "lite" | grep "\.tar\.gz$")
    fi

    curl -fsSL "$pre_release_tar_url" -o web-dist-dev.tar.gz
    # There is no lite for rolling
    pre_release_tar_url=$(echo "$pre_release_assets" | grep "openlist-frontend-dist" | grep -v "lite" | grep "\.tar\.gz$")

    curl -fsSL "$pre_release_tar_url" -o dist.tar.gz
    rm -rf public/dist && mkdir -p public/dist
    tar -zxvf web-dist-dev.tar.gz -C public/dist
    rm -rf web-dist-dev.tar.gz
    tar -zxvf dist.tar.gz -C public/dist
    rm -rf dist.tar.gz
}
FetchWebRelease() {
    release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases/latest\"")
    release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/$frontendRepo/releases/latest\"")
    release_assets=$(echo "$release_json" | jq -r '.assets[].browser_download_url')

    if [ "$useLite" = true ]; then

@@ -95,6 +89,45 @@ BuildWinArm64() {

    go build -o "$1" -ldflags="$ldflags" -tags=jsoniter .
}
BuildWin7() {
    # Setup Win7 Go compiler (patched version that supports Windows 7)
    go_version=$(go version | grep -o 'go[0-9]\+\.[0-9]\+\.[0-9]\+' | sed 's/go//')
    echo "Detected Go version: $go_version"

    curl -fsSL --retry 3 -o go-win7.zip -H "Authorization: Bearer $GITHUB_TOKEN" \
        "https://github.com/XTLS/go-win7/releases/download/patched-${go_version}/go-for-win7-linux-amd64.zip"

    rm -rf go-win7
    unzip go-win7.zip -d go-win7
    rm go-win7.zip

    # Set permissions for all wrapper files
    chmod +x ./wrapper/zcc-win7
    chmod +x ./wrapper/zcxx-win7
    chmod +x ./wrapper/zcc-win7-386
    chmod +x ./wrapper/zcxx-win7-386

    # Build for both 386 and amd64 architectures
    for arch in "386" "amd64"; do
        echo "building for windows7-${arch}"
        export GOOS=windows
        export GOARCH=${arch}
        export CGO_ENABLED=1

        # Use architecture-specific wrapper files
        if [ "$arch" = "386" ]; then
            export CC=$(pwd)/wrapper/zcc-win7-386
            export CXX=$(pwd)/wrapper/zcxx-win7-386
        else
            export CC=$(pwd)/wrapper/zcc-win7
            export CXX=$(pwd)/wrapper/zcxx-win7
        fi

        # Use the patched Go compiler for Win7 compatibility
        $(pwd)/go-win7/bin/go build -o "${1}-${arch}.exe" -ldflags="$ldflags" -tags=jsoniter .
    done
}
BuildDev() {
    rm -rf .git/
    mkdir -p "dist"

@@ -121,8 +154,8 @@ BuildDev() {

    xgo -targets=windows/amd64,darwin/amd64,darwin/arm64 -out "$appName" -ldflags="$ldflags" -tags=jsoniter .
    mv "$appName"-* dist
    cd dist
    cp ./"$appName"-windows-amd64.exe ./"$appName"-windows-amd64-upx.exe
    upx -9 ./"$appName"-windows-amd64-upx.exe
    # cp ./"$appName"-windows-amd64.exe ./"$appName"-windows-amd64-upx.exe
    # upx -9 ./"$appName"-windows-amd64-upx.exe
    find . -type f -print0 | xargs -0 md5sum >md5.txt
    cat md5.txt
}
@@ -134,7 +167,7 @@ BuildDocker() {

PrepareBuildDockerMusl() {
    mkdir -p build/musl-libs
    BASE="https://github.com/OpenListTeam/musl-compilers/releases/latest/download/"
    FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross i486-linux-musl-cross s390x-linux-musl-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross riscv64-linux-musl-cross powerpc64le-linux-musl-cross)
    FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross i486-linux-musl-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross riscv64-linux-musl-cross powerpc64le-linux-musl-cross loongarch64-linux-musl-cross) ## Disable s390x-linux-musl-cross builds
    for i in "${FILES[@]}"; do
        url="${BASE}${i}.tgz"
        lib_tgz="build/${i}.tgz"

@@ -153,8 +186,8 @@ BuildDockerMultiplatform() {

    docker_lflags="--extldflags '-static -fpic' $ldflags"
    export CGO_ENABLED=1

    OS_ARCHES=(linux-amd64 linux-arm64 linux-386 linux-s390x linux-riscv64 linux-ppc64le)
    CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc i486-linux-musl-gcc s390x-linux-musl-gcc riscv64-linux-musl-gcc powerpc64le-linux-musl-gcc)
    OS_ARCHES=(linux-amd64 linux-arm64 linux-386 linux-riscv64 linux-ppc64le linux-loong64) ## Disable linux-s390x builds
    CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc i486-linux-musl-gcc riscv64-linux-musl-gcc powerpc64le-linux-musl-gcc loongarch64-linux-musl-gcc) ## Disable s390x-linux-musl-gcc builds
    for i in "${!OS_ARCHES[@]}"; do
        os_arch=${OS_ARCHES[$i]}
        cgo_cc=${CGO_ARGS[$i]}
@@ -186,12 +219,171 @@ BuildRelease() {

    rm -rf .git/
    mkdir -p "build"
    BuildWinArm64 ./build/"$appName"-windows-arm64.exe
    BuildWin7 ./build/"$appName"-windows7
    xgo -out "$appName" -ldflags="$ldflags" -tags=jsoniter .
    # why? Because some target platforms seem to have issues with upx compression
    upx -9 ./"$appName"-linux-amd64
    cp ./"$appName"-windows-amd64.exe ./"$appName"-windows-amd64-upx.exe
    upx -9 ./"$appName"-windows-amd64-upx.exe
    # upx -9 ./"$appName"-linux-amd64
    # cp ./"$appName"-windows-amd64.exe ./"$appName"-windows-amd64-upx.exe
    # upx -9 ./"$appName"-windows-amd64-upx.exe
    mv "$appName"-* build

    # Build LoongArch with glibc (both old world abi1.0 and new world abi2.0)
    # Separate from musl builds to avoid cache conflicts
    BuildLoongGLIBC ./build/$appName-linux-loong64-abi1.0 abi1.0
    BuildLoongGLIBC ./build/$appName-linux-loong64 abi2.0
}
BuildLoongGLIBC() {
    local target_abi="$2"
    local output_file="$1"
    local oldWorldGoVersion="1.25.0"

    if [ "$target_abi" = "abi1.0" ]; then
        echo building for linux-loong64-abi1.0
    else
        echo building for linux-loong64-abi2.0
        target_abi="abi2.0" # Default to abi2.0 if not specified
    fi

    # Note: No longer need global cache cleanup since ABI1.0 uses isolated cache directory
    echo "Using optimized cache strategy: ABI1.0 has isolated cache, ABI2.0 uses standard cache"

    if [ "$target_abi" = "abi1.0" ]; then
        # Setup abi1.0 toolchain and patched Go compiler similar to cgo-action implementation
        echo "Setting up Loongson old-world ABI1.0 toolchain and patched Go compiler..."

        # Download and setup patched Go compiler for old-world
        if ! curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
            "https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250821/go${oldWorldGoVersion}.linux-amd64.tar.gz" \
            -o go-loong64-abi1.0.tar.gz; then
            echo "Error: Failed to download patched Go compiler for old-world ABI1.0"
            if [ -n "$GITHUB_TOKEN" ]; then
                echo "Error output from curl:"
                curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
                    "https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250821/go${oldWorldGoVersion}.linux-amd64.tar.gz" \
                    -o go-loong64-abi1.0.tar.gz || true
            fi
            return 1
        fi

        rm -rf go-loong64-abi1.0
        mkdir go-loong64-abi1.0
        if ! tar -xzf go-loong64-abi1.0.tar.gz -C go-loong64-abi1.0 --strip-components=1; then
            echo "Error: Failed to extract patched Go compiler"
            return 1
        fi
        rm go-loong64-abi1.0.tar.gz

        # Download and setup GCC toolchain for old-world
        if ! curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
            "https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/loongson-gnu-toolchain-8.3.novec-x86_64-loongarch64-linux-gnu-rc1.1.tar.xz" \
            -o gcc8-loong64-abi1.0.tar.xz; then
            echo "Error: Failed to download GCC toolchain for old-world ABI1.0"
            if [ -n "$GITHUB_TOKEN" ]; then
                echo "Error output from curl:"
                curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
                    "https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/loongson-gnu-toolchain-8.3.novec-x86_64-loongarch64-linux-gnu-rc1.1.tar.xz" \
                    -o gcc8-loong64-abi1.0.tar.xz || true
            fi
            return 1
        fi

        rm -rf gcc8-loong64-abi1.0
        mkdir gcc8-loong64-abi1.0
        if ! tar -Jxf gcc8-loong64-abi1.0.tar.xz -C gcc8-loong64-abi1.0 --strip-components=1; then
            echo "Error: Failed to extract GCC toolchain"
            return 1
        fi
        rm gcc8-loong64-abi1.0.tar.xz

        # Setup separate cache directory for ABI1.0 to avoid cache pollution
        abi1_cache_dir="$(pwd)/go-loong64-abi1.0-cache"
        mkdir -p "$abi1_cache_dir"
        echo "Using separate cache directory for ABI1.0: $abi1_cache_dir"

        # Use patched Go compiler for old-world build (critical for ABI1.0 compatibility)
        echo "Building with patched Go compiler for old-world ABI1.0..."
        echo "Using isolated cache directory: $abi1_cache_dir"

        # Use env command to set environment variables locally without affecting global environment
        if ! env GOOS=linux GOARCH=loong64 \
            CC="$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-gcc" \
            CXX="$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-g++" \
            CGO_ENABLED=1 \
            GOCACHE="$abi1_cache_dir" \
            $(pwd)/go-loong64-abi1.0/bin/go build -a -o "$output_file" -ldflags="$ldflags" -tags=jsoniter .; then
            echo "Error: Build failed with patched Go compiler"
            echo "Attempting retry with cache cleanup..."
            env GOCACHE="$abi1_cache_dir" $(pwd)/go-loong64-abi1.0/bin/go clean -cache
            if ! env GOOS=linux GOARCH=loong64 \
                CC="$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-gcc" \
                CXX="$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-g++" \
                CGO_ENABLED=1 \
                GOCACHE="$abi1_cache_dir" \
                $(pwd)/go-loong64-abi1.0/bin/go build -a -o "$output_file" -ldflags="$ldflags" -tags=jsoniter .; then
                echo "Error: Build failed again after cache cleanup"
                echo "Build environment details:"
                echo "GOOS=linux"
                echo "GOARCH=loong64"
                echo "CC=$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-gcc"
                echo "CXX=$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-g++"
                echo "CGO_ENABLED=1"
                echo "GOCACHE=$abi1_cache_dir"
                echo "Go version: $($(pwd)/go-loong64-abi1.0/bin/go version)"
                echo "GCC version: $($(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-gcc --version | head -1)"
                return 1
            fi
        fi
    else
        # Setup abi2.0 toolchain for new world glibc build
        echo "Setting up new-world ABI2.0 toolchain..."
        if ! curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
            "https://github.com/loong64/cross-tools/releases/download/20250507/x86_64-cross-tools-loongarch64-unknown-linux-gnu-legacy.tar.xz" \
            -o gcc12-loong64-abi2.0.tar.xz; then
            echo "Error: Failed to download GCC toolchain for new-world ABI2.0"
            if [ -n "$GITHUB_TOKEN" ]; then
                echo "Error output from curl:"
                curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
                    "https://github.com/loong64/cross-tools/releases/download/20250507/x86_64-cross-tools-loongarch64-unknown-linux-gnu-legacy.tar.xz" \
                    -o gcc12-loong64-abi2.0.tar.xz || true
            fi
            return 1
        fi

        rm -rf gcc12-loong64-abi2.0
        mkdir gcc12-loong64-abi2.0
        if ! tar -Jxf gcc12-loong64-abi2.0.tar.xz -C gcc12-loong64-abi2.0 --strip-components=1; then
            echo "Error: Failed to extract GCC toolchain"
            return 1
        fi
        rm gcc12-loong64-abi2.0.tar.xz

        export GOOS=linux
        export GOARCH=loong64
        export CC=$(pwd)/gcc12-loong64-abi2.0/bin/loongarch64-unknown-linux-gnu-gcc
        export CXX=$(pwd)/gcc12-loong64-abi2.0/bin/loongarch64-unknown-linux-gnu-g++
        export CGO_ENABLED=1

        # Use standard Go compiler for new-world build
        echo "Building with standard Go compiler for new-world ABI2.0..."
        if ! go build -a -o "$output_file" -ldflags="$ldflags" -tags=jsoniter .; then
            echo "Error: Build failed with standard Go compiler"
            echo "Attempting retry with cache cleanup..."
            go clean -cache
            if ! go build -a -o "$output_file" -ldflags="$ldflags" -tags=jsoniter .; then
                echo "Error: Build failed again after cache cleanup"
                echo "Build environment details:"
                echo "GOOS=$GOOS"
                echo "GOARCH=$GOARCH"
                echo "CC=$CC"
                echo "CXX=$CXX"
                echo "CGO_ENABLED=$CGO_ENABLED"
                echo "Go version: $(go version)"
                echo "GCC version: $($CC --version | head -1)"
                return 1
            fi
        fi
    fi
}
BuildReleaseLinuxMusl() {

@@ -249,6 +441,7 @@ BuildReleaseLinuxMuslArm() {

    done
}

BuildReleaseAndroid() {
    rm -rf .git/
    mkdir -p "build"

@@ -278,6 +471,7 @@ BuildReleaseFreeBSD() {

    freebsd_version=$(eval "curl -fsSL --max-time 2 $githubAuthArgs \"https://api.github.com/repos/freebsd/freebsd-src/tags\"" | \
        jq -r '.[].name' | \
        grep '^release/14\.' | \
        grep -v -- '-p[0-9]*$' | \
        sort -V | \
        tail -1 | \
        sed 's/release\///' | \

@@ -343,7 +537,7 @@ MakeRelease() {

        tar -czvf compress/"$i$liteSuffix".tar.gz "$appName"
        rm -f "$appName"
    done
    for i in $(find . -type f -name "$appName-windows-*"); do
    for i in $(find . -type f \( -name "$appName-windows-*" -o -name "$appName-windows7-*" \)); do
        cp "$i" "$appName".exe
        zip compress/$(echo $i | sed 's/\.[^.]*$//')$liteSuffix.zip "$appName".exe
        rm -f "$appName".exe

@@ -390,7 +584,7 @@ for arg in "$@"; do

done

if [ "$buildType" = "dev" ]; then
    FetchWebDev
    FetchWebRolling
    if [ "$dockerType" = "docker" ]; then
        BuildDocker
    elif [ "$dockerType" = "docker-multiplatform" ]; then

@@ -402,7 +596,7 @@ if [ "$buildType" = "dev" ]; then

    fi
elif [ "$buildType" = "release" -o "$buildType" = "beta" ]; then
    if [ "$buildType" = "beta" ]; then
        FetchWebDev
        FetchWebRolling
    else
        FetchWebRelease
    fi

@@ -483,4 +677,5 @@ else

    echo -e " $0 release"
    echo -e " $0 release lite"
    echo -e " $0 release docker lite"
    echo -e " $0 release linux_musl"
fi
28 cmd/admin.go

@@ -4,6 +4,8 @@ Copyright © 2022 NAME HERE <EMAIL ADDRESS>

package cmd

import (
	"fmt"

	"github.com/OpenListTeam/OpenList/v4/internal/conf"
	"github.com/OpenListTeam/OpenList/v4/internal/op"
	"github.com/OpenListTeam/OpenList/v4/internal/setting"

@@ -24,10 +26,11 @@ var AdminCmd = &cobra.Command{

		if err != nil {
			utils.Log.Errorf("failed get admin user: %+v", err)
		} else {
			utils.Log.Infof("Admin user's username: %s", admin.Username)
			utils.Log.Infof("The password can only be output at the first startup, and then stored as a hash value, which cannot be reversed")
			utils.Log.Infof("You can reset the password with a random string by running [openlist admin random]")
			utils.Log.Infof("You can also set a new password by running [openlist admin set NEW_PASSWORD]")
			utils.Log.Infof("get admin user from CLI")
			fmt.Println("Admin user's username:", admin.Username)
			fmt.Println("The password can only be output at the first startup, and then stored as a hash value, which cannot be reversed")
			fmt.Println("You can reset the password with a random string by running [openlist admin random]")
			fmt.Println("You can also set a new password by running [openlist admin set NEW_PASSWORD]")
		}
	},
}

@@ -36,6 +39,7 @@ var RandomPasswordCmd = &cobra.Command{

	Use:   "random",
	Short: "Reset admin user's password to a random string",
	Run: func(cmd *cobra.Command, args []string) {
		utils.Log.Infof("reset admin user's password to a random string from CLI")
		newPwd := random.String(8)
		setAdminPassword(newPwd)
	},

@@ -44,12 +48,12 @@ var RandomPasswordCmd = &cobra.Command{

var SetPasswordCmd = &cobra.Command{
	Use:   "set",
	Short: "Set admin user's password",
	Run: func(cmd *cobra.Command, args []string) {
	RunE: func(cmd *cobra.Command, args []string) error {
		if len(args) == 0 {
			utils.Log.Errorf("Please enter the new password")
			return
			return fmt.Errorf("Please enter the new password")
		}
		setAdminPassword(args[0])
		return nil
	},
}

@@ -60,7 +64,8 @@ var ShowTokenCmd = &cobra.Command{

		Init()
		defer Release()
		token := setting.GetStr(conf.Token)
		utils.Log.Infof("Admin token: %s", token)
		utils.Log.Infof("show admin token from CLI")
		fmt.Println("Admin token:", token)
	},
}

@@ -77,9 +82,10 @@ func setAdminPassword(pwd string) {

		utils.Log.Errorf("failed update admin user: %+v", err)
		return
	}
	utils.Log.Infof("admin user has been updated:")
	utils.Log.Infof("username: %s", admin.Username)
	utils.Log.Infof("password: %s", pwd)
	utils.Log.Infof("admin user has been updated from CLI")
	fmt.Println("admin user has been updated:")
	fmt.Println("username:", admin.Username)
	fmt.Println("password:", pwd)
	DelAdminCacheOnline()
}
@@ -4,6 +4,8 @@ Copyright © 2022 NAME HERE <EMAIL ADDRESS>

package cmd

import (
	"fmt"

	"github.com/OpenListTeam/OpenList/v4/internal/op"
	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
	"github.com/spf13/cobra"

@@ -24,7 +26,8 @@ var Cancel2FACmd = &cobra.Command{

		if err != nil {
			utils.Log.Errorf("failed to cancel 2FA: %+v", err)
		} else {
			utils.Log.Info("2FA canceled")
			utils.Log.Infof("2FA is canceled from CLI")
			fmt.Println("2FA canceled")
			DelAdminCacheOnline()
		}
	}
@@ -16,7 +16,7 @@ var RootCmd = &cobra.Command{

	Short: "A file list program that supports multiple storage.",
	Long: `A file list program that supports multiple storage,
built with love by OpenListTeam.
Complete documentation is available at https://docs.openlist.team/`,
Complete documentation is available at https://doc.oplist.org/`,
}

func Execute() {
@@ -19,6 +19,7 @@ import (

	"github.com/OpenListTeam/OpenList/v4/internal/fs"
	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
	"github.com/OpenListTeam/OpenList/v4/server"
	"github.com/OpenListTeam/OpenList/v4/server/middlewares"
	"github.com/OpenListTeam/sftpd-openlist"
	ftpserver "github.com/fclairamb/ftpserverlib"
	"github.com/gin-gonic/gin"

@@ -47,7 +48,15 @@ the address is defined in config file`,

		gin.SetMode(gin.ReleaseMode)
	}
	r := gin.New()
	r.Use(gin.LoggerWithWriter(log.StandardLogger().Out), gin.RecoveryWithWriter(log.StandardLogger().Out))

	// gin log
	if conf.Conf.Log.Filter.Enable {
		r.Use(middlewares.FilteredLogger())
	} else {
		r.Use(gin.LoggerWithWriter(log.StandardLogger().Out))
	}
	r.Use(gin.RecoveryWithWriter(log.StandardLogger().Out))

	server.Init(r)
	var httpHandler http.Handler = r
	if conf.Conf.Scheme.EnableH2c {

@@ -56,6 +65,7 @@ the address is defined in config file`,

	var httpSrv, httpsSrv, unixSrv *http.Server
	if conf.Conf.Scheme.HttpPort != -1 {
		httpBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpPort)
		fmt.Printf("start HTTP server @ %s\n", httpBase)
		utils.Log.Infof("start HTTP server @ %s", httpBase)
		httpSrv = &http.Server{Addr: httpBase, Handler: httpHandler}
		go func() {

@@ -67,6 +77,7 @@ the address is defined in config file`,

	}
	if conf.Conf.Scheme.HttpsPort != -1 {
		httpsBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpsPort)
		fmt.Printf("start HTTPS server @ %s\n", httpsBase)
		utils.Log.Infof("start HTTPS server @ %s", httpsBase)
		httpsSrv = &http.Server{Addr: httpsBase, Handler: r}
		go func() {

@@ -77,6 +88,7 @@ the address is defined in config file`,

		}()
	}
	if conf.Conf.Scheme.UnixFile != "" {
		fmt.Printf("start unix server @ %s\n", conf.Conf.Scheme.UnixFile)
		utils.Log.Infof("start unix server @ %s", conf.Conf.Scheme.UnixFile)
		unixSrv = &http.Server{Handler: httpHandler}
		go func() {

@@ -105,6 +117,7 @@ the address is defined in config file`,

	s3r.Use(gin.LoggerWithWriter(log.StandardLogger().Out), gin.RecoveryWithWriter(log.StandardLogger().Out))
	server.InitS3(s3r)
	s3Base := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.S3.Port)
	fmt.Printf("start S3 server @ %s\n", s3Base)
	utils.Log.Infof("start S3 server @ %s", s3Base)
	go func() {
		var err error

@@ -129,6 +142,7 @@ the address is defined in config file`,

	if err != nil {
		utils.Log.Fatalf("failed to start ftp driver: %s", err.Error())
	} else {
		fmt.Printf("start ftp server on %s\n", conf.Conf.FTP.Listen)
		utils.Log.Infof("start ftp server on %s", conf.Conf.FTP.Listen)
		go func() {
			ftpServer = ftpserver.NewFtpServer(ftpDriver)

@@ -147,6 +161,7 @@ the address is defined in config file`,

	if err != nil {
		utils.Log.Fatalf("failed to start sftp driver: %s", err.Error())
	} else {
		fmt.Printf("start sftp server on %s", conf.Conf.SFTP.Listen)
		utils.Log.Infof("start sftp server on %s", conf.Conf.SFTP.Listen)
		go func() {
			sftpServer = sftpd.NewSftpServer(sftpDriver)
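The new `conf.Conf.Log.Filter.Enable` branch swaps gin's stock logger for a filtered one. The repository's `middlewares.FilteredLogger` is not shown in this diff; the sketch below is only an assumed illustration of what such a middleware can look like, with a hypothetical path-prefix skip list:

```go
package main

import (
	"log"
	"strings"
	"time"

	"github.com/gin-gonic/gin"
)

// FilteredLogger logs each request unless its path matches a skip prefix.
// This is a sketch, not the repository's actual implementation.
func FilteredLogger(skipPrefixes ...string) gin.HandlerFunc {
	return func(c *gin.Context) {
		start := time.Now()
		c.Next()
		for _, p := range skipPrefixes {
			if strings.HasPrefix(c.Request.URL.Path, p) {
				return // suppress noisy endpoints, e.g. health checks
			}
		}
		log.Printf("%s %s -> %d (%s)", c.Request.Method, c.Request.URL.Path, c.Writer.Status(), time.Since(start))
	}
}

func main() {
	r := gin.New()
	r.Use(FilteredLogger("/ping"), gin.Recovery())
	r.GET("/ping", func(c *gin.Context) { c.String(200, "pong") })
	_ = r.Run(":5244")
}
```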
@@ -4,6 +4,7 @@ Copyright © 2023 NAME HERE <EMAIL ADDRESS>

package cmd

import (
	"fmt"
	"os"
	"strconv"

@@ -22,28 +23,61 @@ var storageCmd = &cobra.Command{

}

var disableStorageCmd = &cobra.Command{
	Use:   "disable",
	Short: "Disable a storage",
	Run: func(cmd *cobra.Command, args []string) {
	Use:   "disable [mount path]",
	Short: "Disable a storage by mount path",
	RunE: func(cmd *cobra.Command, args []string) error {
		if len(args) < 1 {
			utils.Log.Errorf("mount path is required")
			return
			return fmt.Errorf("mount path is required")
		}
		mountPath := args[0]
		Init()
		defer Release()
		storage, err := db.GetStorageByMountPath(mountPath)
		if err != nil {
			utils.Log.Errorf("failed to query storage: %+v", err)
		} else {
			storage.Disabled = true
			err = db.UpdateStorage(storage)
			if err != nil {
				utils.Log.Errorf("failed to update storage: %+v", err)
			} else {
				utils.Log.Infof("Storage with mount path [%s] have been disabled", mountPath)
			return fmt.Errorf("failed to query storage: %+v", err)
		}
		storage.Disabled = true
		err = db.UpdateStorage(storage)
		if err != nil {
			return fmt.Errorf("failed to update storage: %+v", err)
		}
		utils.Log.Infof("Storage with mount path [%s] has been disabled from CLI", mountPath)
		fmt.Printf("Storage with mount path [%s] has been disabled\n", mountPath)
		return nil
	},
}

var deleteStorageCmd = &cobra.Command{
	Use:   "delete [id]",
	Short: "Delete a storage by id",
	RunE: func(cmd *cobra.Command, args []string) error {
		if len(args) < 1 {
			return fmt.Errorf("id is required")
		}
		id, err := strconv.Atoi(args[0])
		if err != nil {
			return fmt.Errorf("id must be a number")
		}

		// Ask for confirmation unless --force is given.
		if force, _ := cmd.Flags().GetBool("force"); !force {
			fmt.Printf("Are you sure you want to delete storage with id [%d]? [y/N]: ", id)
			var confirm string
			fmt.Scanln(&confirm)
			if confirm != "y" && confirm != "Y" {
				fmt.Println("Delete operation cancelled.")
				return nil
			}
		}

		Init()
		defer Release()
		err = db.DeleteStorageById(uint(id))
		if err != nil {
			return fmt.Errorf("failed to delete storage by id: %+v", err)
		}
		utils.Log.Infof("Storage with id [%d] has been deleted from CLI", id)
		fmt.Printf("Storage with id [%d] has been deleted\n", id)
		return nil
	},
}

@@ -88,14 +122,14 @@ var storageTableHeight int

var listStorageCmd = &cobra.Command{
	Use:   "list",
	Short: "List all storages",
	Run: func(cmd *cobra.Command, args []string) {
	RunE: func(cmd *cobra.Command, args []string) error {
		Init()
		defer Release()
		storages, _, err := db.GetStorages(1, -1)
		if err != nil {
			utils.Log.Errorf("failed to query storages: %+v", err)
			return fmt.Errorf("failed to query storages: %+v", err)
		} else {
			utils.Log.Infof("Found %d storages", len(storages))
			fmt.Printf("Found %d storages\n", len(storages))
			columns := []table.Column{
				{Title: "ID", Width: 4},
				{Title: "Driver", Width: 16},

@@ -138,10 +172,11 @@ var listStorageCmd = &cobra.Command{

			m := model{t}
			if _, err := tea.NewProgram(m).Run(); err != nil {
				utils.Log.Errorf("failed to run program: %+v", err)
				fmt.Printf("failed to run program: %+v\n", err)
				os.Exit(1)
			}
		}
		return nil
	},
}

@@ -151,6 +186,8 @@ func init() {

	storageCmd.AddCommand(disableStorageCmd)
	storageCmd.AddCommand(listStorageCmd)
	storageCmd.PersistentFlags().IntVarP(&storageTableHeight, "height", "H", 10, "Table height")
	storageCmd.AddCommand(deleteStorageCmd)
	deleteStorageCmd.Flags().BoolP("force", "f", false, "Force delete without confirmation")
	// Here you will define your flags and configuration settings.

	// Cobra supports Persistent Flags which will work for this command
@@ -6,10 +6,9 @@ services:

    ports:
      - '5244:5244'
      - '5245:5245'
    user: '0:0'
    environment:
      - PUID=0
      - PGID=0
      - UMASK=022
      - TZ=UTC
      - TZ=Asia/Shanghai
    container_name: openlist
    image: 'openlistteam/openlist:latest'
@@ -1,43 +1,60 @@

package _115

import (
	"errors"

	"github.com/OpenListTeam/OpenList/v4/drivers/base"
	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
	driver115 "github.com/SheltonZhu/115driver/pkg/driver"
	log "github.com/sirupsen/logrus"
)

var (
	md5Salt = "Qclm8MGWUv59TnrR0XPg"
	appVer  = "27.0.5.7"
	appVer  = "35.6.0.3"
)

func (d *Pan115) getAppVersion() ([]driver115.AppVersion, error) {
	result := driver115.VersionResp{}
	resp, err := base.RestyClient.R().Get(driver115.ApiGetVersion)

	err = driver115.CheckErr(err, &result, resp)
func (d *Pan115) getAppVersion() (string, error) {
	result := VersionResp{}
	res, err := base.RestyClient.R().Get(driver115.ApiGetVersion)
	if err != nil {
		return nil, err
		return "", err
	}

	return result.Data.GetAppVersions(), nil
	err = utils.Json.Unmarshal(res.Body(), &result)
	if err != nil {
		return "", err
	}
	if len(result.Error) > 0 {
		return "", errors.New(result.Error)
	}
	return result.Data.Win.Version, nil
}

func (d *Pan115) getAppVer() string {
	// todo add some cache?
	vers, err := d.getAppVersion()
	ver, err := d.getAppVersion()
	if err != nil {
		log.Warnf("[115] get app version failed: %v", err)
		return appVer
	}
	for _, ver := range vers {
		if ver.AppName == "win" {
			return ver.Version
		}
	if len(ver) > 0 {
		return ver
	}
	return appVer
}

func (d *Pan115) initAppVer() {
	appVer = d.getAppVer()
	log.Debugf("use app version: %v", appVer)
}

type VersionResp struct {
	Error string   `json:"error,omitempty"`
	Data  Versions `json:"data"`
}

type Versions struct {
	Win Version `json:"win"`
}

type Version struct {
	Version string `json:"version_code"`
}
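The rewritten `getAppVersion` decodes the version endpoint directly into local types instead of going through the 115 SDK's response helpers. A self-contained sketch of that decoding, run against a hypothetical sample payload (the JSON body is made up to match the struct tags above):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors the VersionResp / Versions / Version types from the diff.
type VersionResp struct {
	Error string `json:"error,omitempty"`
	Data  struct {
		Win struct {
			Version string `json:"version_code"`
		} `json:"win"`
	} `json:"data"`
}

func main() {
	sample := []byte(`{"data":{"win":{"version_code":"35.6.0.3"}}}`)
	var r VersionResp
	if err := json.Unmarshal(sample, &r); err != nil {
		panic(err)
	}
	fmt.Println(r.Data.Win.Version) // 35.6.0.3
}
```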
@@ -186,7 +186,7 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr

	preHash = strings.ToUpper(preHash)
	fullHash := stream.GetHash().GetHash(utils.SHA1)
	if len(fullHash) != utils.SHA1.Width {
		_, fullHash, err = streamPkg.CacheFullInTempFileAndHash(stream, utils.SHA1)
		_, fullHash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA1)
		if err != nil {
			return nil, err
		}
@@ -18,7 +18,6 @@ var config = driver.Config{

	Name:        "115 Cloud",
	DefaultRoot: "0",
	// OnlyProxy: true,
	// OnlyLocal: true,
	// NoOverwriteUpload: true,
}
@@ -321,7 +321,7 @@ func (d *Pan115) UploadByMultipart(ctx context.Context, params *driver115.Upload

		err error
	)

	tmpF, err := s.CacheFullInTempFile()
	tmpF, err := s.CacheFullAndWriter(&up, nil)
	if err != nil {
		return nil, err
	}
@@ -8,6 +8,7 @@ import (

	"strings"
	"time"

	sdk "github.com/OpenListTeam/115-sdk-go"
	"github.com/OpenListTeam/OpenList/v4/cmd/flags"
	"github.com/OpenListTeam/OpenList/v4/drivers/base"
	"github.com/OpenListTeam/OpenList/v4/internal/driver"

@@ -16,7 +17,6 @@ import (

	"github.com/OpenListTeam/OpenList/v4/internal/stream"
	"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
	sdk "github.com/OpenListTeam/115-sdk-go"
	"golang.org/x/time/rate"
)

@@ -131,6 +131,23 @@ func (d *Open115) Link(ctx context.Context, file model.Obj, args model.LinkArgs)

	}, nil
}

func (d *Open115) GetObjInfo(ctx context.Context, path string) (model.Obj, error) {
	if err := d.WaitLimit(ctx); err != nil {
		return nil, err
	}
	resp, err := d.client.GetFolderInfoByPath(ctx, path)
	if err != nil {
		return nil, err
	}
	return &Obj{
		Fid:  resp.FileID,
		Fn:   resp.FileName,
		Fc:   resp.FileCategory,
		Sha1: resp.Sha1,
		Pc:   resp.PickCode,
	}, nil
}

func (d *Open115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
	if err := d.WaitLimit(ctx); err != nil {
		return nil, err

@@ -222,7 +239,7 @@ func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStre

	}
	sha1 := file.GetHash().GetHash(utils.SHA1)
	if len(sha1) != utils.SHA1.Width {
		_, sha1, err = stream.CacheFullInTempFileAndHash(file, utils.SHA1)
		_, sha1, err = stream.CacheFullAndHash(file, &up, utils.SHA1)
		if err != nil {
			return err
		}

@@ -252,6 +269,7 @@ func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStre

		return err
	}
	if resp.Status == 2 {
		up(100)
		return nil
	}
	// 2. two way verify

@@ -286,6 +304,7 @@ func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStre

		return err
	}
	if resp.Status == 2 {
		up(100)
		return nil
	}
}

@@ -302,6 +321,43 @@ func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStre

	return nil
}

func (d *Open115) OfflineDownload(ctx context.Context, uris []string, dstDir model.Obj) ([]string, error) {
	return d.client.AddOfflineTaskURIs(ctx, uris, dstDir.GetID())
}

func (d *Open115) DeleteOfflineTask(ctx context.Context, infoHash string, deleteFiles bool) error {
	return d.client.DeleteOfflineTask(ctx, infoHash, deleteFiles)
}

func (d *Open115) OfflineList(ctx context.Context) (*sdk.OfflineTaskListResp, error) {
	resp, err := d.client.OfflineTaskList(ctx, 1)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

func (d *Open115) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
	userInfo, err := d.client.UserInfo(ctx)
	if err != nil {
		return nil, err
	}
	total, err := userInfo.RtSpaceInfo.AllTotal.Size.Int64()
	if err != nil {
		return nil, err
	}
	free, err := userInfo.RtSpaceInfo.AllRemain.Size.Int64()
	if err != nil {
		return nil, err
	}
	return &model.StorageDetails{
		DiskUsage: model.DiskUsage{
			TotalSpace: uint64(total),
			FreeSpace:  uint64(free),
		},
	}, nil
}

// func (d *Open115) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
//	// TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
//	return nil, errs.NotImplement
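`GetDetails` converts the quota fields with `.Int64()`, which suggests they arrive as JSON numbers of unspecified width. A small standalone sketch of that `json.Number`-style decoding (the field names and values here are made up, not the 115 API's actual schema):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical quota payload with large byte counts.
	payload := []byte(`{"total":1099511627776,"remain":274877906944}`)
	var quota struct {
		Total  json.Number `json:"total"`
		Remain json.Number `json:"remain"`
	}
	if err := json.Unmarshal(payload, &quota); err != nil {
		panic(err)
	}
	total, _ := quota.Total.Int64()
	free, _ := quota.Remain.Int64()
	fmt.Printf("total=%d GiB free=%d GiB\n", total>>30, free>>30)
}
```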
@@ -11,23 +11,14 @@ type Addition struct {

	// define other
	OrderBy        string  `json:"order_by" type:"select" options:"file_name,file_size,user_utime,file_type"`
	OrderDirection string  `json:"order_direction" type:"select" options:"asc,desc"`
	LimitRate      float64 `json:"limit_rate,string" type:"float" default:"1" help:"limit all api request rate ([limit]r/1s)"`
	LimitRate      float64 `json:"limit_rate" type:"float" default:"1" help:"limit all api request rate ([limit]r/1s)"`
	AccessToken    string  `json:"access_token" required:"true"`
	RefreshToken   string  `json:"refresh_token" required:"true"`
}

var config = driver.Config{
	Name:              "115 Open",
	LocalSort:         false,
	OnlyLocal:         false,
	OnlyProxy:         false,
	NoCache:           false,
	NoUpload:          false,
	NeedMs:            false,
	DefaultRoot:       "0",
	CheckStatus:       false,
	Alert:             "",
	NoOverwriteUpload: false,
	Name:        "115 Open",
	DefaultRoot: "0",
}

func init() {
@@ -6,12 +6,13 @@ import (

	"io"
	"time"

	sdk "github.com/OpenListTeam/115-sdk-go"
	"github.com/OpenListTeam/OpenList/v4/internal/driver"
	"github.com/OpenListTeam/OpenList/v4/internal/model"
	streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
	"github.com/aliyun/aliyun-oss-go-sdk/oss"
	"github.com/avast/retry-go"
	sdk "github.com/OpenListTeam/115-sdk-go"
)

func calPartSize(fileSize int64) int64 {

@@ -69,9 +70,6 @@ func (d *Open115) singleUpload(ctx context.Context, tempF model.File, tokenResp

// }

func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress, tokenResp *sdk.UploadGetTokenResp, initResp *sdk.UploadInitResp) error {
	fileSize := stream.GetSize()
	chunkSize := calPartSize(fileSize)

	ossClient, err := oss.New(tokenResp.Endpoint, tokenResp.AccessKeyId, tokenResp.AccessKeySecret, oss.SecurityToken(tokenResp.SecurityToken))
	if err != nil {
		return err

@@ -86,6 +84,13 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,

		return err
	}

	fileSize := stream.GetSize()
	chunkSize := calPartSize(fileSize)
	ss, err := streamPkg.NewStreamSectionReader(stream, int(chunkSize), &up)
	if err != nil {
		return err
	}

	partNum := (stream.GetSize() + chunkSize - 1) / chunkSize
	parts := make([]oss.UploadPart, partNum)
	offset := int64(0)

@@ -98,10 +103,13 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,

		if i == partNum {
			partSize = fileSize - (i-1)*chunkSize
		}
		rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
		rd, err := ss.GetSectionReader(offset, partSize)
		if err != nil {
			return err
		}
		rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
		err = retry.Do(func() error {
			_ = rd.Reset()
			rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
			rd.Seek(0, io.SeekStart)
			part, err := bucket.UploadPart(imur, rateLimitedRd, partSize, int(i))
			if err != nil {
				return err

@@ -112,6 +120,7 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,

			retry.Attempts(3),
			retry.DelayType(retry.BackOffDelay),
			retry.Delay(time.Second))
		ss.FreeSectionReader(rd)
		if err != nil {
			return err
		}

@@ -121,7 +130,7 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,

		} else {
			offset += partSize
		}
		up(float64(offset) / float64(fileSize))
		up(float64(offset) * 100 / float64(fileSize))
	}

	// callbackRespBytes := make([]byte, 1024)
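The part bookkeeping above is a ceiling division with a shorter final part. A standalone sketch of the same arithmetic with illustrative numbers (40 MB file, 16 MB chunks yields parts of 16, 16 and 8 MB):

```go
package main

import "fmt"

func main() {
	const MB = int64(1024 * 1024)
	fileSize := int64(40 * MB)
	chunkSize := int64(16 * MB)

	// Ceiling division: ceil(40/16) = 3 parts.
	partNum := (fileSize + chunkSize - 1) / chunkSize
	for i := int64(1); i <= partNum; i++ {
		partSize := chunkSize
		if i == partNum {
			partSize = fileSize - (i-1)*chunkSize // final part gets the remainder
		}
		fmt.Printf("part %d: %d MB\n", i, partSize/MB)
	}
}
```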
@@ -19,12 +19,7 @@ type Addition struct {

var config = driver.Config{
	Name:        "115 Share",
	DefaultRoot: "0",
	// OnlyProxy: true,
	// OnlyLocal: true,
	CheckStatus:       false,
	Alert:             "",
	NoOverwriteUpload: true,
	NoUpload:          true,
	NoUpload:    true,
}

func init() {
@@ -64,14 +64,6 @@ func (d *Pan123) List(ctx context.Context, dir model.Obj, args model.ListArgs) (

func (d *Pan123) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	if f, ok := file.(File); ok {
		//var resp DownResp
		var headers map[string]string
		if !utils.IsLocalIPAddr(args.IP) {
			headers = map[string]string{
				//"X-Real-IP": "1.1.1.1",
				"X-Forwarded-For": args.IP,
			}
		}
		data := base.Json{
			"driveId": 0,
			"etag":    f.Etag,

@@ -83,25 +75,27 @@ func (d *Pan123) Link(ctx context.Context, file model.Obj, args model.LinkArgs)

		}
		resp, err := d.Request(DownloadInfo, http.MethodPost, func(req *resty.Request) {
			req.SetBody(data).SetHeaders(headers)
			req.SetBody(data)
		}, nil)
		if err != nil {
			return nil, err
		}
		downloadUrl := utils.Json.Get(resp, "data", "DownloadUrl").ToString()
		u, err := url.Parse(downloadUrl)
		ou, err := url.Parse(downloadUrl)
		if err != nil {
			return nil, err
		}
		nu := u.Query().Get("params")
		u_ := ou.String()
		nu := ou.Query().Get("params")
		if nu != "" {
			du, _ := base64.StdEncoding.DecodeString(nu)
			u, err = url.Parse(string(du))
			u, err := url.Parse(string(du))
			if err != nil {
				return nil, err
			}
			u_ = u.String()
		}
		u_ := u.String()

		log.Debug("download url: ", u_)
		res, err := base.NoRedirectClient.R().SetHeader("Referer", "https://www.123pan.com/").Get(u_)
		if err != nil {

@@ -118,7 +112,7 @@ func (d *Pan123) Link(ctx context.Context, file model.Obj, args model.LinkArgs)

			link.URL = utils.Json.Get(res.Body(), "data", "redirect_url").ToString()
		}
		link.Header = http.Header{
			"Referer": []string{"https://www.123pan.com/"},
			"Referer": []string{fmt.Sprintf("%s://%s/", ou.Scheme, ou.Host)},
		}
		return &link, nil
	} else {

@@ -188,7 +182,7 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, file model.FileStrea

	etag := file.GetHash().GetHash(utils.MD5)
	var err error
	if len(etag) < utils.MD5.Width {
		_, etag, err = stream.CacheFullInTempFileAndHash(file, utils.MD5)
		_, etag, err = stream.CacheFullAndHash(file, &up, utils.MD5)
		if err != nil {
			return err
		}
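The `Referer` change replaces the hard-coded `https://www.123pan.com/` with a value derived from the download URL itself. A tiny sketch of that derivation (the URL is a placeholder):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	ou, err := url.Parse("https://dl.example.invalid/path/file?sign=abc")
	if err != nil {
		panic(err)
	}
	// Build "scheme://host/" from the parsed URL, as the diff does.
	referer := fmt.Sprintf("%s://%s/", ou.Scheme, ou.Host)
	fmt.Println(referer) // https://dl.example.invalid/
}
```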
@@ -11,7 +11,8 @@ type Addition struct {

	driver.RootID
	//OrderBy        string `json:"order_by" type:"select" options:"file_id,file_name,size,update_at" default:"file_name"`
	//OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
	AccessToken string
	AccessToken  string
	UploadThread int `json:"UploadThread" type:"number" default:"3" help:"the threads of upload"`
}

var config = driver.Config{

@@ -22,6 +23,11 @@ var config = driver.Config{

func init() {
	op.RegisterDriver(func() driver.Driver {
		return &Pan123{}
		// New default options must be set when RegisterDriver initializes the driver so they take effect for existing users
		return &Pan123{
			Addition: Addition{
				UploadThread: 3,
			},
		}
	})
}
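The translated comment above points at a real subtlety: defaults must be pre-seeded on the struct returned from `RegisterDriver`, because stored settings from existing users leave unmentioned fields untouched when unmarshalled over it. A minimal illustration of that Go behavior (the struct and saved config are simplified stand-ins):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type Addition struct {
	AccessToken  string
	UploadThread int
}

func main() {
	// Pre-seed the default, as the new init() does.
	a := Addition{UploadThread: 3}
	// An old user's saved config that predates the UploadThread field.
	saved := []byte(`{"AccessToken":"tok"}`)
	if err := json.Unmarshal(saved, &a); err != nil {
		panic(err)
	}
	// Unmentioned fields are left as-is, so the new default survives.
	fmt.Println(a.UploadThread) // 3
}
```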
@@ -28,7 +28,7 @@ func (f File) CreateTime() time.Time {

}

func (f File) GetHash() utils.HashInfo {
	return utils.HashInfo{}
	return utils.NewHashInfo(utils.MD5, f.Etag)
}

func (f File) GetPath() string {
@@ -6,11 +6,16 @@ import (

	"io"
	"net/http"
	"strconv"
	"time"

	"github.com/OpenListTeam/OpenList/v4/drivers/base"
	"github.com/OpenListTeam/OpenList/v4/internal/driver"
	"github.com/OpenListTeam/OpenList/v4/internal/model"
	"github.com/OpenListTeam/OpenList/v4/internal/stream"
	"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
	"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
	"github.com/avast/retry-go"
	"github.com/go-resty/resty/v2"
)

@@ -69,18 +74,21 @@ func (d *Pan123) completeS3(ctx context.Context, upReq *UploadResp, file model.F

}

func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, up driver.UpdateProgress) error {
	tmpF, err := file.CacheFullInTempFile()
	// fetch s3 pre signed urls
	size := file.GetSize()
	chunkSize := int64(16 * utils.MB)
	chunkCount := 1
	if size > chunkSize {
		chunkCount = int((size + chunkSize - 1) / chunkSize)
	}

	ss, err := stream.NewStreamSectionReader(file, int(chunkSize), &up)
	if err != nil {
		return err
	}
	// fetch s3 pre signed urls
	size := file.GetSize()
	chunkSize := min(size, 16*utils.MB)
	chunkCount := int(size / chunkSize)

	lastChunkSize := size % chunkSize
	if lastChunkSize > 0 {
		chunkCount++
	} else {
	if lastChunkSize == 0 {
		lastChunkSize = chunkSize
	}
	// only 1 batch is allowed

@@ -90,73 +98,99 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi

		batchSize = 10
		getS3UploadUrl = d.getS3PreSignedUrls
	}

	thread := min(int(chunkCount), d.UploadThread)
	threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
		retry.Attempts(3),
		retry.Delay(time.Second),
		retry.DelayType(retry.BackOffDelay))
	for i := 1; i <= chunkCount; i += batchSize {
		if utils.IsCanceled(ctx) {
			return ctx.Err()
		if utils.IsCanceled(uploadCtx) {
			break
		}
		start := i
		end := min(i+batchSize, chunkCount+1)
		s3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, start, end)
		s3PreSignedUrls, err := getS3UploadUrl(uploadCtx, upReq, start, end)
		if err != nil {
			return err
		}
		// upload each chunk
		for j := start; j < end; j++ {
			if utils.IsCanceled(ctx) {
				return ctx.Err()
		for cur := start; cur < end; cur++ {
			if utils.IsCanceled(uploadCtx) {
				break
			}
			offset := int64(cur-1) * chunkSize
			curSize := chunkSize
			if j == chunkCount {
			if cur == chunkCount {
				curSize = lastChunkSize
			}
			err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.NewSectionReader(tmpF, chunkSize*int64(j-1), curSize), curSize, false, getS3UploadUrl)
			if err != nil {
				return err
			}
			up(float64(j) * 100 / float64(chunkCount))
			var reader *stream.SectionReader
			var rateLimitedRd io.Reader
			threadG.GoWithLifecycle(errgroup.Lifecycle{
				Before: func(ctx context.Context) error {
					if reader == nil {
						var err error
						reader, err = ss.GetSectionReader(offset, curSize)
						if err != nil {
							return err
						}
						rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
					}
					return nil
				},
				Do: func(ctx context.Context) error {
					reader.Seek(0, io.SeekStart)
					uploadUrl := s3PreSignedUrls.Data.PreSignedUrls[strconv.Itoa(cur)]
					if uploadUrl == "" {
						return fmt.Errorf("upload url is empty, s3PreSignedUrls: %+v", s3PreSignedUrls)
					}
					reader.Seek(0, io.SeekStart)
					req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, rateLimitedRd)
					if err != nil {
						return err
					}
					req.ContentLength = curSize
					//req.Header.Set("Content-Length", strconv.FormatInt(curSize, 10))
					res, err := base.HttpClient.Do(req)
					if err != nil {
						return err
					}
					defer res.Body.Close()
					if res.StatusCode == http.StatusForbidden {
						singleflight.AnyGroup.Do(fmt.Sprintf("Pan123.newUpload_%p", threadG), func() (any, error) {
							newS3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, cur, end)
							if err != nil {
								return nil, err
							}
							s3PreSignedUrls.Data.PreSignedUrls = newS3PreSignedUrls.Data.PreSignedUrls
							return nil, nil
						})
						if err != nil {
							return err
						}
						return fmt.Errorf("upload s3 chunk %d failed, status code: %d", cur, res.StatusCode)
					}
					if res.StatusCode != http.StatusOK {
						body, err := io.ReadAll(res.Body)
						if err != nil {
							return err
						}
						return fmt.Errorf("upload s3 chunk %d failed, status code: %d, body: %s", cur, res.StatusCode, body)
					}
					progress := 10.0 + 85.0*float64(threadG.Success())/float64(chunkCount)
					up(progress)
					return nil
				},
				After: func(err error) {
					ss.FreeSectionReader(reader)
				},
			})
		}
	}
	if err := threadG.Wait(); err != nil {
		return err
	}
	defer up(100)
	// complete s3 upload
	return d.completeS3(ctx, upReq, file, chunkCount > 1)
}
func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSignedUrls *S3PreSignedURLs, cur, end int, reader *io.SectionReader, curSize int64, retry bool, getS3UploadUrl func(ctx context.Context, upReq *UploadResp, start int, end int) (*S3PreSignedURLs, error)) error {
|
||||
uploadUrl := s3PreSignedUrls.Data.PreSignedUrls[strconv.Itoa(cur)]
|
||||
if uploadUrl == "" {
|
||||
return fmt.Errorf("upload url is empty, s3PreSignedUrls: %+v", s3PreSignedUrls)
|
||||
}
|
||||
req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, reader))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req = req.WithContext(ctx)
|
||||
req.ContentLength = curSize
|
||||
//req.Header.Set("Content-Length", strconv.FormatInt(curSize, 10))
|
||||
res, err := base.HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode == http.StatusForbidden {
|
||||
if retry {
|
||||
return fmt.Errorf("upload s3 chunk %d failed, status code: %d", cur, res.StatusCode)
|
||||
}
|
||||
// refresh s3 pre signed urls
|
||||
newS3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, cur, end)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s3PreSignedUrls.Data.PreSignedUrls = newS3PreSignedUrls.Data.PreSignedUrls
|
||||
// retry
|
||||
reader.Seek(0, io.SeekStart)
|
||||
return d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, cur, end, reader, curSize, true, getS3UploadUrl)
|
||||
}
|
||||
if res.StatusCode != http.StatusOK {
|
||||
body, err := io.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return fmt.Errorf("upload s3 chunk %d failed, status code: %d, body: %s", cur, res.StatusCode, body)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
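The StatusForbidden branch above funnels all concurrent 403 failures into a single presigned-URL refresh through the project's pkg/singleflight. A minimal sketch of the same collapse-duplicate-calls idea, using the stock golang.org/x/sync/singleflight package rather than OpenList's wrapper (the key and the refresh function here are illustrative, not the driver's API):

package main

import (
	"fmt"
	"sync"

	"golang.org/x/sync/singleflight"
)

var g singleflight.Group

// refreshURLs stands in for getS3UploadUrl: under one singleflight key it
// typically runs once for a burst of concurrent callers, who all share
// the result.
func refreshURLs() (string, error) {
	fmt.Println("refreshing presigned URLs once")
	return "https://example.com/presigned", nil
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			g.Do("refresh", func() (interface{}, error) { return refreshURLs() })
		}()
	}
	wg.Wait()
}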
@@ -2,7 +2,9 @@ package _123_open
 
 import (
 	"context"
+	"fmt"
 	"strconv"
+	"time"
 
 	"github.com/OpenListTeam/OpenList/v4/internal/driver"
 	"github.com/OpenListTeam/OpenList/v4/internal/errs"
@@ -15,6 +17,7 @@ import (
 type Open123 struct {
 	model.Storage
 	Addition
+	UID uint64
 }
 
 func (d *Open123) Config() driver.Config {
@@ -67,13 +70,45 @@ func (d *Open123) List(ctx context.Context, dir model.Obj, args model.ListArgs)
 func (d *Open123) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
 	fileId, _ := strconv.ParseInt(file.GetID(), 10, 64)
 
+	if d.DirectLink {
+		res, err := d.getDirectLink(fileId)
+		if err != nil {
+			return nil, err
+		}
+
+		if d.DirectLinkPrivateKey == "" {
+			duration := 365 * 24 * time.Hour // cache for one year
+			return &model.Link{
+				URL:        res.Data.URL,
+				Expiration: &duration,
+			}, nil
+		}
+
+		uid, err := d.getUID()
+		if err != nil {
+			return nil, err
+		}
+
+		duration := time.Duration(d.DirectLinkValidDuration) * time.Minute
+
+		newURL, err := d.SignURL(res.Data.URL, d.DirectLinkPrivateKey,
+			uid, duration)
+		if err != nil {
+			return nil, err
+		}
+
+		return &model.Link{
+			URL:        newURL,
+			Expiration: &duration,
+		}, nil
+	}
+
 	res, err := d.getDownloadInfo(fileId)
 	if err != nil {
 		return nil, err
 	}
 
-	link := model.Link{URL: res.Data.DownloadUrl}
-	return &link, nil
+	return &model.Link{URL: res.Data.DownloadUrl}, nil
 }
 
 func (d *Open123) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
@@ -95,6 +130,22 @@ func (d *Open123) Rename(ctx context.Context, srcObj model.Obj, newName string)
 }
 
+func (d *Open123) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
+	// try to implement copy via upload + MD5-based instant upload (rapid upload)
+	// 1. create the file
+	// parentFileID is the parent directory id; use 0 when uploading to the root directory
+	parentFileId, err := strconv.ParseInt(dstDir.GetID(), 10, 64)
+	if err != nil {
+		return fmt.Errorf("parse parentFileID error: %v", err)
+	}
+	etag := srcObj.(File).Etag
+	createResp, err := d.create(parentFileId, srcObj.GetName(), etag, srcObj.GetSize(), 2, false)
+	if err != nil {
+		return err
+	}
+	// did rapid upload succeed?
+	if createResp.Data.Reuse {
+		return nil
+	}
+	return errs.NotSupport
+}
+
@@ -104,26 +155,79 @@ func (d *Open123) Remove(ctx context.Context, obj model.Obj) error {
 	return d.trash(fileId)
 }
 
-func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
+func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
 	// 1. create the file
 	// parentFileID is the parent directory id; use 0 when uploading to the root directory
 	parentFileId, err := strconv.ParseInt(dstDir.GetID(), 10, 64)
 	if err != nil {
+		return nil, fmt.Errorf("parse parentFileID error: %v", err)
 	}
 	// etag is the file's MD5
 	etag := file.GetHash().GetHash(utils.MD5)
 
 	if len(etag) < utils.MD5.Width {
-		_, etag, err = stream.CacheFullInTempFileAndHash(file, utils.MD5)
+		_, etag, err = stream.CacheFullAndHash(file, &up, utils.MD5)
 		if err != nil {
-			return err
+			return nil, err
 		}
 	}
 	createResp, err := d.create(parentFileId, file.GetName(), etag, file.GetSize(), 2, false)
 	if err != nil {
-		return err
+		return nil, err
 	}
 	// did rapid upload succeed?
 	if createResp.Data.Reuse {
-		return nil
+		// a valid FileID is returned only when rapid upload succeeds; otherwise it is 0
+		if createResp.Data.FileID != 0 {
+			return File{
+				FileName: file.GetName(),
+				Size:     file.GetSize(),
+				FileId:   createResp.Data.FileID,
+				Type:     2,
+				Etag:     etag,
+			}, nil
+		}
 	}
 	up(10)
 
-	return d.Upload(ctx, file, createResp, up)
+	// 2. upload the slices
+	err = d.Upload(ctx, file, createResp, up)
+	if err != nil {
+		return nil, err
+	}
+
+	// 3. finish the upload
+	for range 60 {
+		uploadCompleteResp, err := d.complete(createResp.Data.PreuploadID)
+		// undocumented error codes may come back here (e.g. 20103); the docs do not list them
+		if err == nil && uploadCompleteResp.Data.Completed && uploadCompleteResp.Data.FileID != 0 {
+			up(100)
+			return File{
+				FileName: file.GetName(),
+				Size:     file.GetSize(),
+				FileId:   uploadCompleteResp.Data.FileID,
+				Type:     2,
+				Etag:     etag,
+			}, nil
+		}
+		// if the API reports completed == false, keep polling this endpoint at 1-second intervals for the final result
+		time.Sleep(time.Second)
+	}
+	return nil, fmt.Errorf("upload complete timeout")
 }
 
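The completion loop above polls d.complete up to 60 times at one-second intervals. A small, generic sketch of the same poll-until-done-or-timeout pattern with context support (pollUntil and its callback are placeholders of my own, not the driver's API; like the loop above, it keeps polling when the check returns an error):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// pollUntil retries check every interval until it reports done, the
// context is cancelled, or the timeout elapses.
func pollUntil(ctx context.Context, interval, timeout time.Duration, check func() (bool, error)) error {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		done, err := check()
		if err == nil && done {
			return nil
		}
		select {
		case <-ctx.Done():
			return errors.New("upload complete timeout")
		case <-ticker.C:
		}
	}
}

func main() {
	n := 0
	err := pollUntil(context.Background(), 10*time.Millisecond, time.Second, func() (bool, error) {
		n++
		return n >= 3, nil
	})
	fmt.Println(n, err) // 3 <nil>
}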
+func (d *Open123) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
+	userInfo, err := d.getUserInfo()
+	if err != nil {
+		return nil, err
+	}
+	total := userInfo.Data.SpacePermanent + userInfo.Data.SpaceTemp
+	free := total - userInfo.Data.SpaceUsed
+	return &model.StorageDetails{
+		DiskUsage: model.DiskUsage{
+			TotalSpace: total,
+			FreeSpace:  free,
+		},
+	}, nil
+}
 
 var _ driver.Driver = (*Open123)(nil)
+var _ driver.PutResult = (*Open123)(nil)
 
@@ -23,6 +23,11 @@ type Addition struct {
 	// number of upload threads
 	UploadThread int `json:"UploadThread" type:"number" default:"3" help:"the threads of upload"`
 
+	// use direct links
+	DirectLink              bool   `json:"DirectLink" type:"bool" default:"false" required:"false" help:"use direct link when download file"`
+	DirectLinkPrivateKey    string `json:"DirectLinkPrivateKey" required:"false" help:"private key for direct link, if URL authentication is enabled"`
+	DirectLinkValidDuration int64  `json:"DirectLinkValidDuration" type:"number" default:"30" required:"false" help:"minutes, if URL authentication is enabled"`
+
 	driver.RootID
 }
 
@@ -73,7 +73,9 @@ func (f File) GetName() string {
 }
 
 func (f File) CreateTime() time.Time {
-	parsedTime, err := time.Parse("2006-01-02 15:04:05", f.CreateAt)
+	// the returned timestamp carries no time zone information; assume UTC+8
+	loc := time.FixedZone("UTC+8", 8*60*60)
+	parsedTime, err := time.ParseInLocation("2006-01-02 15:04:05", f.CreateAt, loc)
 	if err != nil {
 		return time.Now()
 	}
@@ -81,7 +83,9 @@ func (f File) CreateTime() time.Time {
 }
 
 func (f File) ModTime() time.Time {
-	parsedTime, err := time.Parse("2006-01-02 15:04:05", f.UpdateAt)
+	// the returned timestamp carries no time zone information; assume UTC+8
+	loc := time.FixedZone("UTC+8", 8*60*60)
+	parsedTime, err := time.ParseInLocation("2006-01-02 15:04:05", f.UpdateAt, loc)
 	if err != nil {
 		return time.Now()
 	}
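The timezone fix above matters because time.Parse interprets a bare "2006-01-02 15:04:05" timestamp as UTC, while the API returns local (UTC+8) wall-clock time. A self-contained illustration of the difference:

package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05"
	s := "2024-01-02 08:00:00"

	asUTC, _ := time.Parse(layout, s) // no zone info in the string: assumed UTC
	cst := time.FixedZone("UTC+8", 8*60*60)
	asCST, _ := time.ParseInLocation(layout, s, cst) // interpreted as UTC+8

	// The same string yields instants 8 hours apart.
	fmt.Println(asUTC.Unix() - asCST.Unix()) // 28800
}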
 
@@ -123,19 +127,19 @@ type RefreshTokenResp struct {
 type UserInfoResp struct {
 	BaseResp
 	Data struct {
-		UID            int64  `json:"uid"`
-		Username       string `json:"username"`
-		DisplayName    string `json:"displayName"`
-		HeadImage      string `json:"headImage"`
-		Passport       string `json:"passport"`
-		Mail           string `json:"mail"`
-		SpaceUsed      int64  `json:"spaceUsed"`
-		SpacePermanent int64  `json:"spacePermanent"`
-		SpaceTemp      int64  `json:"spaceTemp"`
-		SpaceTempExpr  string `json:"spaceTempExpr"`
-		Vip            bool   `json:"vip"`
-		DirectTraffic  int64  `json:"directTraffic"`
-		IsHideUID      bool   `json:"isHideUID"`
+		UID uint64 `json:"uid"`
+		// Username    string `json:"username"`
+		// DisplayName string `json:"displayName"`
+		// HeadImage   string `json:"headImage"`
+		// Passport    string `json:"passport"`
+		// Mail        string `json:"mail"`
+		SpaceUsed      uint64 `json:"spaceUsed"`
+		SpacePermanent uint64 `json:"spacePermanent"`
+		SpaceTemp      uint64 `json:"spaceTemp"`
+		// SpaceTempExpr int64 `json:"spaceTempExpr"`
+		// Vip           bool  `json:"vip"`
+		// DirectTraffic int64 `json:"directTraffic"`
+		// IsHideUID     bool  `json:"isHideUID"`
 	} `json:"data"`
 }
 
@@ -154,52 +158,30 @@ type DownloadInfoResp struct {
 	} `json:"data"`
 }
 
+type DirectLinkResp struct {
+	BaseResp
+	Data struct {
+		URL string `json:"url"`
+	} `json:"data"`
+}
+
+// response of create file V2
 type UploadCreateResp struct {
 	BaseResp
 	Data struct {
-		FileID      int64  `json:"fileID"`
-		PreuploadID string `json:"preuploadID"`
-		Reuse       bool   `json:"reuse"`
-		SliceSize   int64  `json:"sliceSize"`
+		FileID      int64    `json:"fileID"`
+		PreuploadID string   `json:"preuploadID"`
+		Reuse       bool     `json:"reuse"`
+		SliceSize   int64    `json:"sliceSize"`
+		Servers     []string `json:"servers"`
 	} `json:"data"`
 }
 
-type UploadUrlResp struct {
-	BaseResp
-	Data struct {
-		PresignedURL string `json:"presignedURL"`
-	}
-}
-
+// response of upload complete V2
 type UploadCompleteResp struct {
 	BaseResp
 	Data struct {
-		Async     bool  `json:"async"`
 		Completed bool  `json:"completed"`
 		FileID    int64 `json:"fileID"`
 	} `json:"data"`
 }
 
-type UploadAsyncResp struct {
-	BaseResp
-	Data struct {
-		Completed bool  `json:"completed"`
-		FileID    int64 `json:"fileID"`
-	} `json:"data"`
-}
-
-type UploadResp struct {
-	BaseResp
-	Data struct {
-		AccessKeyId     string `json:"AccessKeyId"`
-		Bucket          string `json:"Bucket"`
-		Key             string `json:"Key"`
-		SecretAccessKey string `json:"SecretAccessKey"`
-		SessionToken    string `json:"SessionToken"`
-		FileId          int64  `json:"FileId"`
-		Reuse           bool   `json:"Reuse"`
-		EndPoint        string `json:"EndPoint"`
-		StorageNode     string `json:"StorageNode"`
-		UploadId        string `json:"UploadId"`
-	} `json:"data"`
-}
 
@@ -1,21 +1,28 @@
 package _123_open
 
 import (
+	"bytes"
 	"context"
+	"encoding/json"
 	"fmt"
 	"io"
+	"mime/multipart"
 	"net/http"
 	"strconv"
+	"strings"
 	"time"
 
 	"github.com/OpenListTeam/OpenList/v4/drivers/base"
 	"github.com/OpenListTeam/OpenList/v4/internal/driver"
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
+	"github.com/OpenListTeam/OpenList/v4/internal/stream"
 	"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
-	"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
 	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
 	"github.com/avast/retry-go"
 	"github.com/go-resty/resty/v2"
 )
 
 // create file V2
 func (d *Open123) create(parentFileID int64, filename string, etag string, size int64, duplicate int, containDir bool) (*UploadCreateResp, error) {
 	var resp UploadCreateResp
 	_, err := d.Request(UploadCreate, http.MethodPost, func(req *resty.Request) {
@@ -34,21 +41,136 @@ func (d *Open123) create(parentFileID int64, filename string, etag string, size
 	return &resp, nil
 }
 
-func (d *Open123) url(preuploadID string, sliceNo int64) (string, error) {
-	// get upload url
-	var resp UploadUrlResp
-	_, err := d.Request(UploadUrl, http.MethodPost, func(req *resty.Request) {
-		req.SetBody(base.Json{
-			"preuploadId": preuploadID,
-			"sliceNo":     sliceNo,
-		})
-	}, &resp)
+// upload slices V2
+func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createResp *UploadCreateResp, up driver.UpdateProgress) error {
+	uploadDomain := createResp.Data.Servers[0]
+	size := file.GetSize()
+	chunkSize := createResp.Data.SliceSize
+
+	ss, err := stream.NewStreamSectionReader(file, int(chunkSize), &up)
 	if err != nil {
-		return "", err
+		return err
 	}
-	return resp.Data.PresignedURL, nil
+
+	uploadNums := (size + chunkSize - 1) / chunkSize
+	thread := min(int(uploadNums), d.UploadThread)
+	threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
+		retry.Attempts(3),
+		retry.Delay(time.Second),
+		retry.DelayType(retry.BackOffDelay))
+
+	for partIndex := range uploadNums {
+		if utils.IsCanceled(uploadCtx) {
+			break
+		}
+		partIndex := partIndex
+		partNumber := partIndex + 1 // slice numbers start at 1
+		offset := partIndex * chunkSize
+		size := min(chunkSize, size-offset)
+		var reader *stream.SectionReader
+		var rateLimitedRd io.Reader
+		sliceMD5 := ""
+		// multipart form
+		b := bytes.NewBuffer(make([]byte, 0, 2048))
+		threadG.GoWithLifecycle(errgroup.Lifecycle{
+			Before: func(ctx context.Context) error {
+				if reader == nil {
+					var err error
+					// one reader per slice
+					reader, err = ss.GetSectionReader(offset, size)
+					if err != nil {
+						return err
+					}
+					// compute this slice's MD5
+					sliceMD5, err = utils.HashReader(utils.MD5, reader)
+					if err != nil {
+						return err
+					}
+				}
+				return nil
+			},
+			Do: func(ctx context.Context) error {
+				// rewind the slice reader: HashReader, or a previously failed attempt, already read it to EOF
+				reader.Seek(0, io.SeekStart)
+
+				b.Reset()
+				w := multipart.NewWriter(b)
+				// add the form fields
+				err = w.WriteField("preuploadID", createResp.Data.PreuploadID)
+				if err != nil {
+					return err
+				}
+				err = w.WriteField("sliceNo", strconv.FormatInt(partNumber, 10))
+				if err != nil {
+					return err
+				}
+				err = w.WriteField("sliceMD5", sliceMD5)
+				if err != nil {
+					return err
+				}
+				// write the file part
+				_, err = w.CreateFormFile("slice", fmt.Sprintf("%s.part%d", file.GetName(), partNumber))
+				if err != nil {
+					return err
+				}
+				headSize := b.Len()
+				err = w.Close()
+				if err != nil {
+					return err
+				}
+				head := bytes.NewReader(b.Bytes()[:headSize])
+				tail := bytes.NewReader(b.Bytes()[headSize:])
+				rateLimitedRd = driver.NewLimitedUploadStream(ctx, io.MultiReader(head, reader, tail))
+				// build the request and set its headers
+				req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadDomain+"/upload/v2/file/slice", rateLimitedRd)
+				if err != nil {
+					return err
+				}
+
+				// set the request headers
+				req.Header.Add("Authorization", "Bearer "+d.AccessToken)
+				req.Header.Add("Content-Type", w.FormDataContentType())
+				req.Header.Add("Platform", "open_platform")
+
+				res, err := base.HttpClient.Do(req)
+				if err != nil {
+					return err
+				}
+				defer res.Body.Close()
+				if res.StatusCode != 200 {
+					return fmt.Errorf("slice %d upload failed, status code: %d", partNumber, res.StatusCode)
+				}
+				var resp BaseResp
+				respBody, err := io.ReadAll(res.Body)
+				if err != nil {
+					return err
+				}
+				err = json.Unmarshal(respBody, &resp)
+				if err != nil {
+					return err
+				}
+				if resp.Code != 0 {
+					return fmt.Errorf("slice %d upload failed: %s", partNumber, resp.Message)
+				}
+
+				progress := 10.0 + 85.0*float64(threadG.Success())/float64(uploadNums)
+				up(progress)
+				return nil
+			},
+			After: func(err error) {
+				ss.FreeSectionReader(reader)
+			},
+		})
+	}
+
+	if err := threadG.Wait(); err != nil {
+		return err
+	}
+
+	return nil
 }
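The Do callback above avoids buffering each slice in memory: it renders the multipart prologue and epilogue into a small buffer, then splices the raw file section between them with io.MultiReader. A trimmed, standalone sketch of that technique (the field names and payload here are illustrative):

package main

import (
	"bytes"
	"fmt"
	"io"
	"mime/multipart"
	"strings"
)

func main() {
	payload := strings.NewReader("file slice bytes") // stands in for the section reader

	var b bytes.Buffer
	w := multipart.NewWriter(&b)
	w.WriteField("sliceNo", "1")
	w.CreateFormFile("slice", "example.part1") // writes only the part header
	headSize := b.Len()
	w.Close() // writes the closing boundary

	head := bytes.NewReader(b.Bytes()[:headSize])
	tail := bytes.NewReader(b.Bytes()[headSize:])
	// The file content is streamed between header and trailer, never copied
	// into the form buffer; an http.Request could consume body directly.
	body := io.MultiReader(head, payload, tail)

	out, _ := io.ReadAll(body)
	fmt.Println(len(out), w.FormDataContentType())
}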
 
 // upload complete
 func (d *Open123) complete(preuploadID string) (*UploadCompleteResp, error) {
 	var resp UploadCompleteResp
 	_, err := d.Request(UploadComplete, http.MethodPost, func(req *resty.Request) {
@@ -61,91 +183,3 @@ func (d *Open123) complete(preuploadID string) (*UploadCompleteResp, error) {
 	}
 	return &resp, nil
 }
 
-func (d *Open123) async(preuploadID string) (*UploadAsyncResp, error) {
-	var resp UploadAsyncResp
-	_, err := d.Request(UploadAsync, http.MethodPost, func(req *resty.Request) {
-		req.SetBody(base.Json{
-			"preuploadID": preuploadID,
-		})
-	}, &resp)
-	if err != nil {
-		return nil, err
-	}
-	return &resp, nil
-}
-
-func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createResp *UploadCreateResp, up driver.UpdateProgress) error {
-	size := file.GetSize()
-	chunkSize := createResp.Data.SliceSize
-	uploadNums := (size + chunkSize - 1) / chunkSize
-	threadG, uploadCtx := errgroup.NewGroupWithContext(ctx, d.UploadThread,
-		retry.Attempts(3),
-		retry.Delay(time.Second),
-		retry.DelayType(retry.BackOffDelay))
-
-	for partIndex := int64(0); partIndex < uploadNums; partIndex++ {
-		if utils.IsCanceled(uploadCtx) {
-			return ctx.Err()
-		}
-		partIndex := partIndex
-		partNumber := partIndex + 1 // slice numbers start at 1
-		offset := partIndex * chunkSize
-		size := min(chunkSize, size-offset)
-		limitedReader, err := file.RangeRead(http_range.Range{
-			Start:  offset,
-			Length: size})
-		if err != nil {
-			return err
-		}
-		limitedReader = driver.NewLimitedUploadStream(ctx, limitedReader)
-
-		threadG.Go(func(ctx context.Context) error {
-			uploadPartUrl, err := d.url(createResp.Data.PreuploadID, partNumber)
-			if err != nil {
-				return err
-			}
-
-			req, err := http.NewRequestWithContext(ctx, "PUT", uploadPartUrl, limitedReader)
-			if err != nil {
-				return err
-			}
-			req = req.WithContext(ctx)
-			req.ContentLength = size
-
-			res, err := base.HttpClient.Do(req)
-			if err != nil {
-				return err
-			}
-			_ = res.Body.Close()
-
-			progress := 10.0 + 85.0*float64(threadG.Success())/float64(uploadNums)
-			up(progress)
-			return nil
-		})
-	}
-
-	if err := threadG.Wait(); err != nil {
-		return err
-	}
-
-	uploadCompleteResp, err := d.complete(createResp.Data.PreuploadID)
-	if err != nil {
-		return err
-	}
-	if uploadCompleteResp.Data.Async == false || uploadCompleteResp.Data.Completed {
-		return nil
-	}
-
-	for {
-		uploadAsyncResp, err := d.async(createResp.Data.PreuploadID)
-		if err != nil {
-			return err
-		}
-		if uploadAsyncResp.Data.Completed {
-			break
-		}
-	}
-	up(100)
-	return nil
-}
 
@@ -1,15 +1,20 @@
 package _123_open
 
 import (
+	"crypto/md5"
 	"encoding/json"
 	"errors"
 	"fmt"
 	"net/http"
+	"net/url"
 	"strconv"
+	"strings"
 	"time"
 
 	"github.com/OpenListTeam/OpenList/v4/drivers/base"
 	"github.com/OpenListTeam/OpenList/v4/internal/op"
 	"github.com/go-resty/resty/v2"
+	"github.com/google/uuid"
 	log "github.com/sirupsen/logrus"
 )
 
@@ -19,16 +24,15 @@ var ( // the AccessToken QPS limits differ by scenario; keeping them modular like this makes them easy to adjust
 	AccessToken  = InitApiInfo(Api+"/api/v1/access_token", 1)
 	RefreshToken = InitApiInfo(Api+"/api/v1/oauth2/access_token", 1)
 	UserInfo     = InitApiInfo(Api+"/api/v1/user/info", 1)
-	FileList     = InitApiInfo(Api+"/api/v2/file/list", 4)
-	DownloadInfo = InitApiInfo(Api+"/api/v1/file/download_info", 0)
+	FileList     = InitApiInfo(Api+"/api/v2/file/list", 3)
+	DownloadInfo = InitApiInfo(Api+"/api/v1/file/download_info", 5)
+	DirectLink   = InitApiInfo(Api+"/api/v1/direct-link/url", 5)
 	Mkdir        = InitApiInfo(Api+"/upload/v1/file/mkdir", 2)
 	Move         = InitApiInfo(Api+"/api/v1/file/move", 1)
 	Rename       = InitApiInfo(Api+"/api/v1/file/name", 1)
 	Trash        = InitApiInfo(Api+"/api/v1/file/trash", 2)
-	UploadCreate   = InitApiInfo(Api+"/upload/v1/file/create", 2)
-	UploadUrl      = InitApiInfo(Api+"/upload/v1/file/get_upload_url", 0)
-	UploadComplete = InitApiInfo(Api+"/upload/v1/file/upload_complete", 0)
-	UploadAsync    = InitApiInfo(Api+"/upload/v1/file/upload_async_result", 1)
+	UploadCreate   = InitApiInfo(Api+"/upload/v2/file/create", 2)
+	UploadComplete = InitApiInfo(Api+"/upload/v2/file/upload_complete", 0)
 )
 
 func (d *Open123) Request(apiInfo *ApiInfo, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
@@ -82,8 +86,24 @@ func (d *Open123) Request(apiInfo *ApiInfo, method string, callback base.ReqCall
 }
 
 func (d *Open123) flushAccessToken() error {
-	if d.Addition.ClientID != "" {
-		if d.Addition.ClientSecret != "" {
+	if d.ClientID != "" {
+		if d.RefreshToken != "" {
+			var resp RefreshTokenResp
+			_, err := d.Request(RefreshToken, http.MethodPost, func(req *resty.Request) {
+				req.SetQueryParam("client_id", d.ClientID)
+				if d.ClientSecret != "" {
+					req.SetQueryParam("client_secret", d.ClientSecret)
+				}
+				req.SetQueryParam("grant_type", "refresh_token")
+				req.SetQueryParam("refresh_token", d.RefreshToken)
+			}, &resp)
+			if err != nil {
+				return err
+			}
+			d.AccessToken = resp.AccessToken
+			d.RefreshToken = resp.RefreshToken
+			op.MustSaveDriverStorage(d)
+		} else if d.ClientSecret != "" {
 			var resp AccessTokenResp
 			_, err := d.Request(AccessToken, http.MethodPost, func(req *resty.Request) {
 				req.SetBody(base.Json{
@@ -96,24 +116,38 @@ func (d *Open123) flushAccessToken() error {
 			}
 			d.AccessToken = resp.Data.AccessToken
 			op.MustSaveDriverStorage(d)
-		} else if d.Addition.RefreshToken != "" {
-			var resp RefreshTokenResp
-			_, err := d.Request(RefreshToken, http.MethodPost, func(req *resty.Request) {
-				req.SetQueryParam("client_id", d.ClientID)
-				req.SetQueryParam("grant_type", "refresh_token")
-				req.SetQueryParam("refresh_token", d.Addition.RefreshToken)
-			}, &resp)
-			if err != nil {
-				return err
-			}
-			d.AccessToken = resp.AccessToken
-			d.RefreshToken = resp.RefreshToken
-			op.MustSaveDriverStorage(d)
 		}
 	}
 	return nil
 }
 
+func (d *Open123) SignURL(originURL, privateKey string, uid uint64, validDuration time.Duration) (newURL string, err error) {
+	// generate a Unix timestamp
+	ts := time.Now().Add(validDuration).Unix()
+
+	// generate a random string (a UUID is recommended; it must not contain hyphens)
+	rand := strings.ReplaceAll(uuid.New().String(), "-", "")
+
+	// parse the URL
+	objURL, err := url.Parse(originURL)
+	if err != nil {
+		return "", err
+	}
+
+	// the string to sign, in the form path-timestamp-rand-uid-privateKey
+	unsignedStr := fmt.Sprintf("%s-%d-%s-%d-%s", objURL.Path, ts, rand, uid, privateKey)
+	md5Hash := md5.Sum([]byte(unsignedStr))
+	// build the auth parameter, in the form timestamp-rand-uid-md5hash
+	authKey := fmt.Sprintf("%d-%s-%d-%x", ts, rand, uid, md5Hash)
+
+	// append the auth parameter to the URL query
+	v := objURL.Query()
+	v.Add("auth_key", authKey)
+	objURL.RawQuery = v.Encode()
+
+	return objURL.String(), nil
+}
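For clarity, here is the signing scheme above reduced to a worked, standalone example: the auth_key is timestamp-rand-uid-md5(path-timestamp-rand-uid-privateKey), appended as a query parameter. The key, UID, timestamp, and URL below are all made up for illustration:

package main

import (
	"crypto/md5"
	"fmt"
	"net/url"
)

func main() {
	const privateKey = "example-private-key" // illustrative only
	uid := uint64(12345)
	ts := int64(1700000000)
	rand := "0123456789abcdef0123456789abcdef" // a UUID with the dashes stripped

	u, _ := url.Parse("https://example.com/file/demo.bin")
	// string to sign: path-timestamp-rand-uid-privateKey
	unsigned := fmt.Sprintf("%s-%d-%s-%d-%s", u.Path, ts, rand, uid, privateKey)
	sum := md5.Sum([]byte(unsigned))
	// auth parameter: timestamp-rand-uid-md5hash
	authKey := fmt.Sprintf("%d-%s-%d-%x", ts, rand, uid, sum)

	q := u.Query()
	q.Add("auth_key", authKey)
	u.RawQuery = q.Encode()
	fmt.Println(u.String())
}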
 
 func (d *Open123) getUserInfo() (*UserInfoResp, error) {
 	var resp UserInfoResp
 
@@ -124,6 +158,18 @@ func (d *Open123) getUserInfo() (*UserInfoResp, error) {
 	return &resp, nil
 }
 
+func (d *Open123) getUID() (uint64, error) {
+	if d.UID != 0 {
+		return d.UID, nil
+	}
+	resp, err := d.getUserInfo()
+	if err != nil {
+		return 0, err
+	}
+	d.UID = resp.Data.UID
+	return resp.Data.UID, nil
+}
+
 func (d *Open123) getFiles(parentFileId int64, limit int, lastFileId int64) (*FileListResp, error) {
 	var resp FileListResp
 
@@ -161,6 +207,21 @@ func (d *Open123) getDownloadInfo(fileId int64) (*DownloadInfoResp, error) {
 	return &resp, nil
 }
 
+func (d *Open123) getDirectLink(fileId int64) (*DirectLinkResp, error) {
+	var resp DirectLinkResp
+
+	_, err := d.Request(DirectLink, http.MethodGet, func(req *resty.Request) {
+		req.SetQueryParams(map[string]string{
+			"fileID": strconv.FormatInt(fileId, 10),
+		})
+	}, &resp)
+	if err != nil {
+		return nil, err
+	}
+
+	return &resp, nil
+}
+
 func (d *Open123) mkdir(parentID int64, name string) error {
 	_, err := d.Request(Mkdir, http.MethodPost, func(req *resty.Request) {
 		req.SetBody(base.Json{
@ -70,14 +70,6 @@ func (d *Pan123Share) List(ctx context.Context, dir model.Obj, args model.ListAr
|
||||
func (d *Pan123Share) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
||||
// TODO return link of file, required
|
||||
if f, ok := file.(File); ok {
|
||||
//var resp DownResp
|
||||
var headers map[string]string
|
||||
if !utils.IsLocalIPAddr(args.IP) {
|
||||
headers = map[string]string{
|
||||
//"X-Real-IP": "1.1.1.1",
|
||||
"X-Forwarded-For": args.IP,
|
||||
}
|
||||
}
|
||||
data := base.Json{
|
||||
"shareKey": d.ShareKey,
|
||||
"SharePwd": d.SharePwd,
|
||||
@ -87,25 +79,27 @@ func (d *Pan123Share) Link(ctx context.Context, file model.Obj, args model.LinkA
|
||||
"size": f.Size,
|
||||
}
|
||||
resp, err := d.request(DownloadInfo, http.MethodPost, func(req *resty.Request) {
|
||||
req.SetBody(data).SetHeaders(headers)
|
||||
req.SetBody(data)
|
||||
}, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
downloadUrl := utils.Json.Get(resp, "data", "DownloadURL").ToString()
|
||||
u, err := url.Parse(downloadUrl)
|
||||
ou, err := url.Parse(downloadUrl)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nu := u.Query().Get("params")
|
||||
u_ := ou.String()
|
||||
nu := ou.Query().Get("params")
|
||||
if nu != "" {
|
||||
du, _ := base64.StdEncoding.DecodeString(nu)
|
||||
u, err = url.Parse(string(du))
|
||||
u, err := url.Parse(string(du))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
u_ = u.String()
|
||||
}
|
||||
u_ := u.String()
|
||||
|
||||
log.Debug("download url: ", u_)
|
||||
res, err := base.NoRedirectClient.R().SetHeader("Referer", "https://www.123pan.com/").Get(u_)
|
||||
if err != nil {
|
||||
@ -122,7 +116,7 @@ func (d *Pan123Share) Link(ctx context.Context, file model.Obj, args model.LinkA
|
||||
link.URL = utils.Json.Get(res.Body(), "data", "redirect_url").ToString()
|
||||
}
|
||||
link.Header = http.Header{
|
||||
"Referer": []string{"https://www.123pan.com/"},
|
||||
"Referer": []string{fmt.Sprintf("%s://%s/", ou.Scheme, ou.Host)},
|
||||
}
|
||||
return &link, nil
|
||||
}
|
||||
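The rewritten Link above keeps the originally parsed URL around (ou) because the real download target may be hidden inside a base64-encoded "params" query value. A standalone sketch of that unwrap step (the URLs here are invented for illustration):

package main

import (
	"encoding/base64"
	"fmt"
	"net/url"
)

// unwrap returns the URL carried in the "params" query value when present,
// otherwise the input URL itself, mirroring the logic above.
func unwrap(raw string) (string, error) {
	ou, err := url.Parse(raw)
	if err != nil {
		return "", err
	}
	if p := ou.Query().Get("params"); p != "" {
		du, err := base64.StdEncoding.DecodeString(p)
		if err != nil {
			return "", err
		}
		if u, err := url.Parse(string(du)); err == nil {
			return u.String(), nil
		}
	}
	return ou.String(), nil
}

func main() {
	inner := base64.StdEncoding.EncodeToString([]byte("https://dl.example.com/real"))
	out, _ := unwrap("https://example.com/redirect?params=" + inner)
	fmt.Println(out) // https://dl.example.com/real
}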
 
@@ -15,17 +15,10 @@ type Addition struct {
 }
 
 var config = driver.Config{
-	Name:              "123PanShare",
-	LocalSort:         true,
-	OnlyLocal:         false,
-	OnlyProxy:         false,
-	NoCache:           false,
-	NoUpload:          true,
-	NeedMs:            false,
-	DefaultRoot:       "0",
-	CheckStatus:       false,
-	Alert:             "",
-	NoOverwriteUpload: false,
+	Name:        "123PanShare",
+	LocalSort:   true,
+	NoUpload:    true,
+	DefaultRoot: "0",
 }
 
 func init() {
@ -24,7 +24,7 @@ type File struct {
|
||||
}
|
||||
|
||||
func (f File) GetHash() utils.HashInfo {
|
||||
return utils.HashInfo{}
|
||||
return utils.NewHashInfo(utils.MD5, f.Etag)
|
||||
}
|
||||
|
||||
func (f File) GetPath() string {
|
||||
 
@@ -522,30 +522,27 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 	var err error
 	fullHash := stream.GetHash().GetHash(utils.SHA256)
 	if len(fullHash) != utils.SHA256.Width {
-		_, fullHash, err = streamPkg.CacheFullInTempFileAndHash(stream, utils.SHA256)
+		_, fullHash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA256)
 		if err != nil {
 			return err
 		}
 	}
 
 	size := stream.GetSize()
-	var partSize = d.getPartSize(size)
-	part := size / partSize
-	if size%partSize > 0 {
-		part++
-	} else if part == 0 {
-		part = 1
+	partSize := d.getPartSize(size)
+	part := int64(1)
+	if size > partSize {
+		part = (size + partSize - 1) / partSize
 	}
 
+	// generate all partInfos
 	partInfos := make([]PartInfo, 0, part)
 	for i := int64(0); i < part; i++ {
 		if utils.IsCanceled(ctx) {
 			return ctx.Err()
 		}
 		start := i * partSize
-		byteSize := size - start
-		if byteSize > partSize {
-			byteSize = partSize
-		}
+		byteSize := min(size-start, partSize)
 		partNumber := i + 1
 		partInfo := PartInfo{
 			PartNumber: partNumber,
@@ -593,17 +590,20 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 	// resp.Data.RapidUpload: true means rapid upload is supported, but here we simply check whether slice upload URLs came back
 	// rapid upload still requires handling conflicts manually
 	if resp.Data.PartInfos != nil {
-		// read the upload URLs for the first 100 slices
 		uploadPartInfos := resp.Data.PartInfos
+		// Progress
+		p := driver.NewProgress(size, up)
+		rateLimited := driver.NewLimitedUploadStream(ctx, stream)
 
-		// fetch the upload URLs for the remaining slices
-		for i := 101; i < len(partInfos); i += 100 {
-			end := i + 100
-			if end > len(partInfos) {
-				end = len(partInfos)
-			}
+		// upload the first 100 slices first
+		err = d.uploadPersonalParts(ctx, partInfos, resp.Data.PartInfos, rateLimited, p)
+		if err != nil {
+			return err
+		}
+
+		// if slices remain, fetch their upload URLs in batches and upload them
+		for i := 100; i < len(partInfos); i += 100 {
+			end := min(i+100, len(partInfos))
 			batchPartInfos := partInfos[i:end]
 
 			moredata := base.Json{
				"fileId":   resp.Data.FileId,
				"uploadId": resp.Data.UploadId,
@@ -619,45 +619,13 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 			if err != nil {
 				return err
 			}
-			uploadPartInfos = append(uploadPartInfos, moreresp.Data.PartInfos...)
-		}
-
-		// Progress
-		p := driver.NewProgress(size, up)
-
-		rateLimited := driver.NewLimitedUploadStream(ctx, stream)
-		// upload all slices
-		for _, uploadPartInfo := range uploadPartInfos {
-			index := uploadPartInfo.PartNumber - 1
-			partSize := partInfos[index].PartSize
-			log.Debugf("[139] uploading part %+v/%+v", index, len(uploadPartInfos))
-			limitReader := io.LimitReader(rateLimited, partSize)
-
-			// Update Progress
-			r := io.TeeReader(limitReader, p)
-
-			req, err := http.NewRequest("PUT", uploadPartInfo.UploadUrl, r)
+			err = d.uploadPersonalParts(ctx, partInfos, moreresp.Data.PartInfos, rateLimited, p)
 			if err != nil {
 				return err
 			}
-			req = req.WithContext(ctx)
-			req.Header.Set("Content-Type", "application/octet-stream")
-			req.Header.Set("Content-Length", fmt.Sprint(partSize))
-			req.Header.Set("Origin", "https://yun.139.com")
-			req.Header.Set("Referer", "https://yun.139.com/")
-			req.ContentLength = partSize
-
-			res, err := base.HttpClient.Do(req)
-			if err != nil {
-				return err
-			}
-			_ = res.Body.Close()
-			log.Debugf("[139] uploaded: %+v", res)
-			if res.StatusCode != http.StatusOK {
-				return fmt.Errorf("unexpected status code: %d", res.StatusCode)
-			}
 		}
 
 		// after all slices are uploaded, complete
 		data = base.Json{
 			"contentHash":          fullHash,
 			"contentHashAlgorithm": "SHA256",
@@ -786,12 +754,10 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 	size := stream.GetSize()
 	// Progress
 	p := driver.NewProgress(size, up)
-	var partSize = d.getPartSize(size)
-	part := size / partSize
-	if size%partSize > 0 {
-		part++
-	} else if part == 0 {
-		part = 1
+	partSize := d.getPartSize(size)
+	part := int64(1)
+	if size > partSize {
+		part = (size + partSize - 1) / partSize
 	}
 	rateLimited := driver.NewLimitedUploadStream(ctx, stream)
 	for i := int64(0); i < part; i++ {
@@ -805,12 +771,10 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 		limitReader := io.LimitReader(rateLimited, byteSize)
 		// Update Progress
 		r := io.TeeReader(limitReader, p)
-		req, err := http.NewRequest("POST", resp.Data.UploadResult.RedirectionURL, r)
+		req, err := http.NewRequestWithContext(ctx, http.MethodPost, resp.Data.UploadResult.RedirectionURL, r)
 		if err != nil {
 			return err
 		}
 
-		req = req.WithContext(ctx)
 		req.Header.Set("Content-Type", "text/plain;name="+unicode(stream.GetName()))
 		req.Header.Set("contentSize", strconv.FormatInt(size, 10))
 		req.Header.Set("range", fmt.Sprintf("bytes=%d-%d", start, start+byteSize-1))
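The refactor above first uploads the slice URLs returned inline with the create response, then pages through the remainder 100 at a time. The paging itself is ordinary slice windowing; a minimal sketch (the slice contents are placeholders):

package main

import "fmt"

func main() {
	parts := make([]int, 257) // stands in for partInfos; the first 100 came with the create response
	for i := 100; i < len(parts); i += 100 {
		end := min(i+100, len(parts))
		batch := parts[i:end]
		fmt.Println(i, end, len(batch)) // prints "100 200 100" then "200 257 57"
	}
}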
 
@@ -1,9 +1,11 @@
 package _139
 
 import (
+	"context"
 	"encoding/base64"
+	"errors"
 	"fmt"
 	"io"
 	"net/http"
 	"net/url"
 	"path"
@@ -13,6 +15,7 @@ import (
 	"time"
 
 	"github.com/OpenListTeam/OpenList/v4/drivers/base"
+	"github.com/OpenListTeam/OpenList/v4/internal/driver"
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
 	"github.com/OpenListTeam/OpenList/v4/internal/op"
 	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
@@ -623,3 +626,47 @@ func (d *Yun139) getPersonalCloudHost() string {
 	}
 	return d.PersonalCloudHost
 }
 
+func (d *Yun139) uploadPersonalParts(ctx context.Context, partInfos []PartInfo, uploadPartInfos []PersonalPartInfo, rateLimited *driver.RateLimitReader, p *driver.Progress) error {
+	// make sure the array is sorted by PartNumber in ascending order
+	sort.Slice(uploadPartInfos, func(i, j int) bool {
+		return uploadPartInfos[i].PartNumber < uploadPartInfos[j].PartNumber
+	})
+
+	for _, uploadPartInfo := range uploadPartInfos {
+		index := uploadPartInfo.PartNumber - 1
+		if index < 0 || index >= len(partInfos) {
+			return fmt.Errorf("invalid PartNumber %d: index out of bounds (partInfos length: %d)", uploadPartInfo.PartNumber, len(partInfos))
+		}
+		partSize := partInfos[index].PartSize
+		log.Debugf("[139] uploading part %+v/%+v", index, len(partInfos))
+		limitReader := io.LimitReader(rateLimited, partSize)
+		r := io.TeeReader(limitReader, p)
+		req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadPartInfo.UploadUrl, r)
+		if err != nil {
+			return err
+		}
+		req.Header.Set("Content-Type", "application/octet-stream")
+		req.Header.Set("Content-Length", fmt.Sprint(partSize))
+		req.Header.Set("Origin", "https://yun.139.com")
+		req.Header.Set("Referer", "https://yun.139.com/")
+		req.ContentLength = partSize
+		err = func() error {
+			res, err := base.HttpClient.Do(req)
+			if err != nil {
+				return err
+			}
+			defer res.Body.Close()
+			log.Debugf("[139] uploaded: %+v", res)
+			if res.StatusCode != http.StatusOK {
+				body, _ := io.ReadAll(res.Body)
+				return fmt.Errorf("unexpected status code: %d, body: %s", res.StatusCode, string(body))
+			}
+			return nil
+		}()
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
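Each part above is carved from one shared rate-limited stream with io.LimitReader and mirrored into the progress tracker with io.TeeReader. The same reader composition in isolation (the counting writer stands in for driver.Progress):

package main

import (
	"fmt"
	"io"
	"strings"
)

// countingWriter plays the role of the progress sink: it just tallies bytes.
type countingWriter struct{ n int64 }

func (c *countingWriter) Write(p []byte) (int, error) { c.n += int64(len(p)); return len(p), nil }

func main() {
	src := strings.NewReader("0123456789") // stands in for the upload stream
	var progress countingWriter

	part := io.LimitReader(src, 4)        // expose only the next 4 bytes
	body := io.TeeReader(part, &progress) // report bytes as they are read

	data, _ := io.ReadAll(body) // an http.Request body would read this instead
	fmt.Println(string(data), progress.n) // 0123 4
}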
 
@@ -365,11 +365,10 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
 	log.Debugf("uploadData: %+v", uploadData)
 	requestURL := uploadData.RequestURL
 	uploadHeaders := strings.Split(decodeURIComponent(uploadData.RequestHeader), "&")
-	req, err := http.NewRequest(http.MethodPut, requestURL, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
+	req, err := http.NewRequestWithContext(ctx, http.MethodPut, requestURL, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
 	if err != nil {
 		return err
 	}
-	req = req.WithContext(ctx)
 	for _, v := range uploadHeaders {
 		i := strings.Index(v, "=")
 		req.Header.Set(v[0:i], v[i+1:])
 
@@ -1,7 +1,6 @@
 package _189_tv
 
 import (
-	"container/ring"
 	"context"
 	"net/http"
 	"strconv"
@@ -12,18 +11,20 @@ import (
 	"github.com/OpenListTeam/OpenList/v4/internal/driver"
 	"github.com/OpenListTeam/OpenList/v4/internal/errs"
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
+	"github.com/OpenListTeam/OpenList/v4/pkg/cron"
 	"github.com/go-resty/resty/v2"
 )
 
 type Cloud189TV struct {
 	model.Storage
 	Addition
-	client                  *resty.Client
-	tokenInfo               *AppSessionResp
-	uploadThread            int
-	familyTransferFolder    *ring.Ring
-	cleanFamilyTransferFile func()
-	storageConfig           driver.Config
+	client        *resty.Client
+	tokenInfo     *AppSessionResp
+	uploadThread  int
+	storageConfig driver.Config
 
+	TempUuid string
+	cron     *cron.Cron // newly added cron field
 }
 
 func (y *Cloud189TV) Config() driver.Config {
@@ -79,10 +80,17 @@ func (y *Cloud189TV) Init(ctx context.Context) (err error) {
 		}
 	}
 
+	y.cron = cron.NewCron(time.Minute * 5)
+	y.cron.Do(y.keepAlive)
+
 	return
 }
 
 func (y *Cloud189TV) Drop(ctx context.Context) error {
+	if y.cron != nil {
+		y.cron.Stop()
+		y.cron = nil
+	}
 	return nil
 }
 
@@ -8,7 +8,6 @@ import (
 type Addition struct {
 	driver.RootID
 	AccessToken string `json:"access_token"`
-	TempUuid    string
 	OrderBy        string `json:"order_by" type:"select" options:"filename,filesize,lastOpTime" default:"filename"`
 	OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
 	Type           string `json:"type" type:"select" options:"personal,family" default:"personal"`
@ -5,17 +5,19 @@ import (
|
||||
"encoding/base64"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"github.com/skip2/go-qrcode"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/skip2/go-qrcode"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/drivers/base"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/stream"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
|
||||
"github.com/go-resty/resty/v2"
|
||||
@ -64,6 +66,10 @@ func (y *Cloud189TV) AppKeySignatureHeader(url, method string) map[string]string
|
||||
}
|
||||
|
||||
func (y *Cloud189TV) request(url, method string, callback base.ReqCallback, params map[string]string, resp interface{}, isFamily ...bool) ([]byte, error) {
|
||||
return y.requestWithRetry(url, method, callback, params, resp, 0, isFamily...)
|
||||
}
|
||||
|
||||
func (y *Cloud189TV) requestWithRetry(url, method string, callback base.ReqCallback, params map[string]string, resp interface{}, retryCount int, isFamily ...bool) ([]byte, error) {
|
||||
req := y.client.R().SetQueryParams(clientSuffix())
|
||||
|
||||
if params != nil {
|
||||
@ -89,7 +95,22 @@ func (y *Cloud189TV) request(url, method string, callback base.ReqCallback, para
|
||||
|
||||
if strings.Contains(res.String(), "userSessionBO is null") ||
|
||||
strings.Contains(res.String(), "InvalidSessionKey") {
|
||||
return nil, errors.New("session expired")
|
||||
// 限制重试次数,避免无限递归
|
||||
if retryCount >= 3 {
|
||||
y.Addition.AccessToken = ""
|
||||
op.MustSaveDriverStorage(y)
|
||||
return nil, errors.New("session expired after retry")
|
||||
}
|
||||
|
||||
// 尝试刷新会话
|
||||
if err := y.refreshSession(); err != nil {
|
||||
// 如果刷新失败,说明AccessToken也已过期,需要重新登录
|
||||
y.Addition.AccessToken = ""
|
||||
op.MustSaveDriverStorage(y)
|
||||
return nil, errors.New("session expired")
|
||||
}
|
||||
// 如果刷新成功,则重试原始请求(增加重试计数)
|
||||
return y.requestWithRetry(url, method, callback, params, resp, retryCount+1, isFamily...)
|
||||
}
|
||||
|
||||
// 处理错误
|
||||
@ -129,6 +150,7 @@ func (y *Cloud189TV) put(ctx context.Context, url string, headers map[string]str
|
||||
}
|
||||
}
|
||||
|
||||
// 请求完成后http.Client会Close Request.Body
|
||||
resp, err := base.HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
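requestWithRetry above bounds its recursion with an explicit counter instead of retrying forever. The same guard written as a self-contained sketch (withSessionRetry, refresh, and the sentinel error are stand-ins of my own, not the driver's API):

package main

import (
	"errors"
	"fmt"
)

var errSessionExpired = errors.New("session expired")

// withSessionRetry reruns op after a successful refresh, at most maxRetry
// times, mirroring the counter-based guard above.
func withSessionRetry(op func() error, refresh func() error, maxRetry int) error {
	for attempt := 0; ; attempt++ {
		err := op()
		if err == nil || !errors.Is(err, errSessionExpired) {
			return err
		}
		if attempt >= maxRetry {
			return fmt.Errorf("session expired after retry")
		}
		if rerr := refresh(); rerr != nil {
			return fmt.Errorf("session expired: %w", rerr)
		}
	}
}

func main() {
	calls := 0
	op := func() error {
		calls++
		if calls < 3 {
			return errSessionExpired
		}
		return nil
	}
	refresh := func() error { return nil }
	fmt.Println(withSessionRetry(op, refresh, 3), calls) // <nil> 3
}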
@ -208,7 +230,7 @@ func (y *Cloud189TV) login() (err error) {
|
||||
var erron RespErr
|
||||
var tokenInfo AppSessionResp
|
||||
if y.Addition.AccessToken == "" {
|
||||
if y.Addition.TempUuid == "" {
|
||||
if y.TempUuid == "" {
|
||||
// 获取登录参数
|
||||
var uuidInfo UuidInfoResp
|
||||
req.SetResult(&uuidInfo).SetError(&erron)
|
||||
@ -227,7 +249,7 @@ func (y *Cloud189TV) login() (err error) {
|
||||
if uuidInfo.Uuid == "" {
|
||||
return errors.New("uuidInfo is empty")
|
||||
}
|
||||
y.Addition.TempUuid = uuidInfo.Uuid
|
||||
y.TempUuid = uuidInfo.Uuid
|
||||
op.MustSaveDriverStorage(y)
|
||||
|
||||
// 展示二维码
|
||||
@ -255,7 +277,7 @@ func (y *Cloud189TV) login() (err error) {
|
||||
// Signature
|
||||
req.SetHeaders(y.AppKeySignatureHeader(ApiUrl+"/family/manage/qrcodeLoginResult.action",
|
||||
http.MethodGet))
|
||||
req.SetQueryParam("uuid", y.Addition.TempUuid)
|
||||
req.SetQueryParam("uuid", y.TempUuid)
|
||||
_, err = req.Execute(http.MethodGet, ApiUrl+"/family/manage/qrcodeLoginResult.action")
|
||||
if err != nil {
|
||||
return
|
||||
@ -267,7 +289,6 @@ func (y *Cloud189TV) login() (err error) {
|
||||
return errors.New("E189AccessToken is empty")
|
||||
}
|
||||
y.Addition.AccessToken = accessTokenResp.E189AccessToken
|
||||
y.Addition.TempUuid = ""
|
||||
}
|
||||
}
|
||||
// 获取SessionKey 和 SessionSecret
|
||||
@ -291,6 +312,44 @@ func (y *Cloud189TV) login() (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// refreshSession 尝试使用现有的 AccessToken 刷新会话
|
||||
func (y *Cloud189TV) refreshSession() (err error) {
|
||||
var erron RespErr
|
||||
var tokenInfo AppSessionResp
|
||||
reqb := y.client.R().SetQueryParams(clientSuffix())
|
||||
reqb.SetResult(&tokenInfo).SetError(&erron)
|
||||
// Signature
|
||||
reqb.SetHeaders(y.AppKeySignatureHeader(ApiUrl+"/family/manage/loginFamilyMerge.action",
|
||||
http.MethodGet))
|
||||
reqb.SetQueryParam("e189AccessToken", y.Addition.AccessToken)
|
||||
_, err = reqb.Execute(http.MethodGet, ApiUrl+"/family/manage/loginFamilyMerge.action")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if erron.HasError() {
|
||||
return &erron
|
||||
}
|
||||
|
||||
y.tokenInfo = &tokenInfo
|
||||
return nil
|
||||
}
|
||||
|
||||
func (y *Cloud189TV) keepAlive() {
|
||||
_, err := y.get(ApiUrl+"/keepUserSession.action", func(r *resty.Request) {
|
||||
r.SetQueryParams(clientSuffix())
|
||||
}, nil)
|
||||
if err != nil {
|
||||
utils.Log.Warnf("189tv: Failed to keep user session alive: %v", err)
|
||||
// 如果keepAlive失败,尝试刷新session
|
||||
if refreshErr := y.refreshSession(); refreshErr != nil {
|
||||
utils.Log.Errorf("189tv: Failed to refresh session after keepAlive error: %v", refreshErr)
|
||||
}
|
||||
} else {
|
||||
utils.Log.Debugf("189tv: User session kept alive successfully.")
|
||||
}
|
||||
}
|
||||
|
 
 func (y *Cloud189TV) RapidUpload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, isFamily bool, overwrite bool) (model.Obj, error) {
 	fileMd5 := stream.GetHash().GetHash(utils.MD5)
 	if len(fileMd5) < utils.MD5.Width {
@@ -311,11 +370,14 @@ func (y *Cloud189TV) RapidUpload(ctx context.Context, dstDir model.Obj, stream m
 
 // legacy upload; the family cloud does not support overwriting
 func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
-	tempFile, err := file.CacheFullInTempFile()
-	if err != nil {
-		return nil, err
+	fileMd5 := file.GetHash().GetHash(utils.MD5)
+	var tempFile = file.GetFile()
+	var err error
+	if len(fileMd5) != utils.MD5.Width {
+		tempFile, fileMd5, err = stream.CacheFullAndHash(file, &up, utils.MD5)
+	} else if tempFile == nil {
+		tempFile, err = file.CacheFullAndWriter(&up, nil)
 	}
-	fileMd5, err := utils.HashFile(utils.MD5, tempFile)
 	if err != nil {
 		return nil, err
 	}
@@ -328,6 +390,10 @@ func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model
 
 	// the file does not exist in the cloud drive yet; start uploading
 	status := GetUploadFileStatusResp{CreateUploadFileResp: *uploadInfo}
+	// driver.RateLimitReader tries to Close the underlying reader,
+	// but tempFile here is an *os.File that would become unreadable after Close,
+	// so wrap it in io.NopCloser
+	rateLimitedRd := driver.NewLimitedUploadStream(ctx, io.NopCloser(tempFile))
 	for status.GetSize() < file.GetSize() && status.FileDataExists != 1 {
 		if utils.IsCanceled(ctx) {
 			return nil, ctx.Err()
@@ -345,7 +411,7 @@ func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model
 			header["Edrive-UploadFileId"] = fmt.Sprint(status.UploadFileId)
 		}
 
-		_, err := y.put(ctx, status.FileUploadUrl, header, true, io.NopCloser(tempFile), isFamily)
+		_, err := y.put(ctx, status.FileUploadUrl, header, true, rateLimitedRd, isFamily)
 		if err, ok := err.(*RespErr); ok && err.Code != "InputStreamReadError" {
 			return nil, err
 		}
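The io.NopCloser wrap above exists because the rate limiter closes its underlying reader when a request finishes, which would leave the cached *os.File unreadable on the next loop iteration. The guard in isolation (the closable reader here is a mock, not the driver's type):

package main

import (
	"fmt"
	"io"
	"strings"
)

// closableReader pretends to be the cached temp file: once closed, reads fail.
type closableReader struct {
	r      io.Reader
	closed bool
}

func (c *closableReader) Read(p []byte) (int, error) {
	if c.closed {
		return 0, fmt.Errorf("read after close")
	}
	return c.r.Read(p)
}
func (c *closableReader) Close() error { c.closed = true; return nil }

func main() {
	f := &closableReader{r: strings.NewReader("abcdef")}
	wrapped := io.NopCloser(f) // Close on the wrapper never reaches f

	wrapped.Close()
	buf := make([]byte, 3)
	n, err := f.Read(buf) // still readable after the wrapper was "closed"
	fmt.Println(n, err)   // 3 <nil>
}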
 
@@ -12,6 +12,7 @@ import (
 	"github.com/OpenListTeam/OpenList/v4/internal/driver"
 	"github.com/OpenListTeam/OpenList/v4/internal/errs"
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
+	"github.com/OpenListTeam/OpenList/v4/pkg/cron"
 	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
 	"github.com/go-resty/resty/v2"
 	"github.com/google/uuid"
@@ -21,12 +22,12 @@ type Cloud189PC struct {
 	model.Storage
 	Addition
 
 	identity string
 
 	client *resty.Client
 
-	loginParam *LoginParam
-	tokenInfo  *AppSessionResp
+	loginParam  *LoginParam
+	qrcodeParam *QRLoginParam
+
+	tokenInfo *AppSessionResp
 
 	uploadThread int
 
@@ -35,6 +36,7 @@ type Cloud189PC struct {
 
 	storageConfig driver.Config
 	ref           *Cloud189PC
+	cron          *cron.Cron
 }
 
 func (y *Cloud189PC) Config() driver.Config {
@@ -84,14 +86,22 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) {
 		})
 	}
 
 	// avoid logging in repeatedly
 	identity := utils.GetMD5EncodeStr(y.Username + y.Password)
 	if !y.isLogin() || y.identity != identity {
 		y.identity = identity
+		// try refreshing with the token first, then fall back to logging in
+		if y.Addition.RefreshToken != "" {
+			y.tokenInfo = &AppSessionResp{RefreshToken: y.Addition.RefreshToken}
+			if err = y.refreshToken(); err != nil {
+				return
+			}
+		} else {
 			if err = y.login(); err != nil {
 				return
 			}
+		}
+
+		// set up and start the cron task
+		y.cron = cron.NewCron(time.Duration(time.Minute * 5))
+		// run keepAlive every 5 minutes
+		y.cron.Do(y.keepAlive)
 	}
 
 	// handle the family cloud ID
@@ -128,6 +138,10 @@ func (d *Cloud189PC) InitReference(storage driver.Driver) error {
 
 func (y *Cloud189PC) Drop(ctx context.Context) error {
 	y.ref = nil
+	if y.cron != nil {
+		y.cron.Stop()
+		y.cron = nil
+	}
 	return nil
 }
 
@@ -80,6 +80,20 @@ func timestamp() int64 {
 	return time.Now().UTC().UnixNano() / 1e6
 }
 
+// formatDate formats a time.Time object into the "YYYY-MM-DDHH:mm:ssSSS" format.
+func formatDate(t time.Time) string {
+	// The layout string "2006-01-0215:04:05.000" corresponds to:
+	// 2006 -> Year (YYYY)
+	// 01   -> Month (MM)
+	// 02   -> Day (DD)
+	// 15   -> Hour (HH)
+	// 04   -> Minute (mm)
+	// 05   -> Second (ss)
+	// 000  -> Millisecond (SSS) with leading zeros
+	// Note the lack of a separator between the date and hour, matching the desired output.
+	return t.Format("2006-01-0215:04:05.000")
+}
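Go's reference-time layouts make this kind of separator-free format easy to get subtly wrong, so a quick standalone check of the behavior is worth having (formatDate is re-declared here only so the snippet runs on its own):

package main

import (
	"fmt"
	"time"
)

// formatDate mirrors the helper above: "YYYY-MM-DDHH:mm:ssSSS" with no
// separator between the date and the hour.
func formatDate(t time.Time) string {
	return t.Format("2006-01-0215:04:05.000")
}

func main() {
	t := time.Date(2024, 3, 5, 9, 8, 7, 60_000_000, time.UTC)
	fmt.Println(formatDate(t)) // 2024-03-0509:08:07.060
}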
 
 func MustParseTime(str string) *time.Time {
 	lastOpTime, _ := time.ParseInLocation("2006-01-02 15:04:05 -07", str+" +08", time.Local)
 	return &lastOpTime
 
@@ -6,9 +6,11 @@ import (
 )
 
 type Addition struct {
-	Username string `json:"username" required:"true"`
-	Password string `json:"password" required:"true"`
-	VCode    string `json:"validate_code"`
+	LoginType    string `json:"login_type" type:"select" options:"password,qrcode" default:"password" required:"true"`
+	Username     string `json:"username" required:"true"`
+	Password     string `json:"password" required:"true"`
+	VCode        string `json:"validate_code"`
+	RefreshToken string `json:"refresh_token" help:"To switch accounts, please clear this field"`
 	driver.RootID
 	OrderBy        string `json:"order_by" type:"select" options:"filename,filesize,lastOpTime" default:"filename"`
 	OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
 
@@ -68,15 +68,7 @@ func (e *RespErr) Error() string {
 	return ""
 }
 
-// parameters needed for login
-type LoginParam struct {
-	// RSA-encrypted username and password
-	RsaUsername string
-	RsaPassword string
-
-	// RSA key
-	jRsaKey string
-
+type BaseLoginParam struct {
 	// request header parameters
 	Lt    string
 	ReqId string
@@ -88,6 +80,27 @@ type LoginParam struct {
 	CaptchaToken string
 }
 
+// QRLoginParam holds intermediate state for the QR-code login flow
+type QRLoginParam struct {
+	BaseLoginParam
+
+	UUID       string `json:"uuid"`
+	EncodeUUID string `json:"encodeuuid"`
+	EncryUUID  string `json:"encryuuid"`
+}
+
+// parameters needed for login
+type LoginParam struct {
+	// RSA-encrypted username and password
+	RsaUsername string
+	RsaPassword string
+
+	// RSA key
+	jRsaKey string
+
+	BaseLoginParam
+}
+
 // login encryption structures
 type EncryptConfResp struct {
 	Result int `json:"result"`
 
@@ -7,6 +7,7 @@ import (
 	"encoding/hex"
 	"encoding/xml"
 	"fmt"
+	"hash"
 	"io"
 	"net/http"
 	"net/http/cookiejar"
@@ -28,6 +29,7 @@ import (
 	"github.com/OpenListTeam/OpenList/v4/internal/stream"
 	"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
 	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
+	"github.com/skip2/go-qrcode"
 
 	"github.com/avast/retry-go"
 	"github.com/go-resty/resty/v2"
@@ -53,6 +55,9 @@ const (
 	MAC = "TELEMAC"
 
 	CHANNEL_ID = "web_cloud.189.cn"
+
+	// Error codes
+	UserInvalidOpenTokenError = "UserInvalidOpenToken"
 )
 
 func (y *Cloud189PC) SignatureHeader(url, method, params string, isFamily bool) map[string]string {
@@ -263,7 +268,14 @@ func (y *Cloud189PC) findFileByName(ctx context.Context, searchName string, fold
 	}
 }
 
-func (y *Cloud189PC) login() (err error) {
+func (y *Cloud189PC) login() error {
+	if y.LoginType == "qrcode" {
+		return y.loginByQRCode()
+	}
+	return y.loginByPassword()
+}
+
+func (y *Cloud189PC) loginByPassword() (err error) {
 	// set up the parameters needed for login
 	if y.loginParam == nil {
 		if err = y.initLoginParam(); err != nil {
@@ -277,10 +289,15 @@ func (y *Cloud189PC) login() (err error) {
 		// destroy the login parameters
 		y.loginParam = nil
 		// on error, reload the login parameters (refreshes the captcha)
-		if err != nil && y.NoUseOcr {
-			if err1 := y.initLoginParam(); err1 != nil {
-				err = fmt.Errorf("err1: %s \nerr2: %s", err, err1)
+		if err != nil {
+			if y.NoUseOcr {
+				if err1 := y.initLoginParam(); err1 != nil {
+					err = fmt.Errorf("err1: %s \nerr2: %s", err, err1)
+				}
 			}
+
+			y.Status = err.Error()
+			op.MustSaveDriverStorage(y)
 		}
 	}()
 
@@ -335,14 +352,105 @@ func (y *Cloud189PC) login() (err error) {
 		err = fmt.Errorf(tokenInfo.ResMessage)
 		return
 	}
+	y.Addition.RefreshToken = tokenInfo.RefreshToken
 	y.tokenInfo = &tokenInfo
+	op.MustSaveDriverStorage(y)
 	return
 }
 
 /* set up the parameters needed for login
 * returns an error when a captcha is encountered
 */
-func (y *Cloud189PC) initLoginParam() error {
||||
func (y *Cloud189PC) loginByQRCode() error {
|
||||
if y.qrcodeParam == nil {
|
||||
if err := y.initQRCodeParam(); err != nil {
|
||||
// 二维码也通过错误返回
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
var state struct {
|
||||
Status int `json:"status"`
|
||||
RedirectUrl string `json:"redirectUrl"`
|
||||
Msg string `json:"msg"`
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
_, err := y.client.R().
|
||||
SetHeaders(map[string]string{
|
||||
"Referer": AUTH_URL,
|
||||
"Reqid": y.qrcodeParam.ReqId,
|
||||
"lt": y.qrcodeParam.Lt,
|
||||
}).
|
||||
SetFormData(map[string]string{
|
||||
"appId": APP_ID,
|
||||
"clientType": CLIENT_TYPE,
|
||||
"returnUrl": RETURN_URL,
|
||||
"paramId": y.qrcodeParam.ParamId,
|
||||
"uuid": y.qrcodeParam.UUID,
|
||||
"encryuuid": y.qrcodeParam.EncryUUID,
|
||||
"date": formatDate(now),
|
||||
"timeStamp": fmt.Sprint(now.UTC().UnixNano() / 1e6),
|
||||
}).
|
||||
ForceContentType("application/json;charset=UTF-8").
|
||||
SetResult(&state).
|
||||
Post(AUTH_URL + "/api/logbox/oauth2/qrcodeLoginState.do")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check QR code state: %w", err)
|
||||
}
|
||||
|
||||
switch state.Status {
|
||||
case 0: // 登录成功
|
||||
var tokenInfo AppSessionResp
|
||||
_, err = y.client.R().
|
||||
SetResult(&tokenInfo).
|
||||
SetQueryParams(clientSuffix()).
|
||||
SetQueryParam("redirectURL", state.RedirectUrl).
|
||||
Post(API_URL + "/getSessionForPC.action")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if tokenInfo.ResCode != 0 {
|
||||
return fmt.Errorf(tokenInfo.ResMessage)
|
||||
}
|
||||
y.Addition.RefreshToken = tokenInfo.RefreshToken
|
||||
y.tokenInfo = &tokenInfo
|
||||
op.MustSaveDriverStorage(y)
|
||||
return nil
|
||||
case -11001: // 二维码过期
|
||||
y.qrcodeParam = nil
|
||||
return errors.New("QR code expired, please try again")
|
||||
case -106: // 等待扫描
|
||||
return y.genQRCode("QR code has not been scanned yet, please scan and save again")
|
||||
case -11002: // 等待确认
|
||||
return y.genQRCode("QR code has been scanned, please confirm the login on your phone and save again")
|
||||
default: // 其他错误
|
||||
y.qrcodeParam = nil
|
||||
return fmt.Errorf("QR code login failed with status %d: %s", state.Status, state.Msg)
|
||||
}
|
||||
}
|
||||
|
||||
func (y *Cloud189PC) genQRCode(text string) error {
|
||||
// 展示二维码
|
||||
qrTemplate := `<body>
|
||||
state: %s
|
||||
<br><img src="data:image/jpeg;base64,%s"/>
|
||||
<br>Or Click here: <a href="%s">Login</a>
|
||||
</body>`
|
||||
|
||||
// Generate QR code
|
||||
qrCode, err := qrcode.Encode(y.qrcodeParam.UUID, qrcode.Medium, 256)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to generate QR code: %v", err)
|
||||
}
|
||||
|
||||
// Encode QR code to base64
|
||||
qrCodeBase64 := base64.StdEncoding.EncodeToString(qrCode)
|
||||
|
||||
// Create the HTML page
|
||||
qrPage := fmt.Sprintf(qrTemplate, text, qrCodeBase64, y.qrcodeParam.UUID)
|
||||
return fmt.Errorf("need verify: \n%s", qrPage)
|
||||
|
||||
}
|
||||
|
||||
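The loginByQRCode flow above is effectively a polling state machine: each invocation re-queries qrcodeLoginState.do and maps the returned status code to "done", "retry later", or "start over". Below is a minimal self-contained sketch of that pattern; the status codes mirror the diff, while fetchState and its return values are hypothetical stand-ins for the real HTTP call.

package main

import (
    "errors"
    "fmt"
)

// qrState mirrors the fields the driver decodes from qrcodeLoginState.do.
type qrState struct {
    Status      int
    RedirectUrl string
    Msg         string
}

// fetchState is a hypothetical stand-in for the real HTTP request.
func fetchState() (qrState, error) { return qrState{Status: -106}, nil }

// pollQRLogin maps the provider status codes used in the diff onto actions.
func pollQRLogin() error {
    state, err := fetchState()
    if err != nil {
        return fmt.Errorf("failed to check QR code state: %w", err)
    }
    switch state.Status {
    case 0: // login succeeded: exchange RedirectUrl for a session
        fmt.Println("logged in via", state.RedirectUrl)
        return nil
    case -106: // not scanned yet: ask the user to scan and retry
        return errors.New("QR code has not been scanned yet")
    case -11002: // scanned, waiting for in-app confirmation
        return errors.New("QR code scanned, confirm on your phone")
    case -11001: // expired: a fresh QR code must be generated
        return errors.New("QR code expired, please try again")
    default:
        return fmt.Errorf("QR login failed with status %d: %s", state.Status, state.Msg)
    }
}

func main() { fmt.Println(pollQRLogin()) }
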
func (y *Cloud189PC) initBaseParams() (*BaseLoginParam, error) {
    // Clear cookies
    jar, _ := cookiejar.New(nil)
    y.client.SetCookieJar(jar)
@@ -356,17 +464,30 @@ func (y *Cloud189PC) initLoginParam() error {
        }).
        Get(WEB_URL + "/api/portal/unifyLoginForPC.action")
    if err != nil {
        return err
        return nil, err
    }

    param := LoginParam{
    return &BaseLoginParam{
        CaptchaToken: regexp.MustCompile(`'captchaToken' value='(.+?)'`).FindStringSubmatch(res.String())[1],
        Lt:           regexp.MustCompile(`lt = "(.+?)"`).FindStringSubmatch(res.String())[1],
        ParamId:      regexp.MustCompile(`paramId = "(.+?)"`).FindStringSubmatch(res.String())[1],
        ReqId:        regexp.MustCompile(`reqId = "(.+?)"`).FindStringSubmatch(res.String())[1],
        // jRsaKey: regexp.MustCompile(`"j_rsaKey" value="(.+?)"`).FindStringSubmatch(res.String())[1],
    }, nil
}

/* Initialize the parameters required for login
 * Returns an error if a captcha is encountered
 */
func (y *Cloud189PC) initLoginParam() error {
    y.loginParam = nil

    baseParam, err := y.initBaseParams()
    if err != nil {
        return err
    }

    y.loginParam = &LoginParam{BaseLoginParam: *baseParam}

    // Fetch the RSA public key
    var encryptConf EncryptConfResp
    _, err = y.client.R().
@@ -377,18 +498,17 @@ func (y *Cloud189PC) initLoginParam() error {
        return err
    }

    param.jRsaKey = fmt.Sprintf("-----BEGIN PUBLIC KEY-----\n%s\n-----END PUBLIC KEY-----", encryptConf.Data.PubKey)
    param.RsaUsername = encryptConf.Data.Pre + RsaEncrypt(param.jRsaKey, y.Username)
    param.RsaPassword = encryptConf.Data.Pre + RsaEncrypt(param.jRsaKey, y.Password)
    y.loginParam = &param
    y.loginParam.jRsaKey = fmt.Sprintf("-----BEGIN PUBLIC KEY-----\n%s\n-----END PUBLIC KEY-----", encryptConf.Data.PubKey)
    y.loginParam.RsaUsername = encryptConf.Data.Pre + RsaEncrypt(y.loginParam.jRsaKey, y.Username)
    y.loginParam.RsaPassword = encryptConf.Data.Pre + RsaEncrypt(y.loginParam.jRsaKey, y.Password)

    // Check whether a captcha is required
    resp, err := y.client.R().
        SetHeader("REQID", param.ReqId).
        SetHeader("REQID", y.loginParam.ReqId).
        SetFormData(map[string]string{
            "appKey":      APP_ID,
            "accountType": ACCOUNT_TYPE,
            "userName": param.RsaUsername,
            "userName": y.loginParam.RsaUsername,
        }).Post(AUTH_URL + "/api/logbox/oauth2/needcaptcha.do")
    if err != nil {
        return err
@@ -400,8 +520,8 @@ func (y *Cloud189PC) initLoginParam() error {
    // Fetch the captcha image
    imgRes, err := y.client.R().
        SetQueryParams(map[string]string{
            "token": param.CaptchaToken,
            "REQID": param.ReqId,
            "token": y.loginParam.CaptchaToken,
            "REQID": y.loginParam.ReqId,
            "rnd": fmt.Sprint(timestamp()),
        }).
        Get(AUTH_URL + "/api/logbox/oauth2/picCaptcha.do")
@@ -428,10 +548,38 @@ func (y *Cloud189PC) initLoginParam() error {
    return nil
}

// initQRCodeParam fetches and returns the QR code
func (y *Cloud189PC) initQRCodeParam() (err error) {
    y.qrcodeParam = nil

    baseParam, err := y.initBaseParams()
    if err != nil {
        return err
    }

    var qrcodeParam QRLoginParam
    _, err = y.client.R().
        SetFormData(map[string]string{"appId": APP_ID}).
        ForceContentType("application/json;charset=UTF-8").
        SetResult(&qrcodeParam).
        Post(AUTH_URL + "/api/logbox/oauth2/getUUID.do")
    if err != nil {
        return err
    }
    qrcodeParam.BaseLoginParam = *baseParam
    y.qrcodeParam = &qrcodeParam

    return y.genQRCode("please scan the QR code with the 189 Cloud app, then save the settings again.")
}

// Refresh the session
func (y *Cloud189PC) refreshSession() (err error) {
    return y.refreshSessionWithRetry(0)
}

func (y *Cloud189PC) refreshSessionWithRetry(retryCount int) (err error) {
    if y.ref != nil {
        return y.ref.refreshSession()
        return y.ref.refreshSessionWithRetry(retryCount)
    }
    var erron RespErr
    var userSessionResp UserSessionResp
@@ -448,37 +596,102 @@ func (y *Cloud189PC) refreshSession() (err error) {
        return err
    }

    // Errors break normal access, so take this storage offline
    defer func() {
        if err != nil {
            y.GetStorage().SetStatus(fmt.Sprintf("%+v", err.Error()))
            op.MustSaveDriverStorage(y)
        }
    }()

    // Refresh the token when it is no longer valid
    if erron.HasError() {
        if erron.ResCode == "UserInvalidOpenToken" {
            if err = y.login(); err != nil {
                return err
            }
        if erron.ResCode == UserInvalidOpenTokenError {
            return y.refreshTokenWithRetry(retryCount)
        }
        return &erron
    }
    y.tokenInfo.UserSessionResp = userSessionResp
    return
    return nil
}

// refreshToken refreshes the token; on failure it returns an error instead of calling login directly
func (y *Cloud189PC) refreshToken() (err error) {
    return y.refreshTokenWithRetry(0)
}

func (y *Cloud189PC) refreshTokenWithRetry(retryCount int) (err error) {
    if y.ref != nil {
        return y.ref.refreshTokenWithRetry(retryCount)
    }

    // Cap the retry count to avoid infinite recursion
    if retryCount >= 3 {
        if y.Addition.RefreshToken != "" {
            y.Addition.RefreshToken = ""
            op.MustSaveDriverStorage(y)
        }
        return errors.New("refresh token failed after maximum retries")
    }

    var erron RespErr
    var tokenInfo AppSessionResp
    _, err = y.client.R().
        SetResult(&tokenInfo).
        ForceContentType("application/json;charset=UTF-8").
        SetError(&erron).
        SetFormData(map[string]string{
            "clientId":     APP_ID,
            "refreshToken": y.tokenInfo.RefreshToken,
            "grantType":    "refresh_token",
            "format":       "json",
        }).
        Post(AUTH_URL + "/api/oauth2/refreshToken.do")
    if err != nil {
        return err
    }

    // If the refresh fails, hand the error back to the caller
    if erron.HasError() {
        if y.Addition.RefreshToken != "" {
            y.Addition.RefreshToken = ""
            op.MustSaveDriverStorage(y)
        }

        // Decide the next step based on the login type
        if y.LoginType == "qrcode" {
            return errors.New("QR code session has expired, please re-scan the code to log in")
        }
        // In password-login mode, fall back to a full login
        return y.login()
    }

    y.Addition.RefreshToken = tokenInfo.RefreshToken
    y.tokenInfo = &tokenInfo
    op.MustSaveDriverStorage(y)
    return y.refreshSessionWithRetry(retryCount + 1)
}

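Note the mutual recursion above: refreshSessionWithRetry falls back to refreshTokenWithRetry on UserInvalidOpenToken, which in turn re-enters refreshSessionWithRetry with retryCount+1, so the shared counter is what guarantees termination. A minimal sketch of that bounded mutual recursion, with hypothetical refreshSession/refreshToken stand-ins:

package main

import (
    "errors"
    "fmt"
)

const maxRetries = 3

// refreshSession and refreshToken call each other; the shared retryCount
// is incremented on exactly one edge of the cycle, so the recursion is bounded.
func refreshSession(retryCount int) error {
    // hypothetical stand-in: pretend the session endpoint rejected our token
    tokenInvalid := true
    if tokenInvalid {
        return refreshToken(retryCount)
    }
    return nil
}

func refreshToken(retryCount int) error {
    if retryCount >= maxRetries {
        return errors.New("refresh token failed after maximum retries")
    }
    // hypothetical stand-in for the real token refresh request
    fmt.Println("refreshing token, attempt", retryCount+1)
    return refreshSession(retryCount + 1)
}

func main() {
    fmt.Println(refreshSession(0)) // terminates after maxRetries attempts
}
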
func (y *Cloud189PC) keepAlive() {
    _, err := y.get(API_URL+"/keepUserSession.action", func(r *resty.Request) {
        r.SetQueryParams(clientSuffix())
    }, nil)
    if err != nil {
        utils.Log.Warnf("189pc: Failed to keep user session alive: %v", err)
        // If keepAlive fails, try to refresh the session
        if refreshErr := y.refreshSession(); refreshErr != nil {
            utils.Log.Errorf("189pc: Failed to refresh session after keepAlive error: %v", refreshErr)
        }
    } else {
        utils.Log.Debugf("189pc: User session kept alive successfully.")
    }
}

// Regular upload
// Files of size 0 cannot be uploaded
func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
    size := file.GetSize()
    sliceSize := partSize(size)
    // File size
    fileSize := file.GetSize()
    // Slice size; must not be the file size
    sliceSize := partSize(fileSize)

    params := Params{
        "parentFolderId": dstDir.GetID(),
        "fileName":       url.QueryEscape(file.GetName()),
        "fileSize": fmt.Sprint(file.GetSize()),
        "sliceSize": fmt.Sprint(sliceSize),
        "fileSize":  fmt.Sprint(fileSize),
        "sliceSize": fmt.Sprint(sliceSize), // must be the specific slice size
        "lazyCheck": "1",
    }

@@ -500,66 +713,100 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
        return nil, err
    }

    threadG, upCtx := errgroup.NewGroupWithContext(ctx, y.uploadThread,
    ss, err := stream.NewStreamSectionReader(file, int(sliceSize), &up)
    if err != nil {
        return nil, err
    }

    threadG, upCtx := errgroup.NewOrderedGroupWithContext(ctx, y.uploadThread,
        retry.Attempts(3),
        retry.Delay(time.Second),
        retry.DelayType(retry.BackOffDelay))

    count := int(size / sliceSize)
    lastPartSize := size % sliceSize
    if lastPartSize > 0 {
        count++
    } else {
    count := 1
    if fileSize > sliceSize {
        count = int((fileSize + sliceSize - 1) / sliceSize)
    }
    lastPartSize := fileSize % sliceSize
    if lastPartSize == 0 {
        lastPartSize = sliceSize
    }
    fileMd5 := utils.MD5.NewFunc()
    silceMd5 := utils.MD5.NewFunc()

    silceMd5Hexs := make([]string, 0, count)
    teeReader := io.TeeReader(file, io.MultiWriter(fileMd5, silceMd5))
    byteSize := sliceSize
    silceMd5 := utils.MD5.NewFunc()
    var writers io.Writer = silceMd5

    fileMd5Hex := file.GetHash().GetHash(utils.MD5)
    var fileMd5 hash.Hash
    if len(fileMd5Hex) != utils.MD5.Width {
        fileMd5 = utils.MD5.NewFunc()
        writers = io.MultiWriter(silceMd5, fileMd5)
    }
    for i := 1; i <= count; i++ {
        if utils.IsCanceled(upCtx) {
            break
        }
        offset := int64((i)-1) * sliceSize
        partSize := sliceSize
        if i == count {
            byteSize = lastPartSize
        }
        byteData := make([]byte, byteSize)
        // Read the chunk
        silceMd5.Reset()
        if _, err := io.ReadFull(teeReader, byteData); err != io.EOF && err != nil {
            return nil, err
            partSize = lastPartSize
        }
        partInfo := ""
        var reader *stream.SectionReader
        var rateLimitedRd io.Reader
        threadG.GoWithLifecycle(errgroup.Lifecycle{
            Before: func(ctx context.Context) error {
                if reader == nil {
                    var err error
                    reader, err = ss.GetSectionReader(offset, partSize)
                    if err != nil {
                        return err
                    }
                    silceMd5.Reset()
                    w, err := utils.CopyWithBuffer(writers, reader)
                    if w != partSize {
                        return fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", partSize, w, err)
                    }
                    // Compute the chunk MD5 and encode it as hex and base64
                    md5Bytes := silceMd5.Sum(nil)
                    silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Bytes)))
                    partInfo = fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes))

                    // Compute the chunk MD5 and encode it as hex and base64
                    md5Bytes := silceMd5.Sum(nil)
                    silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Bytes)))
                    partInfo := fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes))
                    rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
                }
                return nil
            },
            Do: func(ctx context.Context) error {
                reader.Seek(0, io.SeekStart)
                uploadUrls, err := y.GetMultiUploadUrls(ctx, isFamily, initMultiUpload.Data.UploadFileID, partInfo)
                if err != nil {
                    return err
                }

                threadG.Go(func(ctx context.Context) error {
                    uploadUrls, err := y.GetMultiUploadUrls(ctx, isFamily, initMultiUpload.Data.UploadFileID, partInfo)
                    if err != nil {
                        return err
                    }

                    // step 4: upload the slice
                    uploadUrl := uploadUrls[0]
                    _, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false,
                        driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)), isFamily)
                    if err != nil {
                        return err
                    }
                    up(float64(threadG.Success()) * 100 / float64(count))
                    return nil
                })
                // step 4: upload the slice
                uploadUrl := uploadUrls[0]
                _, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, rateLimitedRd, isFamily)
                if err != nil {
                    return err
                }
                up(float64(threadG.Success()) * 100 / float64(count))
                return nil
            },
            After: func(err error) {
                ss.FreeSectionReader(reader)
            },
        },
        )
    }
    if err = threadG.Wait(); err != nil {
        return nil, err
    }

    fileMd5Hex := strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
    if fileMd5 != nil {
        fileMd5Hex = strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
    }
    sliceMd5Hex := fileMd5Hex
    if file.GetSize() > sliceSize {
    if fileSize > sliceSize {
        sliceMd5Hex = strings.ToUpper(utils.GetMD5EncodeStr(strings.Join(silceMd5Hexs, "\n")))
    }

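The rewritten slice bookkeeping replaces the old count/remainder dance with a ceiling division plus a "last part" size that is never zero. A small self-contained check of that arithmetic (the helper name here is illustrative, not from the driver):

package main

import "fmt"

// partCount returns the number of slices for a file, using ceiling division,
// and the size of the final slice, which equals sliceSize when the file
// divides evenly.
func partCount(fileSize, sliceSize int64) (count, lastPartSize int64) {
    count = 1
    if fileSize > sliceSize {
        count = (fileSize + sliceSize - 1) / sliceSize // ceil(fileSize / sliceSize)
    }
    lastPartSize = fileSize % sliceSize
    if lastPartSize == 0 {
        lastPartSize = sliceSize
    }
    return count, lastPartSize
}

func main() {
    fmt.Println(partCount(100, 40)) // 3 20: slices of 40, 40, 20
    fmt.Println(partCount(80, 40))  // 2 40: divides evenly, last slice is full-size
}
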
@@ -620,11 +867,12 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
        cache = tmpF
    }
    sliceSize := partSize(size)
    count := int(size / sliceSize)
    count := 1
    if size > sliceSize {
        count = int((size + sliceSize - 1) / sliceSize)
    }
    lastSliceSize := size % sliceSize
    if lastSliceSize > 0 {
        count++
    } else {
    if lastSliceSize == 0 {
        lastSliceSize = sliceSize
    }

@@ -738,7 +986,8 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
    }

    // step 4: upload the slice
    _, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, io.NewSectionReader(cache, offset, byteSize), isFamily)
    rateLimitedRd := driver.NewLimitedUploadStream(ctx, io.NewSectionReader(cache, offset, byteSize))
    _, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, rateLimitedRd, isFamily)
    if err != nil {
        return err
    }
@@ -820,7 +1069,7 @@ func (y *Cloud189PC) GetMultiUploadUrls(ctx context.Context, isFamily bool, uplo

// Legacy upload; the family cloud does not support overwriting
func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
    tempFile, fileMd5, err := stream.CacheFullInTempFileAndHash(file, utils.MD5)
    tempFile, fileMd5, err := stream.CacheFullAndHash(file, &up, utils.MD5)
    if err != nil {
        return nil, err
    }
@@ -3,7 +3,9 @@ package alias
import (
    "context"
    "errors"
    "fmt"
    "io"
    "net/url"
    stdpath "path"
    "strings"

@@ -11,8 +13,11 @@ import (
    "github.com/OpenListTeam/OpenList/v4/internal/errs"
    "github.com/OpenListTeam/OpenList/v4/internal/fs"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/internal/op"
    "github.com/OpenListTeam/OpenList/v4/internal/sign"
    "github.com/OpenListTeam/OpenList/v4/internal/stream"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
    "github.com/OpenListTeam/OpenList/v4/server/common"
)

type Alias struct {
@@ -74,13 +79,45 @@ func (d *Alias) Get(ctx context.Context, path string) (model.Obj, error) {
    if !ok {
        return nil, errs.ObjectNotFound
    }
    var ret *model.Object
    provider := ""
    for _, dst := range dsts {
        obj, err := d.get(ctx, path, dst, sub)
        if err == nil {
            return obj, nil
        rawPath := stdpath.Join(dst, sub)
        obj, err := fs.Get(ctx, rawPath, &fs.GetArgs{NoLog: true})
        if err != nil {
            continue
        }
        storage, err := fs.GetStorage(rawPath, &fs.GetStoragesArgs{})
        if ret == nil {
            ret = &model.Object{
                Path:     path,
                Name:     obj.GetName(),
                Size:     obj.GetSize(),
                Modified: obj.ModTime(),
                IsFolder: obj.IsDir(),
                HashInfo: obj.GetHash(),
            }
            if !d.ProviderPassThrough || err != nil {
                break
            }
            provider = storage.Config().Name
        } else if err != nil || provider != storage.GetStorage().Driver {
            provider = ""
            break
        }
    }
    return nil, errs.ObjectNotFound
    if ret == nil {
        return nil, errs.ObjectNotFound
    }
    if provider != "" {
        return &model.ObjectProvider{
            Object: *ret,
            Provider: model.Provider{
                Provider: provider,
            },
        }, nil
    }
    return ret, nil
}

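The refactored Get resolves the alias against each destination in order, keeps the first hit, and only reports a provider name when every matching destination is backed by the same driver. A compact sketch of that "consensus or blank" reduction, with types simplified and names illustrative:

package main

import "fmt"

// providerConsensus returns the provider shared by all candidates,
// or "" as soon as two candidates disagree.
func providerConsensus(candidates []string) string {
    provider := ""
    for i, p := range candidates {
        if i == 0 {
            provider = p
            continue
        }
        if p != provider {
            return "" // mixed backends: do not claim a single provider
        }
    }
    return provider
}

func main() {
    fmt.Println(providerConsensus([]string{"Local", "Local"})) // Local
    fmt.Println(providerConsensus([]string{"Local", "S3"}))    // (empty)
}
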
func (d *Alias) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
@@ -96,7 +133,27 @@ func (d *Alias) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
    var objs []model.Obj
    fsArgs := &fs.ListArgs{NoLog: true, Refresh: args.Refresh}
    for _, dst := range dsts {
        tmp, err := d.list(ctx, dst, sub, fsArgs)
        tmp, err := fs.List(ctx, stdpath.Join(dst, sub), fsArgs)
        if err == nil {
            tmp, err = utils.SliceConvert(tmp, func(obj model.Obj) (model.Obj, error) {
                thumb, ok := model.GetThumb(obj)
                objRes := model.Object{
                    Name:     obj.GetName(),
                    Size:     obj.GetSize(),
                    Modified: obj.ModTime(),
                    IsFolder: obj.IsDir(),
                }
                if !ok {
                    return &objRes, nil
                }
                return &model.ObjThumb{
                    Object: objRes,
                    Thumbnail: model.Thumbnail{
                        Thumbnail: thumb,
                    },
                }, nil
            })
        }
        if err == nil {
            objs = append(objs, tmp...)
        }
@@ -110,25 +167,78 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
    if !ok {
        return nil, errs.ObjectNotFound
    }
    // proxy || ftp,s3
    if common.GetApiUrl(ctx) == "" {
        args.Redirect = false
    }
    for _, dst := range dsts {
        link, err := d.link(ctx, dst, sub, args)
        if err == nil {
            if !args.Redirect && len(link.URL) > 0 {
                // Normally, concurrent downloads only work for drivers that return a URL
                // Nesting an alias inside another alias lets drivers that do not return a URL (crypt, mega, ...) support concurrency
                if d.DownloadConcurrency > 0 {
                    link.Concurrency = d.DownloadConcurrency
                }
                if d.DownloadPartSize > 0 {
                    link.PartSize = d.DownloadPartSize * utils.KB
                }
            }
            return link, nil
        reqPath := stdpath.Join(dst, sub)
        link, fi, err := d.link(ctx, reqPath, args)
        if err != nil {
            continue
        }
        if link == nil {
            // A redirect was requested but the request must go through the proxy
            return &model.Link{
                URL: fmt.Sprintf("%s/p%s?sign=%s",
                    common.GetApiUrl(ctx),
                    utils.EncodePath(reqPath, true),
                    sign.Sign(reqPath)),
            }, nil
        }

        resultLink := *link
        resultLink.SyncClosers = utils.NewSyncClosers(link)
        if args.Redirect {
            return &resultLink, nil
        }

        if resultLink.ContentLength == 0 {
            resultLink.ContentLength = fi.GetSize()
        }
        if resultLink.MFile != nil {
            return &resultLink, nil
        }
        if d.DownloadConcurrency > 0 {
            resultLink.Concurrency = d.DownloadConcurrency
        }
        if d.DownloadPartSize > 0 {
            resultLink.PartSize = d.DownloadPartSize * utils.KB
        }
        return &resultLink, nil
    }
    return nil, errs.ObjectNotFound
}

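When the target must be proxied, the rewritten Link builds a signed /p URL instead of handing out the backend link. A minimal sketch of that URL shape with an HMAC-style signer; the signer and base URL here are illustrative, and OpenList's own sign package implements the real scheme:

package main

import (
    "crypto/hmac"
    "crypto/sha256"
    "encoding/base64"
    "fmt"
)

// signPath is an illustrative HMAC signer; it does not claim to reproduce
// the project's actual signing algorithm.
func signPath(secret, path string) string {
    mac := hmac.New(sha256.New, []byte(secret))
    mac.Write([]byte(path))
    return base64.URLEncoding.EncodeToString(mac.Sum(nil))
}

func main() {
    apiURL := "https://example.com" // hypothetical server address
    reqPath := "/movies/demo.mkv"   // the real code percent-encodes this via utils.EncodePath
    fmt.Printf("%s/p%s?sign=%s\n", apiURL, reqPath, signPath("secret", reqPath))
}
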
func (d *Alias) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
    root, sub := d.getRootAndPath(args.Obj.GetPath())
    dsts, ok := d.pathMap[root]
    if !ok {
        return nil, errs.ObjectNotFound
    }
    for _, dst := range dsts {
        rawPath := stdpath.Join(dst, sub)
        storage, actualPath, err := op.GetStorageAndActualPath(rawPath)
        if err != nil {
            continue
        }
        other, ok := storage.(driver.Other)
        if !ok {
            continue
        }
        obj, err := op.GetUnwrap(ctx, storage, actualPath)
        if err != nil {
            continue
        }
        return other.Other(ctx, model.OtherArgs{
            Obj:    obj,
            Method: args.Method,
            Data:   args.Data,
        })
    }
    return nil, errs.NotImplement
}

func (d *Alias) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
    if !d.Writable {
        return errs.PermissionDenied
@@ -166,7 +276,8 @@ func (d *Alias) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
    }
    if len(srcPath) == len(dstPath) {
        for i := range srcPath {
            err = errors.Join(err, fs.Move(ctx, *srcPath[i], *dstPath[i]))
            _, e := fs.Move(ctx, *srcPath[i], *dstPath[i])
            err = errors.Join(err, e)
        }
        return err
    } else {
@@ -250,20 +361,29 @@ func (d *Alias) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer,
    reqPath, err := d.getReqPath(ctx, dstDir, true)
    if err == nil {
        if len(reqPath) == 1 {
            return fs.PutDirectly(ctx, *reqPath[0], s)
        } else {
            defer s.Close()
            file, err := s.CacheFullInTempFile()
            storage, reqActualPath, err := op.GetStorageAndActualPath(*reqPath[0])
            if err != nil {
                return err
            }
            for _, path := range reqPath {
            return op.Put(ctx, storage, reqActualPath, &stream.FileStream{
                Obj:      s,
                Mimetype: s.GetMimetype(),
                Reader:   s,
            }, up)
        } else {
            file, err := s.CacheFullAndWriter(nil, nil)
            if err != nil {
                return err
            }
            count := float64(len(reqPath) + 1)
            up(100 / count)
            for i, path := range reqPath {
                err = errors.Join(err, fs.PutDirectly(ctx, *path, &stream.FileStream{
                    Obj:          s,
                    Mimetype:     s.GetMimetype(),
                    WebPutAsTask: s.NeedStore(),
                    Reader:       file,
                    Obj:      s,
                    Mimetype: s.GetMimetype(),
                    Reader:   file,
                }))
                up(float64(i+2) / float64(count) * 100)
                _, e := file.Seek(0, io.SeekStart)
                if e != nil {
                    return errors.Join(err, e)
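The multi-destination Put caches the stream once, then replays it for each alias target by seeking back to the start between uploads. The same rewind pattern in miniature, using a bytes.Reader in place of the cached stream:

package main

import (
    "bytes"
    "fmt"
    "io"
    "strings"
)

// fanOut writes one seekable source to several destinations,
// rewinding to the start before each copy.
func fanOut(src io.ReadSeeker, dsts []io.Writer) error {
    for _, dst := range dsts {
        if _, err := src.Seek(0, io.SeekStart); err != nil {
            return err
        }
        if _, err := io.Copy(dst, src); err != nil {
            return err
        }
    }
    return nil
}

func main() {
    src := bytes.NewReader([]byte("payload"))
    var a, b strings.Builder
    if err := fanOut(src, []io.Writer{&a, &b}); err != nil {
        panic(err)
    }
    fmt.Println(a.String(), b.String()) // payload payload
}
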
@@ -335,18 +455,24 @@ func (d *Alias) Extract(ctx context.Context, obj model.Obj, args model.ArchiveIn
        return nil, errs.ObjectNotFound
    }
    for _, dst := range dsts {
        link, err := d.extract(ctx, dst, sub, args)
        if err == nil {
            if !args.Redirect && len(link.URL) > 0 {
                if d.DownloadConcurrency > 0 {
                    link.Concurrency = d.DownloadConcurrency
                }
                if d.DownloadPartSize > 0 {
                    link.PartSize = d.DownloadPartSize * utils.KB
                }
            }
            return link, nil
        reqPath := stdpath.Join(dst, sub)
        link, err := d.extract(ctx, reqPath, args)
        if err != nil {
            continue
        }
        if link == nil {
            return &model.Link{
                URL: fmt.Sprintf("%s/ap%s?inner=%s&pass=%s&sign=%s",
                    common.GetApiUrl(ctx),
                    utils.EncodePath(reqPath, true),
                    utils.EncodePath(args.InnerPath, true),
                    url.QueryEscape(args.Password),
                    sign.SignArchive(reqPath)),
            }, nil
        }
        resultLink := *link
        resultLink.SyncClosers = utils.NewSyncClosers(link)
        return &resultLink, nil
    }
    return nil, errs.NotImplement
}

@@ -15,6 +15,7 @@ type Addition struct {
    DownloadConcurrency int  `json:"download_concurrency" default:"0" required:"false" type:"number" help:"Need to enable proxy"`
    DownloadPartSize    int  `json:"download_part_size" default:"0" type:"number" required:"false" help:"Need to enable proxy. Unit: KB"`
    Writable            bool `json:"writable" type:"bool" default:"false"`
    ProviderPassThrough bool `json:"provider_pass_through" type:"bool" default:"false"`
}

var config = driver.Config{

@@ -2,8 +2,6 @@ package alias

import (
    "context"
    "fmt"
    "net/url"
    stdpath "path"
    "strings"

@@ -12,8 +10,6 @@ import (
    "github.com/OpenListTeam/OpenList/v4/internal/fs"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/internal/op"
    "github.com/OpenListTeam/OpenList/v4/internal/sign"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
    "github.com/OpenListTeam/OpenList/v4/server/common"
)

@@ -54,79 +50,22 @@ func (d *Alias) getRootAndPath(path string) (string, string) {
    return parts[0], parts[1]
}

func (d *Alias) get(ctx context.Context, path string, dst, sub string) (model.Obj, error) {
    obj, err := fs.Get(ctx, stdpath.Join(dst, sub), &fs.GetArgs{NoLog: true})
    if err != nil {
        return nil, err
    }
    return &model.Object{
        Path:     path,
        Name:     obj.GetName(),
        Size:     obj.GetSize(),
        Modified: obj.ModTime(),
        IsFolder: obj.IsDir(),
        HashInfo: obj.GetHash(),
    }, nil
}

func (d *Alias) list(ctx context.Context, dst, sub string, args *fs.ListArgs) ([]model.Obj, error) {
    objs, err := fs.List(ctx, stdpath.Join(dst, sub), args)
    // the obj must implement the model.SetPath interface
    // return objs, err
    if err != nil {
        return nil, err
    }
    return utils.SliceConvert(objs, func(obj model.Obj) (model.Obj, error) {
        thumb, ok := model.GetThumb(obj)
        objRes := model.Object{
            Name:     obj.GetName(),
            Size:     obj.GetSize(),
            Modified: obj.ModTime(),
            IsFolder: obj.IsDir(),
        }
        if !ok {
            return &objRes, nil
        }
        return &model.ObjThumb{
            Object: objRes,
            Thumbnail: model.Thumbnail{
                Thumbnail: thumb,
            },
        }, nil
    })
}

func (d *Alias) link(ctx context.Context, dst, sub string, args model.LinkArgs) (*model.Link, error) {
    reqPath := stdpath.Join(dst, sub)
// Modeled on the crypt driver
func (d *Alias) link(ctx context.Context, reqPath string, args model.LinkArgs) (*model.Link, model.Obj, error) {
    storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
    if err != nil {
        return nil, err
        return nil, nil, err
    }
    useRawLink := len(common.GetApiUrl(ctx)) == 0 // ftp, s3
    if !useRawLink {
        _, ok := storage.(*Alias)
        useRawLink = !ok && !args.Redirect
    if !args.Redirect {
        return op.Link(ctx, storage, reqActualPath, args)
    }
    if useRawLink {
        link, _, err := op.Link(ctx, storage, reqActualPath, args)
        return link, err
    }
    _, err = fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
    obj, err := fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
    if err != nil {
        return nil, err
        return nil, nil, err
    }
    if common.ShouldProxy(storage, stdpath.Base(sub)) {
        link := &model.Link{
            URL: fmt.Sprintf("%s/p%s?sign=%s",
                common.GetApiUrl(ctx),
                utils.EncodePath(reqPath, true),
                sign.Sign(reqPath)),
        }
        return link, nil
    if common.ShouldProxy(storage, stdpath.Base(reqPath)) {
        return nil, obj, nil
    }
    link, _, err := op.Link(ctx, storage, reqActualPath, args)
    return link, err
    return op.Link(ctx, storage, reqActualPath, args)
}

func (d *Alias) getReqPath(ctx context.Context, obj model.Obj, isParent bool) ([]*string, error) {
@@ -197,8 +136,7 @@ func (d *Alias) listArchive(ctx context.Context, dst, sub string, args model.Arc
    return nil, errs.NotImplement
}

func (d *Alias) extract(ctx context.Context, dst, sub string, args model.ArchiveInnerArgs) (*model.Link, error) {
    reqPath := stdpath.Join(dst, sub)
func (d *Alias) extract(ctx context.Context, reqPath string, args model.ArchiveInnerArgs) (*model.Link, error) {
    storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
    if err != nil {
        return nil, err
@@ -206,20 +144,12 @@ func (d *Alias) extract(ctx context.Context, dst, sub string, args model.Archive
    if _, ok := storage.(driver.ArchiveReader); !ok {
        return nil, errs.NotImplement
    }
    if args.Redirect && common.ShouldProxy(storage, stdpath.Base(sub)) {
        _, err = fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
        if err != nil {
    if args.Redirect && common.ShouldProxy(storage, stdpath.Base(reqPath)) {
        _, err := fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
        if err == nil {
            return nil, err
        }
        link := &model.Link{
            URL: fmt.Sprintf("%s/ap%s?inner=%s&pass=%s&sign=%s",
                common.GetApiUrl(ctx),
                utils.EncodePath(reqPath, true),
                utils.EncodePath(args.InnerPath, true),
                url.QueryEscape(args.Password),
                sign.SignArchive(reqPath)),
        }
        return link, nil
        return nil, nil
    }
    link, _, err := op.DriverExtract(ctx, storage, reqActualPath, args)
    return link, err

@@ -165,7 +165,7 @@ func (d *AliDrive) Remove(ctx context.Context, obj model.Obj) error {
}

func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.FileStreamer, up driver.UpdateProgress) error {
    file := stream.FileStream{
    file := &stream.FileStream{
        Obj:      streamer,
        Reader:   streamer,
        Mimetype: streamer.GetMimetype(),
@@ -209,7 +209,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
        io.Closer
    }{
        Reader: io.MultiReader(buf, file),
        Closer: &file,
        Closer: file,
    }
}
} else {
@@ -297,11 +297,10 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
    if d.InternalUpload {
        url = partInfo.InternalUploadUrl
    }
    req, err := http.NewRequest("PUT", url, io.LimitReader(rateLimited, DEFAULT))
    req, err := http.NewRequestWithContext(ctx, http.MethodPut, url, io.LimitReader(rateLimited, DEFAULT))
    if err != nil {
        return err
    }
    req = req.WithContext(ctx)
    res, err := base.HttpClient.Do(req)
    if err != nil {
        return err
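The AliDrive change swaps http.NewRequest plus req.WithContext for a single http.NewRequestWithContext call, which attaches the context at construction time instead of shallow-copying the request afterwards. A minimal sketch of the pattern:

package main

import (
    "context"
    "fmt"
    "net/http"
    "strings"
    "time"
)

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    // One call attaches the cancellable context to the request up front;
    // the older two-step form built the request and then copied it with WithContext.
    req, err := http.NewRequestWithContext(ctx, http.MethodPut,
        "https://example.com/upload", strings.NewReader("chunk"))
    if err != nil {
        panic(err)
    }
    fmt.Println(req.Method, req.URL) // the request is canceled when ctx expires
}
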
@@ -3,7 +3,6 @@ package aliyundrive_open

import (
    "context"
    "errors"
    "fmt"
    "net/http"
    "path/filepath"
    "time"
@@ -13,7 +12,6 @@ import (
    "github.com/OpenListTeam/OpenList/v4/internal/errs"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
    "github.com/OpenListTeam/rateg"
    "github.com/go-resty/resty/v2"
    log "github.com/sirupsen/logrus"
)
@@ -24,9 +22,8 @@ type AliyundriveOpen struct {

    DriveId string

    limitList func(ctx context.Context, data base.Json) (*Files, error)
    limitLink func(ctx context.Context, file model.Obj) (*model.Link, error)
    ref       *AliyundriveOpen
    limiter *limiter
    ref     *AliyundriveOpen
}

func (d *AliyundriveOpen) Config() driver.Config {
@@ -38,25 +35,23 @@ func (d *AliyundriveOpen) GetAddition() driver.Additional {
}

func (d *AliyundriveOpen) Init(ctx context.Context) error {
    d.limiter = getLimiterForUser(globalLimiterUserID) // First create a globally shared limiter to limit the initial requests.
    if d.LIVPDownloadFormat == "" {
        d.LIVPDownloadFormat = "jpeg"
    }
    if d.DriveType == "" {
        d.DriveType = "default"
    }
    res, err := d.request("/adrive/v1.0/user/getDriveInfo", http.MethodPost, nil)
    res, err := d.request(ctx, limiterOther, "/adrive/v1.0/user/getDriveInfo", http.MethodPost, nil)
    if err != nil {
        d.limiter.free()
        d.limiter = nil
        return err
    }
    d.DriveId = utils.Json.Get(res, d.DriveType+"_drive_id").ToString()
    d.limitList = rateg.LimitFnCtx(d.list, rateg.LimitFnOption{
        Limit:  4,
        Bucket: 1,
    })
    d.limitLink = rateg.LimitFnCtx(d.link, rateg.LimitFnOption{
        Limit:  1,
        Bucket: 1,
    })
    userid := utils.Json.Get(res, "user_id").ToString()
    d.limiter.free()
    d.limiter = getLimiterForUser(userid) // Allocate a corresponding limiter for each user.
    return nil
}

@@ -70,6 +65,8 @@ func (d *AliyundriveOpen) InitReference(storage driver.Driver) error {
}

func (d *AliyundriveOpen) Drop(ctx context.Context) error {
    d.limiter.free()
    d.limiter = nil
    d.ref = nil
    return nil
}
@@ -87,9 +84,6 @@ func (d *AliyundriveOpen) GetRoot(ctx context.Context) (model.Obj, error) {
}

func (d *AliyundriveOpen) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
    if d.limitList == nil {
        return nil, fmt.Errorf("driver not init")
    }
    files, err := d.getFiles(ctx, dir.GetID())
    if err != nil {
        return nil, err
@@ -107,8 +101,8 @@ func (d *AliyundriveOpen) List(ctx context.Context, dir model.Obj, args model.Li
    return objs, err
}

func (d *AliyundriveOpen) link(ctx context.Context, file model.Obj) (*model.Link, error) {
    res, err := d.request("/adrive/v1.0/openFile/getDownloadUrl", http.MethodPost, func(req *resty.Request) {
func (d *AliyundriveOpen) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
    res, err := d.request(ctx, limiterLink, "/adrive/v1.0/openFile/getDownloadUrl", http.MethodPost, func(req *resty.Request) {
        req.SetBody(base.Json{
            "drive_id": d.DriveId,
            "file_id":  file.GetID(),
@@ -132,17 +126,10 @@ func (d *AliyundriveOpen) link(ctx context.Context, file model.Obj) (*model.Link
    }, nil
}

func (d *AliyundriveOpen) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
    if d.limitLink == nil {
        return nil, fmt.Errorf("driver not init")
    }
    return d.limitLink(ctx, file)
}

func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
    nowTime, _ := getNowTime()
    newDir := File{CreatedAt: nowTime, UpdatedAt: nowTime}
    _, err := d.request("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
    _, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
        req.SetBody(base.Json{
            "drive_id":       d.DriveId,
            "parent_file_id": parentDir.GetID(),
@@ -168,7 +155,7 @@ func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirN

func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
    var resp MoveOrCopyResp
    _, err := d.request("/adrive/v1.0/openFile/move", http.MethodPost, func(req *resty.Request) {
    _, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/move", http.MethodPost, func(req *resty.Request) {
        req.SetBody(base.Json{
            "drive_id": d.DriveId,
            "file_id":  srcObj.GetID(),
@@ -198,7 +185,7 @@ func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (m

func (d *AliyundriveOpen) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
    var newFile File
    _, err := d.request("/adrive/v1.0/openFile/update", http.MethodPost, func(req *resty.Request) {
    _, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/update", http.MethodPost, func(req *resty.Request) {
        req.SetBody(base.Json{
            "drive_id": d.DriveId,
            "file_id":  srcObj.GetID(),
@@ -230,7 +217,7 @@ func (d *AliyundriveOpen) Rename(ctx context.Context, srcObj model.Obj, newName

func (d *AliyundriveOpen) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
    var resp MoveOrCopyResp
    _, err := d.request("/adrive/v1.0/openFile/copy", http.MethodPost, func(req *resty.Request) {
    _, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/copy", http.MethodPost, func(req *resty.Request) {
        req.SetBody(base.Json{
            "drive_id": d.DriveId,
            "file_id":  srcObj.GetID(),
@@ -256,7 +243,7 @@ func (d *AliyundriveOpen) Remove(ctx context.Context, obj model.Obj) error {
    if d.RemoveWay == "delete" {
        uri = "/adrive/v1.0/openFile/delete"
    }
    _, err := d.request(uri, http.MethodPost, func(req *resty.Request) {
    _, err := d.request(ctx, limiterOther, uri, http.MethodPost, func(req *resty.Request) {
        req.SetBody(base.Json{
            "drive_id": d.DriveId,
            "file_id":  obj.GetID(),
@@ -295,7 +282,7 @@ func (d *AliyundriveOpen) Other(ctx context.Context, args model.OtherArgs) (inte
    default:
        return nil, errs.NotSupport
    }
    _, err := d.request(uri, http.MethodPost, func(req *resty.Request) {
    _, err := d.request(ctx, limiterOther, uri, http.MethodPost, func(req *resty.Request) {
        req.SetBody(data).SetResult(&resp)
    })
    if err != nil {
@@ -304,6 +291,21 @@ func (d *AliyundriveOpen) Other(ctx context.Context, args model.OtherArgs) (inte
    return resp, nil
}

func (d *AliyundriveOpen) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    res, err := d.request(ctx, limiterOther, "/adrive/v1.0/user/getSpaceInfo", http.MethodPost, nil)
    if err != nil {
        return nil, err
    }
    total := utils.Json.Get(res, "personal_space_info", "total_size").ToUint64()
    used := utils.Json.Get(res, "personal_space_info", "used_size").ToUint64()
    return &model.StorageDetails{
        DiskUsage: model.DiskUsage{
            TotalSpace: total,
            FreeSpace:  total - used,
        },
    }, nil
}

var _ driver.Driver = (*AliyundriveOpen)(nil)
var _ driver.MkdirResult = (*AliyundriveOpen)(nil)
var _ driver.MoveResult = (*AliyundriveOpen)(nil)

96
drivers/aliyundrive_open/limiter.go
Normal file
@@ -0,0 +1,96 @@
package aliyundrive_open

import (
    "context"
    "fmt"
    "sync"

    "golang.org/x/time/rate"
)

// See document https://www.yuque.com/aliyundrive/zpfszx/mqocg38hlxzc5vcd
// See issue https://github.com/OpenListTeam/OpenList/issues/724
// We got limit per user per app, so the limiter should be global.

type limiterType int

const (
    limiterList limiterType = iota
    limiterLink
    limiterOther
)

const (
    listRateLimit  = 3.9  // 4 per second in document, but we use 3.9 per second to be safe
    linkRateLimit  = 0.9  // 1 per second in document, but we use 0.9 per second to be safe
    otherRateLimit = 14.9 // 15 per second in document, but we use 14.9 per second to be safe
    globalLimiterUserID = "" // Global limiter user ID, used to limit the initial requests.
)

type limiter struct {
    usedBy int
    list   *rate.Limiter
    link   *rate.Limiter
    other  *rate.Limiter
}

var limiters = make(map[string]*limiter)
var limitersLock = &sync.Mutex{}

func getLimiterForUser(userid string) *limiter {
    limitersLock.Lock()
    defer limitersLock.Unlock()
    defer func() {
        // Clean up limiters that are no longer used.
        for id, lim := range limiters {
            if lim.usedBy <= 0 && id != globalLimiterUserID { // Do not delete the global limiter.
                delete(limiters, id)
            }
        }
    }()
    if lim, ok := limiters[userid]; ok {
        lim.usedBy++
        return lim
    }
    lim := &limiter{
        usedBy: 1,
        list:   rate.NewLimiter(rate.Limit(listRateLimit), 1),
        link:   rate.NewLimiter(rate.Limit(linkRateLimit), 1),
        other:  rate.NewLimiter(rate.Limit(otherRateLimit), 1),
    }
    limiters[userid] = lim
    return lim
}

func (l *limiter) wait(ctx context.Context, typ limiterType) error {
    if l == nil {
        return fmt.Errorf("driver not init")
    }
    switch typ {
    case limiterList:
        return l.list.Wait(ctx)
    case limiterLink:
        return l.link.Wait(ctx)
    case limiterOther:
        return l.other.Wait(ctx)
    default:
        return fmt.Errorf("unknown limiter type")
    }
}
func (l *limiter) free() {
    if l == nil {
        return
    }
    limitersLock.Lock()
    defer limitersLock.Unlock()
    l.usedBy--
}
func (d *AliyundriveOpen) wait(ctx context.Context, typ limiterType) error {
    if d == nil {
        return fmt.Errorf("driver not init")
    }
    if d.ref != nil {
        return d.ref.wait(ctx, typ) // If this is a reference driver, wait on the reference driver.
    }
    return d.limiter.wait(ctx, typ)
}
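The new limiter keys token buckets by user ID and reference-counts them so storages sharing one account share one quota. A minimal standalone demonstration of the same golang.org/x/time/rate usage (the rate here is chosen to match the link limiter above, purely for illustration):

package main

import (
    "context"
    "fmt"
    "time"

    "golang.org/x/time/rate"
)

func main() {
    // ~1 request per second with a burst of 1, like the link limiter above.
    lim := rate.NewLimiter(rate.Limit(0.9), 1)

    start := time.Now()
    for i := 0; i < 3; i++ {
        // Wait blocks until a token is available or the context is canceled.
        if err := lim.Wait(context.Background()); err != nil {
            panic(err)
        }
        fmt.Printf("request %d at %v\n", i+1, time.Since(start).Round(time.Millisecond))
    }
}
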
@@ -12,6 +12,7 @@ type Addition struct {
    OrderBy        string `json:"order_by" type:"select" options:"name,size,updated_at,created_at"`
    OrderDirection string `json:"order_direction" type:"select" options:"ASC,DESC"`
    UseOnlineAPI   bool   `json:"use_online_api" default:"true"`
    AlipanType     string `json:"alipan_type" required:"true" type:"select" default:"default" options:"default,alipanTV"`
    APIAddress     string `json:"api_url_address" default:"https://api.oplist.org/alicloud/renewapi"`
    ClientID       string `json:"client_id" help:"Keep it empty if you don't have one"`
    ClientSecret   string `json:"client_secret" help:"Keep it empty if you don't have one"`
@@ -24,12 +25,6 @@ type Addition struct {

var config = driver.Config{
    Name:              "AliyundriveOpen",
    LocalSort:         false,
    OnlyLocal:         false,
    OnlyProxy:         false,
    NoCache:           false,
    NoUpload:          false,
    NeedMs:            false,
    DefaultRoot:       "root",
    NoOverwriteUpload: true,
}

@@ -50,10 +50,10 @@ func calPartSize(fileSize int64) int64 {
    return partSize
}

func (d *AliyundriveOpen) getUploadUrl(count int, fileId, uploadId string) ([]PartInfo, error) {
func (d *AliyundriveOpen) getUploadUrl(ctx context.Context, count int, fileId, uploadId string) ([]PartInfo, error) {
    partInfoList := makePartInfos(count)
    var resp CreateResp
    _, err := d.request("/adrive/v1.0/openFile/getUploadUrl", http.MethodPost, func(req *resty.Request) {
    _, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/getUploadUrl", http.MethodPost, func(req *resty.Request) {
        req.SetBody(base.Json{
            "drive_id": d.DriveId,
            "file_id":  fileId,
@@ -69,7 +69,7 @@ func (d *AliyundriveOpen) uploadPart(ctx context.Context, r io.Reader, partInfo
    if d.InternalUpload {
        uploadUrl = strings.ReplaceAll(uploadUrl, "https://cn-beijing-data.aliyundrive.net/", "http://ccp-bj29-bj-1592982087.oss-cn-beijing-internal.aliyuncs.com/")
    }
    req, err := http.NewRequestWithContext(ctx, "PUT", uploadUrl, r)
    req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, r)
    if err != nil {
        return err
    }
@@ -84,10 +84,10 @@ func (d *AliyundriveOpen) uploadPart(ctx context.Context, r io.Reader, partInfo
    return nil
}

func (d *AliyundriveOpen) completeUpload(fileId, uploadId string) (model.Obj, error) {
func (d *AliyundriveOpen) completeUpload(ctx context.Context, fileId, uploadId string) (model.Obj, error) {
    // 3. complete
    var newFile File
    _, err := d.request("/adrive/v1.0/openFile/complete", http.MethodPost, func(req *resty.Request) {
    _, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/complete", http.MethodPost, func(req *resty.Request) {
        req.SetBody(base.Json{
            "drive_id": d.DriveId,
            "file_id":  fileId,
@@ -137,11 +137,8 @@ func (d *AliyundriveOpen) calProofCode(stream model.FileStreamer) (string, error
    }
    buf := make([]byte, length)
    n, err := io.ReadFull(reader, buf)
    if err == io.ErrUnexpectedEOF {
        return "", fmt.Errorf("can't read data, expected=%d, got=%d", len(buf), n)
    }
    if err != nil {
        return "", err
    if n != int(length) {
        return "", fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", length, n, err)
    }
    return base64.StdEncoding.EncodeToString(buf), nil
}
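The calProofCode change collapses the two error branches into a single length check, which works because io.ReadFull guarantees n == len(buf) exactly when err == nil. A small demonstration of that contract:

package main

import (
    "fmt"
    "io"
    "strings"
)

func main() {
    buf := make([]byte, 8)

    // Source shorter than the buffer: ReadFull returns io.ErrUnexpectedEOF
    // and n < len(buf), so a single "n != len(buf)" check catches it.
    n, err := io.ReadFull(strings.NewReader("abc"), buf)
    if n != len(buf) {
        fmt.Printf("failed to read all data: (expect =%d, actual =%d) %v\n", len(buf), n, err)
    }

    // Source long enough: err is nil and n == len(buf).
    n, err = io.ReadFull(strings.NewReader("abcdefgh"), buf)
    fmt.Println(n, err) // 8 <nil>
}
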
@@ -183,7 +180,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
        createData["pre_hash"] = hash
    }
    var createResp CreateResp
    _, err, e := d.requestReturnErrResp("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
    _, err, e := d.requestReturnErrResp(ctx, limiterOther, "/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
        req.SetBody(createData).SetResult(&createResp)
    })
    if err != nil {
@@ -194,7 +191,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m

    hash := stream.GetHash().GetHash(utils.SHA1)
    if len(hash) != utils.SHA1.Width {
        _, hash, err = streamPkg.CacheFullInTempFileAndHash(stream, utils.SHA1)
        _, hash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA1)
        if err != nil {
            return nil, err
        }
@@ -208,7 +205,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
    if err != nil {
        return nil, fmt.Errorf("cal proof code error: %s", err.Error())
    }
    _, err = d.request("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
    _, err = d.request(ctx, limiterOther, "/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
        req.SetBody(createData).SetResult(&createResp)
    })
    if err != nil {
@@ -219,17 +216,20 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
    if !createResp.RapidUpload {
        // 2. normal upload
        log.Debugf("[aliyundive_open] normal upload")
        ss, err := streamPkg.NewStreamSectionReader(stream, int(partSize), &up)
        if err != nil {
            return nil, err
        }

        preTime := time.Now()
        var offset, length int64 = 0, partSize
        //var length
        for i := 0; i < len(createResp.PartInfoList); i++ {
            if utils.IsCanceled(ctx) {
                return nil, ctx.Err()
            }
            // refresh upload url if 50 minutes passed
            if time.Since(preTime) > 50*time.Minute {
                createResp.PartInfoList, err = d.getUploadUrl(count, createResp.FileId, createResp.UploadId)
                createResp.PartInfoList, err = d.getUploadUrl(ctx, count, createResp.FileId, createResp.UploadId)
                if err != nil {
                    return nil, err
                }
@@ -238,22 +238,19 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
            if remain := stream.GetSize() - offset; length > remain {
                length = remain
            }
            rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
            if rapidUpload {
                srd, err := stream.RangeRead(http_range.Range{Start: offset, Length: length})
                if err != nil {
                    return nil, err
                }
                rd = utils.NewMultiReadable(srd)
            rd, err := ss.GetSectionReader(offset, length)
            if err != nil {
                return nil, err
            }
            rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
            err = retry.Do(func() error {
                _ = rd.Reset()
                rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
                rd.Seek(0, io.SeekStart)
                return d.uploadPart(ctx, rateLimitedRd, createResp.PartInfoList[i])
            },
                retry.Attempts(3),
                retry.DelayType(retry.BackOffDelay),
                retry.Delay(time.Second))
            ss.FreeSectionReader(rd)
            if err != nil {
                return nil, err
            }
@@ -266,5 +263,5 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m

    log.Debugf("[aliyundrive_open] create file success, resp: %+v", createResp)
    // 3. complete
    return d.completeUpload(createResp.FileId, createResp.UploadId)
    return d.completeUpload(ctx, createResp.FileId, createResp.UploadId)
}
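Each part now comes from a reusable section reader that is rewound with Seek(0, io.SeekStart) before every retry attempt, so a failed attempt re-sends identical bytes. The shape of that retry loop, sketched with github.com/avast/retry-go and a bytes.Reader standing in for the section reader (uploadPart here is a hypothetical stand-in):

package main

import (
    "bytes"
    "errors"
    "fmt"
    "io"
    "time"

    "github.com/avast/retry-go"
)

var attempts int

// uploadPart is a hypothetical stand-in that fails twice before succeeding.
func uploadPart(r io.Reader) error {
    attempts++
    io.Copy(io.Discard, r) // consume the body, as a real HTTP PUT would
    if attempts < 3 {
        return errors.New("transient network error")
    }
    return nil
}

func main() {
    rd := bytes.NewReader([]byte("part payload"))
    err := retry.Do(func() error {
        rd.Seek(0, io.SeekStart) // rewind so every attempt sends the same bytes
        return uploadPart(rd)
    },
        retry.Attempts(3),
        retry.DelayType(retry.BackOffDelay),
        retry.Delay(time.Second))
    fmt.Println("attempts:", attempts, "err:", err) // attempts: 3 err: <nil>
}
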
@@ -19,7 +19,7 @@ import (

// do others that not defined in Driver interface

func (d *AliyundriveOpen) _refreshToken() (string, string, error) {
func (d *AliyundriveOpen) _refreshToken(ctx context.Context) (string, string, error) {
    if d.UseOnlineAPI && d.APIAddress != "" {
        u := d.APIAddress
        var resp struct {
@@ -27,13 +27,23 @@ func (d *AliyundriveOpen) _refreshToken() (string, string, error) {
            AccessToken  string `json:"access_token"`
            ErrorMessage string `json:"text"`
        }
        _, err := base.RestyClient.R().

        // Set driver_txt according to the AlipanType option
        driverTxt := "alicloud_qr"
        if d.AlipanType == "alipanTV" {
            driverTxt = "alicloud_tv"
        }
        err := d.wait(ctx, limiterOther)
        if err != nil {
            return "", "", err
        }
        _, err = base.RestyClient.R().
            SetHeader("User-Agent", "Mozilla/5.0 (Macintosh; Apple macOS 15_5) AppleWebKit/537.36 (KHTML, like Gecko) Safari/537.36 Chrome/138.0.0.0 Openlist/425.6.30").
            SetResult(&resp).
            SetQueryParams(map[string]string{
                "refresh_ui": d.RefreshToken,
                "server_use": "true",
                "driver_txt": "alicloud_qr",
                "driver_txt": driverTxt,
            }).
            Get(u)
        if err != nil {
@@ -47,11 +57,14 @@ func (d *AliyundriveOpen) _refreshToken() (string, string, error) {
        }
        return resp.RefreshToken, resp.AccessToken, nil
    }

    // Local refresh logic; requires client_id and client_secret
    if d.ClientID == "" || d.ClientSecret == "" {
        return "", "", fmt.Errorf("empty ClientID or ClientSecret")
    }
    err := d.wait(ctx, limiterOther)
    if err != nil {
        return "", "", err
    }
    url := API_URL + "/oauth/access_token"
    //var resp base.TokenResp
    var e ErrResp
@@ -103,18 +116,18 @@ func getSub(token string) (string, error) {
    return utils.Json.Get(bs, "sub").ToString(), nil
}

func (d *AliyundriveOpen) refreshToken() error {
func (d *AliyundriveOpen) refreshToken(ctx context.Context) error {
    if d.ref != nil {
        return d.ref.refreshToken()
        return d.ref.refreshToken(ctx)
    }
    refresh, access, err := d._refreshToken()
    refresh, access, err := d._refreshToken(ctx)
    for i := 0; i < 3; i++ {
        if err == nil {
            break
        } else {
            log.Errorf("[ali_open] failed to refresh token: %s", err)
        }
        refresh, access, err = d._refreshToken()
        refresh, access, err = d._refreshToken(ctx)
    }
    if err != nil {
        return err
@@ -125,12 +138,12 @@ func (d *AliyundriveOpen) refreshToken() error {
    return nil
}

func (d *AliyundriveOpen) request(uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error) {
    b, err, _ := d.requestReturnErrResp(uri, method, callback, retry...)
func (d *AliyundriveOpen) request(ctx context.Context, limitTy limiterType, uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error) {
    b, err, _ := d.requestReturnErrResp(ctx, limitTy, uri, method, callback, retry...)
    return b, err
}

func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error, *ErrResp) {
func (d *AliyundriveOpen) requestReturnErrResp(ctx context.Context, limitTy limiterType, uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error, *ErrResp) {
    req := base.RestyClient.R()
    // TODO check whether access_token is expired
    req.SetHeader("Authorization", "Bearer "+d.getAccessToken())
@@ -142,6 +155,10 @@ func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base
    }
    var e ErrResp
    req.SetError(&e)
    err := d.wait(ctx, limitTy)
    if err != nil {
        return nil, err, nil
    }
    res, err := req.Execute(method, API_URL+uri)
    if err != nil {
        if res != nil {
@@ -152,11 +169,11 @@ func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base
    isRetry := len(retry) > 0 && retry[0]
    if e.Code != "" {
        if !isRetry && (utils.SliceContains([]string{"AccessTokenInvalid", "AccessTokenExpired", "I400JD"}, e.Code) || d.getAccessToken() == "") {
            err = d.refreshToken()
            err = d.refreshToken(ctx)
            if err != nil {
                return nil, err, nil
            }
            return d.requestReturnErrResp(uri, method, callback, true)
            return d.requestReturnErrResp(ctx, limitTy, uri, method, callback, true)
        }
        return nil, fmt.Errorf("%s:%s", e.Code, e.Message), &e
    }
@@ -165,7 +182,7 @@ func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base

func (d *AliyundriveOpen) list(ctx context.Context, data base.Json) (*Files, error) {
    var resp Files
    _, err := d.request("/adrive/v1.0/openFile/list", http.MethodPost, func(req *resty.Request) {
    _, err := d.request(ctx, limiterList, "/adrive/v1.0/openFile/list", http.MethodPost, func(req *resty.Request) {
        req.SetBody(data).SetResult(&resp)
    })
    if err != nil {
@@ -194,7 +211,7 @@ func (d *AliyundriveOpen) getFiles(ctx context.Context, fileId string) ([]File,
        //"video_thumbnail_width": 480,
        //"image_thumbnail_width": 480,
    }
    resp, err := d.limitList(ctx, data)
    resp, err := d.list(ctx, data)
    if err != nil {
        return nil, err
    }

@@ -2,7 +2,6 @@ package aliyundrive_share

import (
    "context"
    "fmt"
    "net/http"
    "time"

@@ -12,7 +11,6 @@ import (
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/pkg/cron"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
    "github.com/OpenListTeam/rateg"
    "github.com/go-resty/resty/v2"
    log "github.com/sirupsen/logrus"
)
@@ -25,8 +23,7 @@ type AliyundriveShare struct {
    DriveId string
    cron    *cron.Cron

    limitList func(ctx context.Context, dir model.Obj) ([]model.Obj, error)
    limitLink func(ctx context.Context, file model.Obj) (*model.Link, error)
    limiter *limiter
}

func (d *AliyundriveShare) Config() driver.Config {
@@ -38,29 +35,26 @@ func (d *AliyundriveShare) GetAddition() driver.Additional {
}

func (d *AliyundriveShare) Init(ctx context.Context) error {
    err := d.refreshToken()
    d.limiter = getLimiter()
    err := d.refreshToken(ctx)
    if err != nil {
        d.limiter.free()
        d.limiter = nil
        return err
    }
    err = d.getShareToken()
    err = d.getShareToken(ctx)
    if err != nil {
        d.limiter.free()
        d.limiter = nil
        return err
    }
    d.cron = cron.NewCron(time.Hour * 2)
    d.cron.Do(func() {
        err := d.refreshToken()
        err := d.refreshToken(ctx)
        if err != nil {
            log.Errorf("%+v", err)
        }
    })
    d.limitList = rateg.LimitFnCtx(d.list, rateg.LimitFnOption{
        Limit:  4,
        Bucket: 1,
    })
    d.limitLink = rateg.LimitFnCtx(d.link, rateg.LimitFnOption{
        Limit:  1,
        Bucket: 1,
    })
    return nil
}

@@ -68,19 +62,14 @@ func (d *AliyundriveShare) Drop(ctx context.Context) error {
    if d.cron != nil {
        d.cron.Stop()
    }
    d.limiter.free()
    d.limiter = nil
    d.DriveId = ""
    return nil
}

func (d *AliyundriveShare) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
    if d.limitList == nil {
        return nil, fmt.Errorf("driver not init")
    }
    return d.limitList(ctx, dir)
}

func (d *AliyundriveShare) list(ctx context.Context, dir model.Obj) ([]model.Obj, error) {
    files, err := d.getFiles(dir.GetID())
    files, err := d.getFiles(ctx, dir.GetID())
    if err != nil {
        return nil, err
    }
@@ -90,13 +79,6 @@ func (d *AliyundriveShare) list(ctx context.Context, dir model.Obj) ([]model.Obj
}

func (d *AliyundriveShare) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
    if d.limitLink == nil {
        return nil, fmt.Errorf("driver not init")
    }
    return d.limitLink(ctx, file)
}

func (d *AliyundriveShare) link(ctx context.Context, file model.Obj) (*model.Link, error) {
    data := base.Json{
        "drive_id": d.DriveId,
        "file_id":  file.GetID(),
@@ -105,7 +87,7 @@ func (d *AliyundriveShare) link(ctx context.Context, file model.Obj) (*model.Lin
        "share_id": d.ShareId,
    }
    var resp ShareLinkResp
    _, err := d.request("https://api.alipan.com/v2/file/get_share_link_download_url", http.MethodPost, func(req *resty.Request) {
    _, err := d.request(ctx, limiterLink, "https://api.alipan.com/v2/file/get_share_link_download_url", http.MethodPost, func(req *resty.Request) {
        req.SetHeader(CanaryHeaderKey, CanaryHeaderValue).SetBody(data).SetResult(&resp)
    })
    if err != nil {
@ -135,7 +117,7 @@ func (d *AliyundriveShare) Other(ctx context.Context, args model.OtherArgs) (int
|
||||
default:
|
||||
return nil, errs.NotSupport
|
||||
}
|
||||
_, err := d.request(url, http.MethodPost, func(req *resty.Request) {
|
||||
_, err := d.request(ctx, limiterOther, url, http.MethodPost, func(req *resty.Request) {
|
||||
req.SetBody(data).SetResult(&resp)
|
||||
})
|
||||
if err != nil {
|
||||
|
67
drivers/aliyundrive_share/limiter.go
Normal file
67
drivers/aliyundrive_share/limiter.go
Normal file
@ -0,0 +1,67 @@
|
||||
package aliyundrive_share
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"golang.org/x/time/rate"
|
||||
)
|
||||
|
||||
// See issue https://github.com/OpenListTeam/OpenList/issues/724
|
||||
// Seems there is no limit per user.
|
||||
|
||||
type limiterType int
|
||||
|
||||
const (
|
||||
limiterList limiterType = iota
|
||||
limiterLink
|
||||
limiterOther
|
||||
)
|
||||
|
||||
const (
|
||||
listRateLimit = 3.9 // 4 per second in document, but we use 3.9 per second to be safe
|
||||
linkRateLimit = 0.9 // 1 per second in document, but we use 0.9 per second to be safe
|
||||
otherRateLimit = 14.9 // 15 per second in document, but we use 14.9 per second to be safe
|
||||
)
|
||||
|
||||
type limiter struct {
|
||||
list *rate.Limiter
|
||||
link *rate.Limiter
|
||||
other *rate.Limiter
|
||||
}
|
||||
|
||||
func getLimiter() *limiter {
|
||||
return &limiter{
|
||||
list: rate.NewLimiter(rate.Limit(listRateLimit), 1),
|
||||
link: rate.NewLimiter(rate.Limit(linkRateLimit), 1),
|
||||
other: rate.NewLimiter(rate.Limit(otherRateLimit), 1),
|
||||
}
|
||||
}
|
||||
|
||||
func (l *limiter) wait(ctx context.Context, typ limiterType) error {
|
||||
if l == nil {
|
||||
return fmt.Errorf("driver not init")
|
||||
}
|
||||
switch typ {
|
||||
case limiterList:
|
||||
return l.list.Wait(ctx)
|
||||
case limiterLink:
|
||||
return l.link.Wait(ctx)
|
||||
case limiterOther:
|
||||
return l.other.Wait(ctx)
|
||||
default:
|
||||
return fmt.Errorf("unknown limiter type")
|
||||
}
|
||||
}
|
||||
func (l *limiter) free() {
|
||||
|
||||
}
|
||||
func (d *AliyundriveShare) wait(ctx context.Context, typ limiterType) error {
|
||||
if d == nil {
|
||||
return fmt.Errorf("driver not init")
|
||||
}
|
||||
//if d.ref != nil {
|
||||
// return d.ref.wait(ctx, typ) // If this is a reference driver, wait on the reference driver.
|
||||
//}
|
||||
return d.limiter.wait(ctx, typ)
|
||||
}
|
@ -1,6 +1,7 @@
|
||||
package aliyundrive_share
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
@ -15,11 +16,15 @@ const (
|
||||
CanaryHeaderValue = "client=web,app=share,version=v2.3.1"
|
||||
)
|
||||
|
||||
func (d *AliyundriveShare) refreshToken() error {
|
||||
func (d *AliyundriveShare) refreshToken(ctx context.Context) error {
|
||||
err := d.wait(ctx, limiterOther)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
url := "https://auth.alipan.com/v2/account/token"
|
||||
var resp base.TokenResp
|
||||
var e ErrorResp
|
||||
_, err := base.RestyClient.R().
|
||||
_, err = base.RestyClient.R().
|
||||
SetBody(base.Json{"refresh_token": d.RefreshToken, "grant_type": "refresh_token"}).
|
||||
SetResult(&resp).
|
||||
SetError(&e).
|
||||
@ -36,7 +41,11 @@ func (d *AliyundriveShare) refreshToken() error {
|
||||
}
|
||||
|
||||
// do others that not defined in Driver interface
|
||||
func (d *AliyundriveShare) getShareToken() error {
|
||||
func (d *AliyundriveShare) getShareToken(ctx context.Context) error {
|
||||
err := d.wait(ctx, limiterOther)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
data := base.Json{
|
||||
"share_id": d.ShareId,
|
||||
}
|
||||
@ -45,7 +54,7 @@ func (d *AliyundriveShare) getShareToken() error {
|
||||
}
|
||||
var e ErrorResp
|
||||
var resp ShareTokenResp
|
||||
_, err := base.RestyClient.R().
|
||||
_, err = base.RestyClient.R().
|
||||
SetResult(&resp).SetError(&e).SetBody(data).
|
||||
Post("https://api.alipan.com/v2/share_link/get_share_token")
|
||||
if err != nil {
|
||||
@ -58,7 +67,7 @@ func (d *AliyundriveShare) getShareToken() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *AliyundriveShare) request(url, method string, callback base.ReqCallback) ([]byte, error) {
|
||||
func (d *AliyundriveShare) request(ctx context.Context, limitTy limiterType, url, method string, callback base.ReqCallback) ([]byte, error) {
|
||||
var e ErrorResp
|
||||
req := base.RestyClient.R().
|
||||
SetError(&e).
|
||||
@ -71,6 +80,10 @@ func (d *AliyundriveShare) request(url, method string, callback base.ReqCallback
|
||||
} else {
|
||||
req.SetBody("{}")
|
||||
}
|
||||
err := d.wait(ctx, limitTy)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := req.Execute(method, url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -78,14 +91,14 @@ func (d *AliyundriveShare) request(url, method string, callback base.ReqCallback
|
||||
if e.Code != "" {
|
||||
if e.Code == "AccessTokenInvalid" || e.Code == "ShareLinkTokenInvalid" {
|
||||
if e.Code == "AccessTokenInvalid" {
|
||||
err = d.refreshToken()
|
||||
err = d.refreshToken(ctx)
|
||||
} else {
|
||||
err = d.getShareToken()
|
||||
err = d.getShareToken(ctx)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d.request(url, method, callback)
|
||||
return d.request(ctx, limitTy, url, method, callback)
|
||||
} else {
|
||||
return nil, errors.New(e.Code + ": " + e.Message)
|
||||
}
|
||||
@ -93,7 +106,7 @@ func (d *AliyundriveShare) request(url, method string, callback base.ReqCallback
|
||||
return resp.Body(), nil
|
||||
}
|
||||
|
||||
func (d *AliyundriveShare) getFiles(fileId string) ([]File, error) {
|
||||
func (d *AliyundriveShare) getFiles(ctx context.Context, fileId string) ([]File, error) {
|
||||
files := make([]File, 0)
|
||||
data := base.Json{
|
||||
"image_thumbnail_process": "image/resize,w_160/format,jpeg",
|
||||
@ -110,6 +123,10 @@ func (d *AliyundriveShare) getFiles(fileId string) ([]File, error) {
|
||||
if data["marker"] == "first" {
|
||||
data["marker"] = ""
|
||||
}
|
||||
err := d.wait(ctx, limiterList)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var e ErrorResp
|
||||
var resp ListResp
|
||||
res, err := base.RestyClient.R().
|
||||
@ -123,11 +140,11 @@ func (d *AliyundriveShare) getFiles(fileId string) ([]File, error) {
|
||||
log.Debugf("aliyundrive share get files: %s", res.String())
|
||||
if e.Code != "" {
|
||||
if e.Code == "AccessTokenInvalid" || e.Code == "ShareLinkTokenInvalid" {
|
||||
err = d.getShareToken()
|
||||
err = d.getShareToken(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d.getFiles(fileId)
|
||||
return d.getFiles(ctx, fileId)
|
||||
}
|
||||
return nil, errors.New(e.Message)
|
||||
}
|
||||
|
@ -20,9 +20,12 @@ import (
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/baidu_netdisk"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/baidu_photo"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/chaoxing"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/chunk"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/cloudreve"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/cloudreve_v4"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/cnb_releases"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/crypt"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/degoo"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/doubao"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/doubao_share"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/dropbox"
|
||||
@ -48,6 +51,7 @@ import (
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/onedrive_app"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/onedrive_sharelink"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/openlist"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/openlist_share"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/pikpak"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/pikpak_share"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/quark_open"
|
||||
@ -59,11 +63,11 @@ import (
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/smb"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/strm"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/teambition"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/teldrive"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/terabox"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/thunder"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/thunder_browser"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/thunderx"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/trainbit"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/url_tree"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/uss"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/virtual"
|
||||
|
@ -203,11 +203,12 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
|
||||
|
||||
streamSize := stream.GetSize()
|
||||
sliceSize := d.getSliceSize(streamSize)
|
||||
count := int(streamSize / sliceSize)
|
||||
count := 1
|
||||
if streamSize > sliceSize {
|
||||
count = int((streamSize + sliceSize - 1) / sliceSize)
|
||||
}
|
||||
lastBlockSize := streamSize % sliceSize
|
||||
if lastBlockSize > 0 {
|
||||
count++
|
||||
} else {
|
||||
if lastBlockSize == 0 {
|
||||
lastBlockSize = sliceSize
|
||||
}
|
||||
|
||||
@ -363,4 +364,12 @@ func (d *BaiduNetdisk) uploadSlice(ctx context.Context, params map[string]string
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *BaiduNetdisk) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||
du, err := d.quota()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &model.StorageDetails{DiskUsage: *du}, nil
|
||||
}
|
||||
|
||||
var _ driver.Driver = (*BaiduNetdisk)(nil)
|
||||
|
@ -189,3 +189,12 @@ type PrecreateResp struct {
|
||||
// return_type=2
|
||||
File File `json:"info"`
|
||||
}
|
||||
|
||||
type QuotaResp struct {
|
||||
Errno int `json:"errno"`
|
||||
RequestId int64 `json:"request_id"`
|
||||
Total uint64 `json:"total"`
|
||||
Used uint64 `json:"used"`
|
||||
//Free uint64 `json:"free"`
|
||||
//Expire bool `json:"expire"`
|
||||
}
|
||||
|
@ -381,6 +381,18 @@ func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 {
|
||||
return maxSliceSize
|
||||
}
|
||||
|
||||
func (d *BaiduNetdisk) quota() (*model.DiskUsage, error) {
|
||||
var resp QuotaResp
|
||||
_, err := d.request("https://pan.baidu.com/api/quota", http.MethodGet, nil, &resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &model.DiskUsage{
|
||||
TotalSpace: resp.Total,
|
||||
FreeSpace: resp.Total - resp.Used,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// func encodeURIComponent(str string) string {
|
||||
// r := url.QueryEscape(str)
|
||||
// r = strings.ReplaceAll(r, "+", "%20")
|
||||
|
@ -262,11 +262,12 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
|
||||
|
||||
// 计算需要的数据
|
||||
streamSize := stream.GetSize()
|
||||
count := int(streamSize / DEFAULT)
|
||||
count := 1
|
||||
if streamSize > DEFAULT {
|
||||
count = int((streamSize + DEFAULT - 1) / DEFAULT)
|
||||
}
|
||||
lastBlockSize := streamSize % DEFAULT
|
||||
if lastBlockSize > 0 {
|
||||
count++
|
||||
} else {
|
||||
if lastBlockSize == 0 {
|
||||
lastBlockSize = DEFAULT
|
||||
}
|
||||
|
||||
|
@ -255,7 +255,7 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStr
|
||||
},
|
||||
UpdateProgress: up,
|
||||
})
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", "https://pan-yz.chaoxing.com/upload", r)
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, "https://pan-yz.chaoxing.com/upload", r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -32,7 +32,6 @@ func init() {
|
||||
config: driver.Config{
|
||||
Name: "ChaoXingGroupDrive",
|
||||
OnlyProxy: true,
|
||||
OnlyLocal: false,
|
||||
DefaultRoot: "-1",
|
||||
NoOverwriteUpload: true,
|
||||
},
|
||||
|
@ -167,7 +167,7 @@ func (d *ChaoXing) Login() (string, error) {
|
||||
return "", err
|
||||
}
|
||||
// Create the request
|
||||
req, err := http.NewRequest("POST", "https://passport2.chaoxing.com/fanyalogin", body)
|
||||
req, err := http.NewRequest(http.MethodPost, "https://passport2.chaoxing.com/fanyalogin", body)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
488
drivers/chunk/driver.go
Normal file
488
drivers/chunk/driver.go
Normal file
@ -0,0 +1,488 @@
|
||||
package chunk
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
stdpath "path"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/fs"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/sign"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/stream"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
"github.com/OpenListTeam/OpenList/v4/server/common"
|
||||
)
|
||||
|
||||
type Chunk struct {
|
||||
model.Storage
|
||||
Addition
|
||||
}
|
||||
|
||||
func (d *Chunk) Config() driver.Config {
|
||||
return config
|
||||
}
|
||||
|
||||
func (d *Chunk) GetAddition() driver.Additional {
|
||||
return &d.Addition
|
||||
}
|
||||
|
||||
func (d *Chunk) Init(ctx context.Context) error {
|
||||
if d.PartSize <= 0 {
|
||||
return errors.New("part size must be positive")
|
||||
}
|
||||
d.RemotePath = utils.FixAndCleanPath(d.RemotePath)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Chunk) Drop(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Chunk) Get(ctx context.Context, path string) (model.Obj, error) {
|
||||
if utils.PathEqual(path, "/") {
|
||||
return &model.Object{
|
||||
Name: "Root",
|
||||
IsFolder: true,
|
||||
Path: "/",
|
||||
}, nil
|
||||
}
|
||||
remoteStorage, remoteActualPath, err := op.GetStorageAndActualPath(d.RemotePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
remoteActualPath = stdpath.Join(remoteActualPath, path)
|
||||
if remoteObj, err := op.Get(ctx, remoteStorage, remoteActualPath); err == nil {
|
||||
return &model.Object{
|
||||
Path: path,
|
||||
Name: remoteObj.GetName(),
|
||||
Size: remoteObj.GetSize(),
|
||||
Modified: remoteObj.ModTime(),
|
||||
IsFolder: remoteObj.IsDir(),
|
||||
HashInfo: remoteObj.GetHash(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
remoteActualDir, name := stdpath.Split(remoteActualPath)
|
||||
chunkName := "[openlist_chunk]" + name
|
||||
chunkObjs, err := op.List(ctx, remoteStorage, stdpath.Join(remoteActualDir, chunkName), model.ListArgs{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var totalSize int64 = 0
|
||||
// 0号块必须存在
|
||||
chunkSizes := []int64{-1}
|
||||
h := make(map[*utils.HashType]string)
|
||||
var first model.Obj
|
||||
for _, o := range chunkObjs {
|
||||
if o.IsDir() {
|
||||
continue
|
||||
}
|
||||
if after, ok := strings.CutPrefix(o.GetName(), "hash_"); ok {
|
||||
hn, value, ok := strings.Cut(strings.TrimSuffix(after, d.CustomExt), "_")
|
||||
if ok {
|
||||
ht, ok := utils.GetHashByName(hn)
|
||||
if ok {
|
||||
h[ht] = value
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
idx, err := strconv.Atoi(strings.TrimSuffix(o.GetName(), d.CustomExt))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
totalSize += o.GetSize()
|
||||
if len(chunkSizes) > idx {
|
||||
if idx == 0 {
|
||||
first = o
|
||||
}
|
||||
chunkSizes[idx] = o.GetSize()
|
||||
} else if len(chunkSizes) == idx {
|
||||
chunkSizes = append(chunkSizes, o.GetSize())
|
||||
} else {
|
||||
newChunkSizes := make([]int64, idx+1)
|
||||
copy(newChunkSizes, chunkSizes)
|
||||
chunkSizes = newChunkSizes
|
||||
chunkSizes[idx] = o.GetSize()
|
||||
}
|
||||
}
|
||||
// 检查0号块不等于-1 以支持空文件
|
||||
// 如果块数量大于1 最后一块不可能为0
|
||||
// 只检查中间块是否有0
|
||||
for i, l := 0, len(chunkSizes)-2; ; i++ {
|
||||
if i == 0 {
|
||||
if chunkSizes[i] == -1 {
|
||||
return nil, fmt.Errorf("chunk part[%d] are missing", i)
|
||||
}
|
||||
} else if chunkSizes[i] == 0 {
|
||||
return nil, fmt.Errorf("chunk part[%d] are missing", i)
|
||||
}
|
||||
if i >= l {
|
||||
break
|
||||
}
|
||||
}
|
||||
reqDir, _ := stdpath.Split(path)
|
||||
objRes := chunkObject{
|
||||
Object: model.Object{
|
||||
Path: stdpath.Join(reqDir, chunkName),
|
||||
Name: name,
|
||||
Size: totalSize,
|
||||
Modified: first.ModTime(),
|
||||
Ctime: first.CreateTime(),
|
||||
},
|
||||
chunkSizes: chunkSizes,
|
||||
}
|
||||
if len(h) > 0 {
|
||||
objRes.HashInfo = utils.NewHashInfoByMap(h)
|
||||
}
|
||||
return &objRes, nil
|
||||
}
|
||||
|
||||
func (d *Chunk) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
||||
remoteStorage, remoteActualPath, err := op.GetStorageAndActualPath(d.RemotePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
remoteActualDir := stdpath.Join(remoteActualPath, dir.GetPath())
|
||||
remoteObjs, err := op.List(ctx, remoteStorage, remoteActualDir, model.ListArgs{
|
||||
ReqPath: args.ReqPath,
|
||||
Refresh: args.Refresh,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result := make([]model.Obj, 0, len(remoteObjs))
|
||||
for _, obj := range remoteObjs {
|
||||
rawName := obj.GetName()
|
||||
if obj.IsDir() {
|
||||
if name, ok := strings.CutPrefix(rawName, "[openlist_chunk]"); ok {
|
||||
chunkObjs, err := op.List(ctx, remoteStorage, stdpath.Join(remoteActualDir, rawName), model.ListArgs{
|
||||
ReqPath: stdpath.Join(args.ReqPath, rawName),
|
||||
Refresh: args.Refresh,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
totalSize := int64(0)
|
||||
h := make(map[*utils.HashType]string)
|
||||
first := obj
|
||||
for _, o := range chunkObjs {
|
||||
if o.IsDir() {
|
||||
continue
|
||||
}
|
||||
if after, ok := strings.CutPrefix(strings.TrimSuffix(o.GetName(), d.CustomExt), "hash_"); ok {
|
||||
hn, value, ok := strings.Cut(after, "_")
|
||||
if ok {
|
||||
ht, ok := utils.GetHashByName(hn)
|
||||
if ok {
|
||||
h[ht] = value
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
idx, err := strconv.Atoi(strings.TrimSuffix(o.GetName(), d.CustomExt))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if idx == 0 {
|
||||
first = o
|
||||
}
|
||||
totalSize += o.GetSize()
|
||||
}
|
||||
objRes := model.Object{
|
||||
Name: name,
|
||||
Size: totalSize,
|
||||
Modified: first.ModTime(),
|
||||
Ctime: first.CreateTime(),
|
||||
}
|
||||
if len(h) > 0 {
|
||||
objRes.HashInfo = utils.NewHashInfoByMap(h)
|
||||
}
|
||||
if !d.Thumbnail {
|
||||
result = append(result, &objRes)
|
||||
} else {
|
||||
thumbPath := stdpath.Join(args.ReqPath, ".thumbnails", name+".webp")
|
||||
thumb := fmt.Sprintf("%s/d%s?sign=%s",
|
||||
common.GetApiUrl(ctx),
|
||||
utils.EncodePath(thumbPath, true),
|
||||
sign.Sign(thumbPath))
|
||||
result = append(result, &model.ObjThumb{
|
||||
Object: objRes,
|
||||
Thumbnail: model.Thumbnail{
|
||||
Thumbnail: thumb,
|
||||
},
|
||||
})
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if !d.ShowHidden && strings.HasPrefix(rawName, ".") {
|
||||
continue
|
||||
}
|
||||
thumb, ok := model.GetThumb(obj)
|
||||
objRes := model.Object{
|
||||
Name: rawName,
|
||||
Size: obj.GetSize(),
|
||||
Modified: obj.ModTime(),
|
||||
IsFolder: obj.IsDir(),
|
||||
HashInfo: obj.GetHash(),
|
||||
}
|
||||
if !ok {
|
||||
result = append(result, &objRes)
|
||||
} else {
|
||||
result = append(result, &model.ObjThumb{
|
||||
Object: objRes,
|
||||
Thumbnail: model.Thumbnail{
|
||||
Thumbnail: thumb,
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (d *Chunk) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
||||
remoteStorage, remoteActualPath, err := op.GetStorageAndActualPath(d.RemotePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
chunkFile, ok := file.(*chunkObject)
|
||||
remoteActualPath = stdpath.Join(remoteActualPath, file.GetPath())
|
||||
if !ok {
|
||||
l, _, err := op.Link(ctx, remoteStorage, remoteActualPath, args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resultLink := *l
|
||||
resultLink.SyncClosers = utils.NewSyncClosers(l)
|
||||
return &resultLink, nil
|
||||
}
|
||||
fileSize := chunkFile.GetSize()
|
||||
mergedRrf := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
|
||||
start := httpRange.Start
|
||||
length := httpRange.Length
|
||||
if length < 0 || start+length > fileSize {
|
||||
length = fileSize - start
|
||||
}
|
||||
if length == 0 {
|
||||
return io.NopCloser(strings.NewReader("")), nil
|
||||
}
|
||||
rs := make([]io.Reader, 0)
|
||||
cs := make(utils.Closers, 0)
|
||||
var (
|
||||
rc io.ReadCloser
|
||||
readFrom bool
|
||||
)
|
||||
for idx, chunkSize := range chunkFile.chunkSizes {
|
||||
if readFrom {
|
||||
l, o, err := op.Link(ctx, remoteStorage, stdpath.Join(remoteActualPath, d.getPartName(idx)), args)
|
||||
if err != nil {
|
||||
_ = cs.Close()
|
||||
return nil, err
|
||||
}
|
||||
cs = append(cs, l)
|
||||
chunkSize2 := l.ContentLength
|
||||
if chunkSize2 <= 0 {
|
||||
chunkSize2 = o.GetSize()
|
||||
}
|
||||
if chunkSize2 != chunkSize {
|
||||
_ = cs.Close()
|
||||
return nil, fmt.Errorf("chunk part[%d] size not match", idx)
|
||||
}
|
||||
rrf, err := stream.GetRangeReaderFromLink(chunkSize2, l)
|
||||
if err != nil {
|
||||
_ = cs.Close()
|
||||
return nil, err
|
||||
}
|
||||
newLength := length - chunkSize2
|
||||
if newLength >= 0 {
|
||||
length = newLength
|
||||
rc, err = rrf.RangeRead(ctx, http_range.Range{Length: -1})
|
||||
} else {
|
||||
rc, err = rrf.RangeRead(ctx, http_range.Range{Length: length})
|
||||
}
|
||||
if err != nil {
|
||||
_ = cs.Close()
|
||||
return nil, err
|
||||
}
|
||||
rs = append(rs, rc)
|
||||
cs = append(cs, rc)
|
||||
if newLength <= 0 {
|
||||
return utils.ReadCloser{
|
||||
Reader: io.MultiReader(rs...),
|
||||
Closer: &cs,
|
||||
}, nil
|
||||
}
|
||||
} else if newStart := start - chunkSize; newStart >= 0 {
|
||||
start = newStart
|
||||
} else {
|
||||
l, o, err := op.Link(ctx, remoteStorage, stdpath.Join(remoteActualPath, d.getPartName(idx)), args)
|
||||
if err != nil {
|
||||
_ = cs.Close()
|
||||
return nil, err
|
||||
}
|
||||
cs = append(cs, l)
|
||||
chunkSize2 := l.ContentLength
|
||||
if chunkSize2 <= 0 {
|
||||
chunkSize2 = o.GetSize()
|
||||
}
|
||||
if chunkSize2 != chunkSize {
|
||||
_ = cs.Close()
|
||||
return nil, fmt.Errorf("chunk part[%d] size not match", idx)
|
||||
}
|
||||
rrf, err := stream.GetRangeReaderFromLink(chunkSize2, l)
|
||||
if err != nil {
|
||||
_ = cs.Close()
|
||||
return nil, err
|
||||
}
|
||||
rc, err = rrf.RangeRead(ctx, http_range.Range{Start: start, Length: -1})
|
||||
if err != nil {
|
||||
_ = cs.Close()
|
||||
return nil, err
|
||||
}
|
||||
length -= chunkSize2 - start
|
||||
cs = append(cs, rc)
|
||||
if length <= 0 {
|
||||
return utils.ReadCloser{
|
||||
Reader: rc,
|
||||
Closer: &cs,
|
||||
}, nil
|
||||
}
|
||||
rs = append(rs, rc)
|
||||
readFrom = true
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("invalid range: start=%d,length=%d,fileSize=%d", httpRange.Start, httpRange.Length, fileSize)
|
||||
}
|
||||
return &model.Link{
|
||||
RangeReader: stream.RangeReaderFunc(mergedRrf),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (d *Chunk) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
|
||||
path := stdpath.Join(d.RemotePath, parentDir.GetPath(), dirName)
|
||||
return fs.MakeDir(ctx, path)
|
||||
}
|
||||
|
||||
func (d *Chunk) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
|
||||
src := stdpath.Join(d.RemotePath, srcObj.GetPath())
|
||||
dst := stdpath.Join(d.RemotePath, dstDir.GetPath())
|
||||
_, err := fs.Move(ctx, src, dst)
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *Chunk) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
|
||||
if _, ok := srcObj.(*chunkObject); ok {
|
||||
newName = "[openlist_chunk]" + newName
|
||||
}
|
||||
return fs.Rename(ctx, stdpath.Join(d.RemotePath, srcObj.GetPath()), newName)
|
||||
}
|
||||
|
||||
func (d *Chunk) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
|
||||
dst := stdpath.Join(d.RemotePath, dstDir.GetPath())
|
||||
src := stdpath.Join(d.RemotePath, srcObj.GetPath())
|
||||
_, err := fs.Copy(ctx, src, dst)
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *Chunk) Remove(ctx context.Context, obj model.Obj) error {
|
||||
return fs.Remove(ctx, stdpath.Join(d.RemotePath, obj.GetPath()))
|
||||
}
|
||||
|
||||
func (d *Chunk) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
|
||||
remoteStorage, remoteActualPath, err := op.GetStorageAndActualPath(d.RemotePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if d.Thumbnail && dstDir.GetName() == ".thumbnails" {
|
||||
return op.Put(ctx, remoteStorage, stdpath.Join(remoteActualPath, dstDir.GetPath()), file, up)
|
||||
}
|
||||
upReader := &driver.ReaderUpdatingProgress{
|
||||
Reader: file,
|
||||
UpdateProgress: up,
|
||||
}
|
||||
dst := stdpath.Join(remoteActualPath, dstDir.GetPath(), "[openlist_chunk]"+file.GetName())
|
||||
if d.StoreHash {
|
||||
for ht, value := range file.GetHash().All() {
|
||||
_ = op.Put(ctx, remoteStorage, dst, &stream.FileStream{
|
||||
Obj: &model.Object{
|
||||
Name: fmt.Sprintf("hash_%s_%s%s", ht.Name, value, d.CustomExt),
|
||||
Size: 1,
|
||||
Modified: file.ModTime(),
|
||||
},
|
||||
Mimetype: "application/octet-stream",
|
||||
Reader: bytes.NewReader([]byte{0}), // 兼容不支持空文件的驱动
|
||||
}, nil, true)
|
||||
}
|
||||
}
|
||||
fullPartCount := int(file.GetSize() / d.PartSize)
|
||||
tailSize := file.GetSize() % d.PartSize
|
||||
if tailSize == 0 && fullPartCount > 0 {
|
||||
fullPartCount--
|
||||
tailSize = d.PartSize
|
||||
}
|
||||
partIndex := 0
|
||||
for partIndex < fullPartCount {
|
||||
err = op.Put(ctx, remoteStorage, dst, &stream.FileStream{
|
||||
Obj: &model.Object{
|
||||
Name: d.getPartName(partIndex),
|
||||
Size: d.PartSize,
|
||||
Modified: file.ModTime(),
|
||||
},
|
||||
Mimetype: file.GetMimetype(),
|
||||
Reader: io.LimitReader(upReader, d.PartSize),
|
||||
}, nil, true)
|
||||
if err != nil {
|
||||
_ = op.Remove(ctx, remoteStorage, dst)
|
||||
return err
|
||||
}
|
||||
partIndex++
|
||||
}
|
||||
err = op.Put(ctx, remoteStorage, dst, &stream.FileStream{
|
||||
Obj: &model.Object{
|
||||
Name: d.getPartName(fullPartCount),
|
||||
Size: tailSize,
|
||||
Modified: file.ModTime(),
|
||||
},
|
||||
Mimetype: file.GetMimetype(),
|
||||
Reader: upReader,
|
||||
}, nil)
|
||||
if err != nil {
|
||||
_ = op.Remove(ctx, remoteStorage, dst)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *Chunk) getPartName(part int) string {
|
||||
return fmt.Sprintf("%d%s", part, d.CustomExt)
|
||||
}
|
||||
|
||||
func (d *Chunk) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||
remoteStorage, err := fs.GetStorage(d.RemotePath, &fs.GetStoragesArgs{})
|
||||
if err != nil {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
wd, ok := remoteStorage.(driver.WithDetails)
|
||||
if !ok {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
remoteDetails, err := wd.GetDetails(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &model.StorageDetails{
|
||||
DiskUsage: remoteDetails.DiskUsage,
|
||||
}, nil
|
||||
}
|
||||
|
||||
var _ driver.Driver = (*Chunk)(nil)
|
31
drivers/chunk/meta.go
Normal file
31
drivers/chunk/meta.go
Normal file
@ -0,0 +1,31 @@
|
||||
package chunk
|
||||
|
||||
import (
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||
)
|
||||
|
||||
type Addition struct {
|
||||
RemotePath string `json:"remote_path" required:"true"`
|
||||
PartSize int64 `json:"part_size" required:"true" type:"number" help:"bytes"`
|
||||
CustomExt string `json:"custom_ext" type:"string"`
|
||||
StoreHash bool `json:"store_hash" type:"bool" default:"true"`
|
||||
|
||||
Thumbnail bool `json:"thumbnail" required:"true" default:"false" help:"enable thumbnail which pre-generated under .thumbnails folder"`
|
||||
ShowHidden bool `json:"show_hidden" default:"true" required:"false" help:"show hidden directories and files"`
|
||||
}
|
||||
|
||||
var config = driver.Config{
|
||||
Name: "Chunk",
|
||||
LocalSort: true,
|
||||
OnlyProxy: true,
|
||||
NoCache: true,
|
||||
DefaultRoot: "/",
|
||||
NoLinkURL: true,
|
||||
}
|
||||
|
||||
func init() {
|
||||
op.RegisterDriver(func() driver.Driver {
|
||||
return &Chunk{}
|
||||
})
|
||||
}
|
8
drivers/chunk/obj.go
Normal file
8
drivers/chunk/obj.go
Normal file
@ -0,0 +1,8 @@
|
||||
package chunk
|
||||
|
||||
import "github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
|
||||
type chunkObject struct {
|
||||
model.Object
|
||||
chunkSizes []int64
|
||||
}
|
@ -20,6 +20,7 @@ type Addition struct {
|
||||
var config = driver.Config{
|
||||
Name: "Cloudreve",
|
||||
DefaultRoot: "/",
|
||||
LocalSort: true,
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
@ -18,8 +18,10 @@ import (
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/setting"
|
||||
streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/cookie"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
"github.com/avast/retry-go"
|
||||
"github.com/go-resty/resty/v2"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
)
|
||||
@ -235,13 +237,16 @@ func (d *Cloudreve) upLocal(ctx context.Context, stream model.FileStreamer, u Up
|
||||
}
|
||||
|
||||
func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
|
||||
DEFAULT := int64(u.ChunkSize)
|
||||
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
uploadUrl := u.UploadURLs[0]
|
||||
credential := u.Credential
|
||||
var finish int64 = 0
|
||||
var chunk int = 0
|
||||
DEFAULT := int64(u.ChunkSize)
|
||||
retryCount := 0
|
||||
maxRetries := 3
|
||||
for finish < stream.GetSize() {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
@ -249,69 +254,67 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
|
||||
left := stream.GetSize() - finish
|
||||
byteSize := min(left, DEFAULT)
|
||||
utils.Log.Debugf("[Cloudreve-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
|
||||
byteData := make([]byte, byteSize)
|
||||
n, err := io.ReadFull(stream, byteData)
|
||||
utils.Log.Debug(err, n)
|
||||
rd, err := ss.GetSectionReader(finish, byteSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk),
|
||||
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
|
||||
err = retry.Do(
|
||||
func() error {
|
||||
rd.Seek(0, io.SeekStart)
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadUrl+"?chunk="+strconv.Itoa(chunk),
|
||||
driver.NewLimitedUploadStream(ctx, rd))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.ContentLength = byteSize
|
||||
req.Header.Set("Authorization", fmt.Sprint(credential))
|
||||
req.Header.Set("User-Agent", d.getUA())
|
||||
res, err := base.HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != 200 {
|
||||
return fmt.Errorf("server error: %d", res.StatusCode)
|
||||
}
|
||||
body, err := io.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var up Resp
|
||||
err = json.Unmarshal(body, &up)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if up.Code != 0 {
|
||||
return errors.New(up.Msg)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
retry.Attempts(3),
|
||||
retry.DelayType(retry.BackOffDelay),
|
||||
retry.Delay(time.Second),
|
||||
)
|
||||
ss.FreeSectionReader(rd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req = req.WithContext(ctx)
|
||||
req.ContentLength = byteSize
|
||||
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
|
||||
req.Header.Set("Authorization", fmt.Sprint(credential))
|
||||
req.Header.Set("User-Agent", d.getUA())
|
||||
err = func() error {
|
||||
res, err := base.HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != 200 {
|
||||
return errors.New(res.Status)
|
||||
}
|
||||
body, err := io.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var up Resp
|
||||
err = json.Unmarshal(body, &up)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if up.Code != 0 {
|
||||
return errors.New(up.Msg)
|
||||
}
|
||||
return nil
|
||||
}()
|
||||
if err == nil {
|
||||
retryCount = 0
|
||||
finish += byteSize
|
||||
up(float64(finish) * 100 / float64(stream.GetSize()))
|
||||
chunk++
|
||||
} else {
|
||||
retryCount++
|
||||
if retryCount > maxRetries {
|
||||
return fmt.Errorf("upload failed after %d retries due to server errors, error: %s", maxRetries, err)
|
||||
}
|
||||
backoff := time.Duration(1<<retryCount) * time.Second
|
||||
utils.Log.Warnf("[Cloudreve-Remote] server errors while uploading, retrying after %v...", backoff)
|
||||
time.Sleep(backoff)
|
||||
}
|
||||
finish += byteSize
|
||||
up(float64(finish) * 100 / float64(stream.GetSize()))
|
||||
chunk++
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
|
||||
DEFAULT := int64(u.ChunkSize)
|
||||
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
uploadUrl := u.UploadURLs[0]
|
||||
var finish int64 = 0
|
||||
DEFAULT := int64(u.ChunkSize)
|
||||
retryCount := 0
|
||||
maxRetries := 3
|
||||
for finish < stream.GetSize() {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
@ -319,46 +322,45 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
|
||||
left := stream.GetSize() - finish
|
||||
byteSize := min(left, DEFAULT)
|
||||
utils.Log.Debugf("[Cloudreve-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
|
||||
byteData := make([]byte, byteSize)
|
||||
n, err := io.ReadFull(stream, byteData)
|
||||
utils.Log.Debug(err, n)
|
||||
rd, err := ss.GetSectionReader(finish, byteSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
|
||||
err = retry.Do(
|
||||
func() error {
|
||||
rd.Seek(0, io.SeekStart)
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, rd))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.ContentLength = byteSize
|
||||
req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()))
|
||||
req.Header.Set("User-Agent", d.getUA())
|
||||
res, err := base.HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
|
||||
switch {
|
||||
case res.StatusCode >= 500 && res.StatusCode <= 504:
|
||||
return fmt.Errorf("server error: %d", res.StatusCode)
|
||||
case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200:
|
||||
data, _ := io.ReadAll(res.Body)
|
||||
return errors.New(string(data))
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}, retry.Attempts(3),
|
||||
retry.DelayType(retry.BackOffDelay),
|
||||
retry.Delay(time.Second),
|
||||
)
|
||||
ss.FreeSectionReader(rd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req = req.WithContext(ctx)
|
||||
req.ContentLength = byteSize
|
||||
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
|
||||
req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()))
|
||||
req.Header.Set("User-Agent", d.getUA())
|
||||
res, err := base.HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
|
||||
switch {
|
||||
case res.StatusCode >= 500 && res.StatusCode <= 504:
|
||||
retryCount++
|
||||
if retryCount > maxRetries {
|
||||
res.Body.Close()
|
||||
return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
|
||||
}
|
||||
backoff := time.Duration(1<<retryCount) * time.Second
|
||||
utils.Log.Warnf("[Cloudreve-OneDrive] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
|
||||
time.Sleep(backoff)
|
||||
case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200:
|
||||
data, _ := io.ReadAll(res.Body)
|
||||
res.Body.Close()
|
||||
return errors.New(string(data))
|
||||
default:
|
||||
res.Body.Close()
|
||||
retryCount = 0
|
||||
finish += byteSize
|
||||
up(float64(finish) * 100 / float64(stream.GetSize()))
|
||||
}
|
||||
finish += byteSize
|
||||
up(float64(finish) * 100 / float64(stream.GetSize()))
|
||||
}
|
||||
// 上传成功发送回调请求
|
||||
return d.request(http.MethodPost, "/callback/onedrive/finish/"+u.SessionID, func(req *resty.Request) {
|
||||
@ -367,12 +369,15 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
|
||||
}
|
||||
|
||||
func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
|
||||
DEFAULT := int64(u.ChunkSize)
|
||||
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var finish int64 = 0
|
||||
var chunk int = 0
|
||||
var etags []string
|
||||
DEFAULT := int64(u.ChunkSize)
|
||||
retryCount := 0
|
||||
maxRetries := 3
|
||||
for finish < stream.GetSize() {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
@ -380,45 +385,47 @@ func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u Uploa
|
||||
left := stream.GetSize() - finish
|
||||
byteSize := min(left, DEFAULT)
|
||||
utils.Log.Debugf("[Cloudreve-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
|
||||
byteData := make([]byte, byteSize)
|
||||
n, err := io.ReadFull(stream, byteData)
|
||||
utils.Log.Debug(err, n)
|
||||
rd, err := ss.GetSectionReader(finish, byteSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req, err := http.NewRequest("PUT", u.UploadURLs[chunk],
|
||||
driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
|
||||
err = retry.Do(
|
||||
func() error {
|
||||
rd.Seek(0, io.SeekStart)
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPut, u.UploadURLs[chunk],
|
||||
driver.NewLimitedUploadStream(ctx, rd))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.ContentLength = byteSize
|
||||
req.Header.Set("User-Agent", d.getUA())
|
||||
res, err := base.HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
etag := res.Header.Get("ETag")
|
||||
res.Body.Close()
|
||||
switch {
|
||||
case res.StatusCode != 200:
|
||||
return fmt.Errorf("server error: %d", res.StatusCode)
|
||||
case etag == "":
|
||||
return errors.New("failed to get ETag from header")
|
||||
default:
|
||||
etags = append(etags, etag)
|
||||
return nil
|
||||
}
|
||||
}, retry.Attempts(3),
|
||||
retry.DelayType(retry.BackOffDelay),
|
||||
retry.Delay(time.Second),
|
||||
)
|
||||
ss.FreeSectionReader(rd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req = req.WithContext(ctx)
|
||||
req.ContentLength = byteSize
|
||||
res, err := base.HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
etag := res.Header.Get("ETag")
|
||||
res.Body.Close()
|
||||
switch {
|
||||
case res.StatusCode != 200:
|
||||
retryCount++
|
||||
if retryCount > maxRetries {
|
||||
return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
|
||||
}
|
||||
backoff := time.Duration(1<<retryCount) * time.Second
|
||||
utils.Log.Warnf("[Cloudreve-S3] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
|
||||
time.Sleep(backoff)
|
||||
case etag == "":
|
||||
return errors.New("failed to get ETag from header")
|
||||
default:
|
||||
retryCount = 0
|
||||
etags = append(etags, etag)
|
||||
finish += byteSize
|
||||
up(float64(finish) * 100 / float64(stream.GetSize()))
|
||||
chunk++
|
||||
}
|
||||
finish += byteSize
|
||||
up(float64(finish) * 100 / float64(stream.GetSize()))
|
||||
chunk++
|
||||
}
|
||||
|
||||
// s3LikeFinishUpload
|
||||
// https://github.com/cloudreve/frontend/blob/b485bf297974cbe4834d2e8e744ae7b7e5b2ad39/src/component/Uploader/core/api/index.ts#L204-L252
|
||||
bodyBuilder := &strings.Builder{}
|
||||
@ -431,8 +438,8 @@ func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u Uploa
|
||||
))
|
||||
}
|
||||
bodyBuilder.WriteString("</CompleteMultipartUpload>")
|
||||
req, err := http.NewRequest(
|
||||
"POST",
|
||||
req, err := http.NewRequestWithContext(ctx,
|
||||
http.MethodPost,
|
||||
u.CompleteURL,
|
||||
strings.NewReader(bodyBuilder.String()),
|
||||
)
|
||||
|
@ -20,7 +20,9 @@ import (
|
||||
type CloudreveV4 struct {
|
||||
model.Storage
|
||||
Addition
|
||||
ref *CloudreveV4
|
||||
ref *CloudreveV4
|
||||
AccessExpires string
|
||||
RefreshExpires string
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) Config() driver.Config {
|
||||
@ -44,13 +46,17 @@ func (d *CloudreveV4) Init(ctx context.Context) error {
|
||||
if d.ref != nil {
|
||||
return nil
|
||||
}
|
||||
if d.AccessToken == "" && d.RefreshToken != "" {
|
||||
return d.refreshToken()
|
||||
}
|
||||
if d.Username != "" {
|
||||
if d.canLogin() {
|
||||
return d.login()
|
||||
}
|
||||
return nil
|
||||
if d.RefreshToken != "" {
|
||||
return d.refreshToken()
|
||||
}
|
||||
if d.AccessToken == "" {
|
||||
return errors.New("no way to authenticate. At least AccessToken is required")
|
||||
}
|
||||
// ensure AccessToken is valid
|
||||
return d.parseJWT(d.AccessToken, &AccessJWT{})
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) InitReference(storage driver.Driver) error {
|
||||
@ -333,6 +339,21 @@ func (d *CloudreveV4) ArchiveDecompress(ctx context.Context, srcObj, dstDir mode
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||
// TODO return storage details (total space, free space, etc.)
|
||||
var r CapacityResp
|
||||
err := d.request(http.MethodGet, "/user/capacity", nil, &r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &model.StorageDetails{
|
||||
DiskUsage: model.DiskUsage{
|
||||
TotalSpace: r.Total,
|
||||
FreeSpace: r.Total - r.Used,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
//func (d *CloudreveV4) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
|
||||
// return nil, errs.NotSupport
|
||||
//}
|
||||
|
@ -26,15 +26,8 @@ type Addition struct {
|
||||
|
||||
var config = driver.Config{
|
||||
Name: "Cloudreve V4",
|
||||
LocalSort: false,
|
||||
OnlyLocal: false,
|
||||
OnlyProxy: false,
|
||||
NoCache: false,
|
||||
NoUpload: false,
|
||||
NeedMs: false,
|
||||
DefaultRoot: "cloudreve://my",
|
||||
CheckStatus: true,
|
||||
Alert: "",
|
||||
NoOverwriteUpload: true,
|
||||
}
|
||||
|
||||
|
@ -47,7 +47,13 @@ type BasicConfigResp struct {
|
||||
|
||||
type SiteLoginConfigResp struct {
|
||||
LoginCaptcha bool `json:"login_captcha"`
|
||||
Authn bool `json:"authn"`
|
||||
// RegCaptcha bool `json:"reg_captcha"`
|
||||
// ForgetCaptcha bool `json:"forget_captcha"`
|
||||
// RegisterEnabled bool `json:"register_enabled"`
|
||||
// TosURL string `json:"tos_url"`
|
||||
// PrivacyPolicyURL string `json:"privacy_policy_url"`
|
||||
// SsoDisplayName string `json:"sso_display_name"`
|
||||
// OidcDisplayName string `json:"oidc_display_name"`
|
||||
}
|
||||
|
||||
type PrepareLoginResp struct {
|
||||
@ -60,11 +66,27 @@ type CaptchaResp struct {
|
||||
Ticket string `json:"ticket"`
|
||||
}
|
||||
|
||||
type AccessJWT struct {
|
||||
TokenType string `json:"token_type"`
|
||||
Sub string `json:"sub"`
|
||||
Exp int64 `json:"exp"`
|
||||
Nbf int64 `json:"nbf"`
|
||||
}
|
||||
|
||||
type RefreshJWT struct {
|
||||
TokenType string `json:"token_type"`
|
||||
Sub string `json:"sub"`
|
||||
Exp int `json:"exp"`
|
||||
Nbf int `json:"nbf"`
|
||||
StateHash string `json:"state_hash"`
|
||||
RootTokenID string `json:"root_token_id"`
|
||||
}
|
||||
|
||||
type Token struct {
|
||||
AccessToken string `json:"access_token"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
AccessExpires time.Time `json:"access_expires"`
|
||||
RefreshExpires time.Time `json:"refresh_expires"`
|
||||
AccessToken string `json:"access_token"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
AccessExpires string `json:"access_expires"`
|
||||
RefreshExpires string `json:"refresh_expires"`
|
||||
}
|
||||
|
||||
type TokenResponse struct {
|
||||
@ -182,3 +204,9 @@ type FolderSummaryResp struct {
|
||||
CalculatedAt time.Time `json:"calculated_at"`
|
||||
} `json:"folder_summary"`
|
||||
}
|
||||
|
||||
type CapacityResp struct {
|
||||
Total uint64 `json:"total"`
|
||||
Used uint64 `json:"used"`
|
||||
// StoragePackTotal uint64 `json:"storage_pack_total"`
|
||||
}
|
||||
|
@ -19,13 +19,24 @@ import (
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/setting"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/stream"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
"github.com/avast/retry-go"
|
||||
"github.com/go-resty/resty/v2"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
)
|
||||
|
||||
// do others that not defined in Driver interface
|
||||
|
||||
const (
|
||||
CodeLoginRequired = http.StatusUnauthorized
|
||||
CodeCredentialInvalid = 40020 // Failed to issue token
|
||||
)
|
||||
|
||||
var (
|
||||
ErrorIssueToken = errors.New("failed to issue token")
|
||||
)
|
||||
|
||||
func (d *CloudreveV4) getUA() string {
|
||||
if d.CustomUA != "" {
|
||||
return d.CustomUA
|
||||
@ -37,6 +48,23 @@ func (d *CloudreveV4) request(method string, path string, callback base.ReqCallb
|
||||
if d.ref != nil {
|
||||
return d.ref.request(method, path, callback, out)
|
||||
}
|
||||
|
||||
// ensure token
|
||||
if d.isTokenExpired() {
|
||||
err := d.refreshToken()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return d._request(method, path, callback, out)
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) _request(method string, path string, callback base.ReqCallback, out any) error {
|
||||
if d.ref != nil {
|
||||
return d.ref._request(method, path, callback, out)
|
||||
}
|
||||
|
||||
u := d.Address + "/api/v4" + path
|
||||
req := base.RestyClient.R()
|
||||
req.SetHeaders(map[string]string{
|
||||
@ -63,15 +91,17 @@ func (d *CloudreveV4) request(method string, path string, callback base.ReqCallb
|
||||
}
|
||||
|
||||
if r.Code != 0 {
|
||||
if r.Code == 401 && d.RefreshToken != "" && path != "/session/token/refresh" {
|
||||
// try to refresh token
|
||||
err = d.refreshToken()
|
||||
if r.Code == CodeLoginRequired && d.canLogin() && path != "/session/token/refresh" {
|
||||
err = d.login()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return d.request(method, path, callback, out)
|
||||
}
|
||||
return errors.New(r.Msg)
|
||||
if r.Code == CodeCredentialInvalid {
|
||||
return ErrorIssueToken
|
||||
}
|
||||
return fmt.Errorf("%d: %s", r.Code, r.Msg)
|
||||
}
|
||||
|
||||
if out != nil && r.Data != nil {
|
||||
@ -89,17 +119,18 @@ func (d *CloudreveV4) request(method string, path string, callback base.ReqCallb
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) canLogin() bool {
|
||||
return d.Username != "" && d.Password != ""
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) login() error {
|
||||
var siteConfig SiteLoginConfigResp
|
||||
err := d.request(http.MethodGet, "/site/config/login", nil, &siteConfig)
|
||||
err := d._request(http.MethodGet, "/site/config/login", nil, &siteConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !siteConfig.Authn {
|
||||
return errors.New("authn not support")
|
||||
}
|
||||
var prepareLogin PrepareLoginResp
|
||||
err = d.request(http.MethodGet, "/session/prepare?email="+d.Addition.Username, nil, &prepareLogin)
|
||||
err = d._request(http.MethodGet, "/session/prepare?email="+d.Addition.Username, nil, &prepareLogin)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -129,7 +160,7 @@ func (d *CloudreveV4) doLogin(needCaptcha bool) error {
|
||||
}
|
||||
if needCaptcha {
|
||||
var config BasicConfigResp
|
||||
err = d.request(http.MethodGet, "/site/config/basic", nil, &config)
|
||||
err = d._request(http.MethodGet, "/site/config/basic", nil, &config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -137,7 +168,7 @@ func (d *CloudreveV4) doLogin(needCaptcha bool) error {
|
||||
return fmt.Errorf("captcha type %s not support", config.CaptchaType)
|
||||
}
|
||||
var captcha CaptchaResp
|
||||
err = d.request(http.MethodGet, "/site/captcha", nil, &captcha)
|
||||
err = d._request(http.MethodGet, "/site/captcha", nil, &captcha)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -163,20 +194,22 @@ func (d *CloudreveV4) doLogin(needCaptcha bool) error {
|
||||
loginBody["captcha"] = captchaCode
|
||||
}
|
||||
var token TokenResponse
|
||||
err = d.request(http.MethodPost, "/session/token", func(req *resty.Request) {
|
||||
err = d._request(http.MethodPost, "/session/token", func(req *resty.Request) {
|
||||
req.SetBody(loginBody)
|
||||
}, &token)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.AccessToken, d.RefreshToken = token.Token.AccessToken, token.Token.RefreshToken
|
||||
d.AccessExpires, d.RefreshExpires = token.Token.AccessExpires, token.Token.RefreshExpires
|
||||
op.MustSaveDriverStorage(d)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) refreshToken() error {
|
||||
// if no refresh token, try to login if possible
|
||||
if d.RefreshToken == "" {
|
||||
if d.Username != "" {
|
||||
if d.canLogin() {
|
||||
err := d.login()
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot login to get refresh token, error: %s", err)
|
||||
@ -184,20 +217,127 @@ func (d *CloudreveV4) refreshToken() error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// parse jwt to check if refresh token is valid
|
||||
var jwt RefreshJWT
|
||||
err := d.parseJWT(d.RefreshToken, &jwt)
|
||||
if err != nil {
|
||||
// if refresh token is invalid, try to login if possible
|
||||
if d.canLogin() {
|
||||
return d.login()
|
||||
}
|
||||
d.GetStorage().SetStatus(fmt.Sprintf("Invalid RefreshToken: %s", err.Error()))
|
||||
op.MustSaveDriverStorage(d)
|
||||
return fmt.Errorf("invalid refresh token: %w", err)
|
||||
}
|
||||
|
||||
// do refresh token
|
||||
var token Token
|
||||
err := d.request(http.MethodPost, "/session/token/refresh", func(req *resty.Request) {
|
||||
err = d._request(http.MethodPost, "/session/token/refresh", func(req *resty.Request) {
|
||||
req.SetBody(base.Json{
|
||||
"refresh_token": d.RefreshToken,
|
||||
})
|
||||
}, &token)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrorIssueToken) {
|
||||
if d.canLogin() {
|
||||
// try to login again
|
||||
return d.login()
|
||||
}
|
||||
d.GetStorage().SetStatus("This session is no longer valid")
|
||||
op.MustSaveDriverStorage(d)
|
||||
return ErrorIssueToken
|
||||
}
|
||||
return err
|
||||
}
|
||||
d.AccessToken, d.RefreshToken = token.AccessToken, token.RefreshToken
|
||||
d.AccessExpires, d.RefreshExpires = token.AccessExpires, token.RefreshExpires
|
||||
op.MustSaveDriverStorage(d)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) parseJWT(token string, jwt any) error {
|
||||
split := strings.Split(token, ".")
|
||||
if len(split) != 3 {
|
||||
return fmt.Errorf("invalid token length: %d, ensure the token is a valid JWT", len(split))
|
||||
}
|
||||
data, err := base64.RawURLEncoding.DecodeString(split[1])
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid token encoding: %w, ensure the token is a valid JWT", err)
|
||||
}
|
||||
err = json.Unmarshal(data, &jwt)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid token content: %w, ensure the token is a valid JWT", err)
|
||||
}
|
||||
return nil
|
||||
}

// check if the token is expired
// https://github.com/cloudreve/frontend/blob/ddfacc1c31c49be03beb71de4cc114c8811038d6/src/session/index.ts#L177-L200
func (d *CloudreveV4) isTokenExpired() bool {
    if d.RefreshToken == "" {
        // login again if username and password are set
        if d.canLogin() {
            return true
        }
        // no refresh token, cannot refresh
        return false
    }
    if d.AccessToken == "" {
        return true
    }
    var (
        err     error
        expires time.Time
    )
    // check if the token is expired
    if d.AccessExpires != "" {
        // use the expires field if possible to prevent timezone issues
        // only available after login or token refresh
        // e.g. 2025-08-28T02:43:07.645109985+08:00
        expires, err = time.Parse(time.RFC3339Nano, d.AccessExpires)
        if err != nil {
            return false
        }
    } else {
        // fall back to parsing the jwt
        // if that fails, disable the storage
        var jwt AccessJWT
        err = d.parseJWT(d.AccessToken, &jwt)
        if err != nil {
            d.GetStorage().SetStatus(fmt.Sprintf("Invalid AccessToken: %s", err.Error()))
            op.MustSaveDriverStorage(d)
            return false
        }
        // may have timezone issues
        expires = time.Unix(jwt.Exp, 0)
    }
    // add a 10-minute safety margin
    ddl := time.Now().Add(10 * time.Minute)
    if expires.Before(ddl) {
        // the current access token expired, check if the refresh token is expired
        // warning: cannot parse the refresh token as a jwt, because its exp field is not standard
        if d.RefreshExpires != "" {
            refreshExpires, err := time.Parse(time.RFC3339Nano, d.RefreshExpires)
            if err != nil {
                return false
            }
            if refreshExpires.Before(time.Now()) {
                // this session is no longer valid
                if d.canLogin() {
                    // try to login again
                    return true
                }
                d.GetStorage().SetStatus("This session is no longer valid")
                op.MustSaveDriverStorage(d)
                return false
            }
        }
        return true
    }
    return false
}
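The check prefers the server-issued RFC 3339 timestamp over the JWT exp claim to sidestep timezone ambiguity, and treats a token as expired ten minutes early. A compact sketch of that margin logic; the timestamp value is illustrative:

package main

import (
    "fmt"
    "time"
)

// expired reports whether an RFC 3339 expiry falls within the safety margin.
func expired(accessExpires string, margin time.Duration) bool {
    expires, err := time.Parse(time.RFC3339Nano, accessExpires)
    if err != nil {
        return false // unparsable: leave the decision to the caller, mirroring the driver
    }
    return expires.Before(time.Now().Add(margin))
}

func main() {
    // Same format the driver stores after login or refresh.
    fmt.Println(expired("2025-08-28T02:43:07.645109985+08:00", 10*time.Minute))
}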

func (d *CloudreveV4) upLocal(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
    var finish int64 = 0
    var chunk int = 0
@@ -253,13 +393,16 @@ func (d *CloudreveV4) upLocal(ctx context.Context, file model.FileStreamer, u Fi
}

func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
    DEFAULT := int64(u.ChunkSize)
    ss, err := stream.NewStreamSectionReader(file, int(DEFAULT), &up)
    if err != nil {
        return err
    }

    uploadUrl := u.UploadUrls[0]
    credential := u.Credential
    var finish int64 = 0
    var chunk int = 0
    DEFAULT := int64(u.ChunkSize)
    retryCount := 0
    maxRetries := 3
    for finish < file.GetSize() {
        if utils.IsCanceled(ctx) {
            return ctx.Err()
@@ -267,69 +410,67 @@ func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u F
        left := file.GetSize() - finish
        byteSize := min(left, DEFAULT)
        utils.Log.Debugf("[CloudreveV4-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
        byteData := make([]byte, byteSize)
        n, err := io.ReadFull(file, byteData)
        utils.Log.Debug(err, n)
        rd, err := ss.GetSectionReader(finish, byteSize)
        if err != nil {
            return err
        }
        req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk),
            driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
        err = retry.Do(
            func() error {
                rd.Seek(0, io.SeekStart)
                req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadUrl+"?chunk="+strconv.Itoa(chunk),
                    driver.NewLimitedUploadStream(ctx, rd))
                if err != nil {
                    return err
                }

                req.ContentLength = byteSize
                req.Header.Set("Authorization", fmt.Sprint(credential))
                req.Header.Set("User-Agent", d.getUA())
                res, err := base.HttpClient.Do(req)
                if err != nil {
                    return err
                }
                defer res.Body.Close()
                if res.StatusCode != 200 {
                    return fmt.Errorf("server error: %d", res.StatusCode)
                }
                body, err := io.ReadAll(res.Body)
                if err != nil {
                    return err
                }
                var up Resp
                err = json.Unmarshal(body, &up)
                if err != nil {
                    return err
                }
                if up.Code != 0 {
                    return errors.New(up.Msg)
                }
                return nil
            }, retry.Attempts(3),
            retry.DelayType(retry.BackOffDelay),
            retry.Delay(time.Second),
        )
        ss.FreeSectionReader(rd)
        if err != nil {
            return err
        }
        req = req.WithContext(ctx)
        req.ContentLength = byteSize
        // req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
        req.Header.Set("Authorization", fmt.Sprint(credential))
        req.Header.Set("User-Agent", d.getUA())
        err = func() error {
            res, err := base.HttpClient.Do(req)
            if err != nil {
                return err
            }
            defer res.Body.Close()
            if res.StatusCode != 200 {
                return errors.New(res.Status)
            }
            body, err := io.ReadAll(res.Body)
            if err != nil {
                return err
            }
            var up Resp
            err = json.Unmarshal(body, &up)
            if err != nil {
                return err
            }
            if up.Code != 0 {
                return errors.New(up.Msg)
            }
            return nil
        }()
        if err == nil {
            retryCount = 0
            finish += byteSize
            up(float64(finish) * 100 / float64(file.GetSize()))
            chunk++
        } else {
            retryCount++
            if retryCount > maxRetries {
                return fmt.Errorf("upload failed after %d retries due to server errors, error: %s", maxRetries, err)
            }
            backoff := time.Duration(1<<retryCount) * time.Second
            utils.Log.Warnf("[Cloudreve-Remote] server errors while uploading, retrying after %v...", backoff)
            time.Sleep(backoff)
        }
        finish += byteSize
        up(float64(finish) * 100 / float64(file.GetSize()))
        chunk++
    }
    return nil
}
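The rewritten loop replaces the hand-rolled retryCount/backoff bookkeeping with retry-go and a reusable section reader that is rewound before every attempt, so a failed try never consumes the chunk. A minimal sketch of that shape, assuming the avast/retry-go package the diff imports (the HTTP details are elided):

package main

import (
    "fmt"
    "io"
    "strings"
    "time"

    "github.com/avast/retry-go/v4"
)

func uploadChunk(rd io.ReadSeeker) error {
    attempt := 0
    return retry.Do(
        func() error {
            // Rewind before each attempt so retries re-send the same bytes.
            rd.Seek(0, io.SeekStart)
            attempt++
            if attempt < 3 {
                return fmt.Errorf("server error: %d", 500) // simulated transient failure
            }
            _, err := io.Copy(io.Discard, rd) // stand-in for the real POST/PUT
            return err
        },
        retry.Attempts(3),
        retry.DelayType(retry.BackOffDelay),
        retry.Delay(time.Second),
    )
}

func main() {
    fmt.Println(uploadChunk(strings.NewReader("chunk payload")))
}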

func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
    DEFAULT := int64(u.ChunkSize)
    ss, err := stream.NewStreamSectionReader(file, int(DEFAULT), &up)
    if err != nil {
        return err
    }

    uploadUrl := u.UploadUrls[0]
    var finish int64 = 0
    DEFAULT := int64(u.ChunkSize)
    retryCount := 0
    maxRetries := 3
    for finish < file.GetSize() {
        if utils.IsCanceled(ctx) {
            return ctx.Err()
@@ -337,46 +478,46 @@ func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u
        left := file.GetSize() - finish
        byteSize := min(left, DEFAULT)
        utils.Log.Debugf("[CloudreveV4-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
        byteData := make([]byte, byteSize)
        n, err := io.ReadFull(file, byteData)
        utils.Log.Debug(err, n)
        rd, err := ss.GetSectionReader(finish, byteSize)
        if err != nil {
            return err
        }
        req, err := http.NewRequest(http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
        err = retry.Do(
            func() error {
                rd.Seek(0, io.SeekStart)
                req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, rd))
                if err != nil {
                    return err
                }

                req.ContentLength = byteSize
                req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, file.GetSize()))
                req.Header.Set("User-Agent", d.getUA())
                res, err := base.HttpClient.Do(req)
                if err != nil {
                    return err
                }
                defer res.Body.Close()
                // https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
                switch {
                case res.StatusCode >= 500 && res.StatusCode <= 504:
                    return fmt.Errorf("server error: %d", res.StatusCode)
                case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200:
                    data, _ := io.ReadAll(res.Body)
                    return errors.New(string(data))
                default:
                    return nil
                }
            }, retry.Attempts(3),
            retry.DelayType(retry.BackOffDelay),
            retry.Delay(time.Second),
        )
        ss.FreeSectionReader(rd)
        if err != nil {
            return err
        }
        req = req.WithContext(ctx)
        req.ContentLength = byteSize
        // req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
        req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, file.GetSize()))
        req.Header.Set("User-Agent", d.getUA())
        res, err := base.HttpClient.Do(req)
        if err != nil {
            return err
        }
        // https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
        switch {
        case res.StatusCode >= 500 && res.StatusCode <= 504:
            retryCount++
            if retryCount > maxRetries {
                res.Body.Close()
                return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
            }
            backoff := time.Duration(1<<retryCount) * time.Second
            utils.Log.Warnf("[CloudreveV4-OneDrive] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
            time.Sleep(backoff)
        case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200:
            data, _ := io.ReadAll(res.Body)
            res.Body.Close()
            return errors.New(string(data))
        default:
            res.Body.Close()
            retryCount = 0
            finish += byteSize
            up(float64(finish) * 100 / float64(file.GetSize()))
        }
        finish += byteSize
        up(float64(finish) * 100 / float64(file.GetSize()))
    }
    // send the callback request after a successful upload
    return d.request(http.MethodPost, "/callback/onedrive/"+u.SessionID+"/"+u.CallbackSecret, func(req *resty.Request) {
@@ -385,12 +526,15 @@ func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u
}
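The OneDrive upload-session API identifies each chunk purely by its Content-Range header, which is why the loop recomputes "bytes start-end/total" per chunk. A tiny sketch of that calculation; the sizes are illustrative (min on int64 is the Go 1.21+ builtin the diff itself relies on):

package main

import "fmt"

func main() {
    total := int64(10 << 20)     // 10 MiB file, illustrative
    chunkSize := int64(4 << 20)  // 4 MiB chunks
    for finish := int64(0); finish < total; {
        byteSize := min(total-finish, chunkSize)
        fmt.Printf("Content-Range: bytes %d-%d/%d\n", finish, finish+byteSize-1, total)
        finish += byteSize
    }
}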

func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
    DEFAULT := int64(u.ChunkSize)
    ss, err := stream.NewStreamSectionReader(file, int(DEFAULT), &up)
    if err != nil {
        return err
    }

    var finish int64 = 0
    var chunk int = 0
    var etags []string
    DEFAULT := int64(u.ChunkSize)
    retryCount := 0
    maxRetries := 3
    for finish < file.GetSize() {
        if utils.IsCanceled(ctx) {
            return ctx.Err()
@@ -398,43 +542,47 @@ func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileU
        left := file.GetSize() - finish
        byteSize := min(left, DEFAULT)
        utils.Log.Debugf("[CloudreveV4-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
        byteData := make([]byte, byteSize)
        n, err := io.ReadFull(file, byteData)
        utils.Log.Debug(err, n)
        rd, err := ss.GetSectionReader(finish, byteSize)
        if err != nil {
            return err
        }
        req, err := http.NewRequest(http.MethodPut, u.UploadUrls[chunk],
            driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
        err = retry.Do(
            func() error {
                rd.Seek(0, io.SeekStart)
                req, err := http.NewRequestWithContext(ctx, http.MethodPut, u.UploadUrls[chunk],
                    driver.NewLimitedUploadStream(ctx, rd))
                if err != nil {
                    return err
                }
                req.ContentLength = byteSize
                req.Header.Set("User-Agent", d.getUA())
                res, err := base.HttpClient.Do(req)
                if err != nil {
                    return err
                }
                etag := res.Header.Get("ETag")
                res.Body.Close()
                switch {
                case res.StatusCode != 200:
                    return fmt.Errorf("server error: %d", res.StatusCode)
                case etag == "":
                    return errors.New("failed to get ETag from header")
                default:
                    etags = append(etags, etag)
                    return nil
                }
            },
            retry.Attempts(3),
            retry.DelayType(retry.BackOffDelay),
            retry.Delay(time.Second),
        )
        ss.FreeSectionReader(rd)
        if err != nil {
            return err
        }
        req = req.WithContext(ctx)
        req.ContentLength = byteSize
        res, err := base.HttpClient.Do(req)
        if err != nil {
            return err
        }
        etag := res.Header.Get("ETag")
        res.Body.Close()
        switch {
        case res.StatusCode != 200:
            retryCount++
            if retryCount > maxRetries {
                return fmt.Errorf("upload failed after %d retries due to server errors", maxRetries)
            }
            backoff := time.Duration(1<<retryCount) * time.Second
            utils.Log.Warnf("server error %d, retrying after %v...", res.StatusCode, backoff)
            time.Sleep(backoff)
        case etag == "":
            return errors.New("failed to get ETag from header")
        default:
            retryCount = 0
            etags = append(etags, etag)
            finish += byteSize
            up(float64(finish) * 100 / float64(file.GetSize()))
            chunk++
        }
        finish += byteSize
        up(float64(finish) * 100 / float64(file.GetSize()))
        chunk++
    }

    // s3LikeFinishUpload
@@ -448,8 +596,8 @@ func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileU
            ))
    }
    bodyBuilder.WriteString("</CompleteMultipartUpload>")
    req, err := http.NewRequest(
        "POST",
    req, err := http.NewRequestWithContext(ctx,
        http.MethodPost,
        u.CompleteURL,
        strings.NewReader(bodyBuilder.String()),
    )
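The S3 finish step posts a CompleteMultipartUpload document listing every part number with its ETag, which is what bodyBuilder assembles above. A small sketch of that XML assembly; the ETag values are invented for illustration:

package main

import (
    "fmt"
    "strings"
)

func completeBody(etags []string) string {
    var b strings.Builder
    b.WriteString("<CompleteMultipartUpload>")
    for i, etag := range etags {
        // S3 part numbers are 1-based.
        b.WriteString(fmt.Sprintf("<Part><PartNumber>%d</PartNumber><ETag>%s</ETag></Part>", i+1, etag))
    }
    b.WriteString("</CompleteMultipartUpload>")
    return b.String()
}

func main() {
    fmt.Println(completeBody([]string{`"etag-1"`, `"etag-2"`}))
}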
230 drivers/cnb_releases/driver.go (new file)
@@ -0,0 +1,230 @@
package cnb_releases

import (
    "bytes"
    "context"
    "fmt"
    "io"
    "mime/multipart"
    "net/http"
    "time"

    "github.com/OpenListTeam/OpenList/v4/drivers/base"
    "github.com/OpenListTeam/OpenList/v4/internal/driver"
    "github.com/OpenListTeam/OpenList/v4/internal/errs"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
    "github.com/go-resty/resty/v2"
)

type CnbReleases struct {
    model.Storage
    Addition
    ref *CnbReleases
}

func (d *CnbReleases) Config() driver.Config {
    return config
}

func (d *CnbReleases) GetAddition() driver.Additional {
    return &d.Addition
}

func (d *CnbReleases) Init(ctx context.Context) error {
    return nil
}

func (d *CnbReleases) InitReference(storage driver.Driver) error {
    refStorage, ok := storage.(*CnbReleases)
    if ok {
        d.ref = refStorage
        return nil
    }
    return fmt.Errorf("ref: storage is not CnbReleases")
}

func (d *CnbReleases) Drop(ctx context.Context) error {
    d.ref = nil
    return nil
}

func (d *CnbReleases) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
    if dir.GetPath() == "/" {
        // get all releases for the root dir
        var resp ReleaseList

        err := d.Request(http.MethodGet, "/{repo}/-/releases", func(req *resty.Request) {
            req.SetPathParam("repo", d.Repo)
        }, &resp)
        if err != nil {
            return nil, err
        }

        return utils.SliceConvert(resp, func(src Release) (model.Obj, error) {
            name := src.Name
            if d.UseTagName {
                name = src.TagName
            }
            return &model.Object{
                ID:       src.ID,
                Name:     name,
                Size:     d.sumAssetsSize(src.Assets),
                Ctime:    src.CreatedAt,
                Modified: src.UpdatedAt,
                IsFolder: true,
            }, nil
        })
    } else {
        // get release info by release id
        releaseID := dir.GetID()
        if releaseID == "" {
            return nil, errs.ObjectNotFound
        }
        var resp Release
        err := d.Request(http.MethodGet, "/{repo}/-/releases/{release_id}", func(req *resty.Request) {
            req.SetPathParam("repo", d.Repo)
            req.SetPathParam("release_id", releaseID)
        }, &resp)
        if err != nil {
            return nil, err
        }

        return utils.SliceConvert(resp.Assets, func(src ReleaseAsset) (model.Obj, error) {
            return &Object{
                Object: model.Object{
                    ID:       src.ID,
                    Path:     src.Path,
                    Name:     src.Name,
                    Size:     src.Size,
                    Ctime:    src.CreatedAt,
                    Modified: src.UpdatedAt,
                    IsFolder: false,
                },
                ParentID: dir.GetID(),
            }, nil
        })
    }
}

func (d *CnbReleases) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
    return &model.Link{
        URL: "https://cnb.cool" + file.GetPath(),
    }, nil
}

func (d *CnbReleases) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
    if parentDir.GetPath() == "/" {
        // create a new release
        branch := d.DefaultBranch
        if branch == "" {
            branch = "main" // fallback to "main" if not set
        }
        return d.Request(http.MethodPost, "/{repo}/-/releases", func(req *resty.Request) {
            req.SetPathParam("repo", d.Repo)
            req.SetBody(base.Json{
                "name":             dirName,
                "tag_name":         dirName,
                "target_commitish": branch,
            })
        }, nil)
    }
    return errs.NotImplement
}

func (d *CnbReleases) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
    return nil, errs.NotImplement
}

func (d *CnbReleases) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
    if srcObj.IsDir() && !d.UseTagName {
        return d.Request(http.MethodPatch, "/{repo}/-/releases/{release_id}", func(req *resty.Request) {
            req.SetPathParam("repo", d.Repo)
            req.SetPathParam("release_id", srcObj.GetID())
            req.SetFormData(map[string]string{
                "name": newName,
            })
        }, nil)
    }
    return errs.NotImplement
}

func (d *CnbReleases) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
    return nil, errs.NotImplement
}

func (d *CnbReleases) Remove(ctx context.Context, obj model.Obj) error {
    if obj.IsDir() {
        return d.Request(http.MethodDelete, "/{repo}/-/releases/{release_id}", func(req *resty.Request) {
            req.SetPathParam("repo", d.Repo)
            req.SetPathParam("release_id", obj.GetID())
        }, nil)
    }
    if o, ok := obj.(*Object); ok {
        return d.Request(http.MethodDelete, "/{repo}/-/releases/{release_id}/assets/{asset_id}", func(req *resty.Request) {
            req.SetPathParam("repo", d.Repo)
            req.SetPathParam("release_id", o.ParentID)
            req.SetPathParam("asset_id", obj.GetID())
        }, nil)
    } else {
        return fmt.Errorf("unable to get release ID")
    }
}

func (d *CnbReleases) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
    // 1. get upload info
    var resp ReleaseAssetUploadURL
    err := d.Request(http.MethodPost, "/{repo}/-/releases/{release_id}/asset-upload-url", func(req *resty.Request) {
        req.SetPathParam("repo", d.Repo)
        req.SetPathParam("release_id", dstDir.GetID())
        req.SetBody(base.Json{
            "asset_name": file.GetName(),
            "overwrite":  true,
            "size":       file.GetSize(),
        })
    }, &resp)
    if err != nil {
        return err
    }

    // 2. upload the file
    // use multipart to create the form file
    var b bytes.Buffer
    w := multipart.NewWriter(&b)
    _, err = w.CreateFormFile("file", file.GetName())
    if err != nil {
        return err
    }
    headSize := b.Len()
    err = w.Close()
    if err != nil {
        return err
    }

    head := bytes.NewReader(b.Bytes()[:headSize])
    tail := bytes.NewReader(b.Bytes()[headSize:])
    rateLimitedRd := driver.NewLimitedUploadStream(ctx, io.MultiReader(head, file, tail))

    // use net/http to upload the file
    ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Duration(resp.ExpiresInSec+1)*time.Second)
    defer cancel()
    req, err := http.NewRequestWithContext(ctxWithTimeout, http.MethodPost, resp.UploadURL, rateLimitedRd)
    if err != nil {
        return err
    }
    req.Header.Set("Content-Type", w.FormDataContentType())
    req.Header.Set("User-Agent", base.UserAgent)
    httpResp, err := base.HttpClient.Do(req)
    if err != nil {
        return err
    }
    defer httpResp.Body.Close()
    if httpResp.StatusCode != http.StatusNoContent {
        return fmt.Errorf("upload file failed: %s", httpResp.Status)
    }

    // 3. verify the upload
    return d.Request(http.MethodPost, resp.VerifyURL, nil, nil)
}

var _ driver.Driver = (*CnbReleases)(nil)
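Put above avoids buffering the whole upload: it renders only the multipart prologue and epilogue into memory, then splices the file stream between them with io.MultiReader. A standalone sketch of that head/file/tail trick; the field and file names are illustrative:

package main

import (
    "bytes"
    "fmt"
    "io"
    "mime/multipart"
    "strings"
)

func main() {
    var b bytes.Buffer
    w := multipart.NewWriter(&b)
    if _, err := w.CreateFormFile("file", "asset.bin"); err != nil {
        panic(err)
    }
    headSize := b.Len() // everything written so far precedes the file bytes
    if err := w.Close(); err != nil {
        panic(err) // Close appends the closing boundary after headSize
    }
    head := bytes.NewReader(b.Bytes()[:headSize])
    tail := bytes.NewReader(b.Bytes()[headSize:])
    file := strings.NewReader("file contents stream in here") // stand-in for the streamed file
    body := io.MultiReader(head, file, tail)
    out, _ := io.ReadAll(body)
    fmt.Printf("content type: %s\nbody:\n%s", w.FormDataContentType(), out)
}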
26 drivers/cnb_releases/meta.go (new file)
@@ -0,0 +1,26 @@
package cnb_releases

import (
    "github.com/OpenListTeam/OpenList/v4/internal/driver"
    "github.com/OpenListTeam/OpenList/v4/internal/op"
)

type Addition struct {
    driver.RootPath
    Repo          string `json:"repo" type:"string" required:"true"`
    Token         string `json:"token" type:"string" required:"true"`
    UseTagName    bool   `json:"use_tag_name" type:"bool" default:"false" help:"Use tag name instead of release name"`
    DefaultBranch string `json:"default_branch" type:"string" default:"main" help:"Default branch for new releases"`
}

var config = driver.Config{
    Name:        "CNB Releases",
    LocalSort:   true,
    DefaultRoot: "/",
}

func init() {
    op.RegisterDriver(func() driver.Driver {
        return &CnbReleases{}
    })
}
100 drivers/cnb_releases/types.go (new file)
@@ -0,0 +1,100 @@
package cnb_releases

import (
    "time"

    "github.com/OpenListTeam/OpenList/v4/internal/model"
)

type Object struct {
    model.Object
    ParentID string
}

type TagList []Tag

type Tag struct {
    Commit struct {
        Author    UserInfo       `json:"author"`
        Commit    CommitObject   `json:"commit"`
        Committer UserInfo       `json:"committer"`
        Parents   []CommitParent `json:"parents"`
        Sha       string         `json:"sha"`
    } `json:"commit"`
    Name         string                `json:"name"`
    Target       string                `json:"target"`
    TargetType   string                `json:"target_type"`
    Verification TagObjectVerification `json:"verification"`
}

type UserInfo struct {
    Freeze   bool   `json:"freeze"`
    Nickname string `json:"nickname"`
    Username string `json:"username"`
}

type CommitObject struct {
    Author       Signature                `json:"author"`
    CommentCount int                      `json:"comment_count"`
    Committer    Signature                `json:"committer"`
    Message      string                   `json:"message"`
    Tree         CommitObjectTree         `json:"tree"`
    Verification CommitObjectVerification `json:"verification"`
}

type Signature struct {
    Date  time.Time `json:"date"`
    Email string    `json:"email"`
    Name  string    `json:"name"`
}

type CommitObjectTree struct {
    Sha string `json:"sha"`
}

type CommitObjectVerification struct {
    Payload    string `json:"payload"`
    Reason     string `json:"reason"`
    Signature  string `json:"signature"`
    Verified   bool   `json:"verified"`
    VerifiedAt string `json:"verified_at"`
}

type CommitParent = CommitObjectTree

type TagObjectVerification = CommitObjectVerification

type ReleaseList []Release

type Release struct {
    Assets       []ReleaseAsset `json:"assets"`
    Author       UserInfo       `json:"author"`
    Body         string         `json:"body"`
    CreatedAt    time.Time      `json:"created_at"`
    Draft        bool           `json:"draft"`
    ID           string         `json:"id"`
    IsLatest     bool           `json:"is_latest"`
    Name         string         `json:"name"`
    Prerelease   bool           `json:"prerelease"`
    PublishedAt  time.Time      `json:"published_at"`
    TagCommitish string         `json:"tag_commitish"`
    TagName      string         `json:"tag_name"`
    UpdatedAt    time.Time      `json:"updated_at"`
}

type ReleaseAsset struct {
    ContentType string    `json:"content_type"`
    CreatedAt   time.Time `json:"created_at"`
    ID          string    `json:"id"`
    Name        string    `json:"name"`
    Path        string    `json:"path"`
    Size        int64     `json:"size"`
    UpdatedAt   time.Time `json:"updated_at"`
    Uploader    UserInfo  `json:"uploader"`
}

type ReleaseAssetUploadURL struct {
    UploadURL    string `json:"upload_url"`
    ExpiresInSec int    `json:"expires_in_sec"`
    VerifyURL    string `json:"verify_url"`
}
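These types mirror the CNB API's JSON shapes. A quick check that a release payload round-trips into them; the struct here is trimmed to a few fields for the demo and the JSON document is a made-up minimal example:

package main

import (
    "encoding/json"
    "fmt"
)

// Release trimmed for the demo; the driver's struct carries the full field set.
type Release struct {
    ID      string `json:"id"`
    TagName string `json:"tag_name"`
    Assets  []struct {
        Name string `json:"name"`
        Size int64  `json:"size"`
    } `json:"assets"`
}

func main() {
    payload := `{"id":"1","tag_name":"v1.0.0","assets":[{"name":"app.tar.gz","size":1024}]}`
    var r Release
    if err := json.Unmarshal([]byte(payload), &r); err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", r)
}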
58 drivers/cnb_releases/util.go (new file)
@@ -0,0 +1,58 @@
package cnb_releases

import (
    "encoding/json"
    "fmt"
    "net/http"
    "strings"

    "github.com/OpenListTeam/OpenList/v4/drivers/base"
    log "github.com/sirupsen/logrus"
)

// helper methods not defined in the Driver interface

func (d *CnbReleases) Request(method string, path string, callback base.ReqCallback, resp any) error {
    if d.ref != nil {
        return d.ref.Request(method, path, callback, resp)
    }
    var url string
    if strings.HasPrefix(path, "http") {
        url = path
    } else {
        url = "https://api.cnb.cool" + path
    }
    req := base.RestyClient.R()
    req.SetHeader("Accept", "application/json")
    req.SetAuthScheme("Bearer")
    req.SetAuthToken(d.Token)

    if callback != nil {
        callback(req)
    }
    res, err := req.Execute(method, url)
    log.Debugln(res.String())
    if err != nil {
        return err
    }
    if res.StatusCode() != http.StatusOK && res.StatusCode() != http.StatusCreated && res.StatusCode() != http.StatusNoContent {
        return fmt.Errorf("failed to request %s, status code: %d, message: %s", url, res.StatusCode(), res.String())
    }

    if resp != nil {
        err = json.Unmarshal(res.Body(), resp)
        if err != nil {
            return err
        }
    }

    return nil
}

func (d *CnbReleases) sumAssetsSize(assets []ReleaseAsset) int64 {
    var size int64
    for _, asset := range assets {
        size += asset.Size
    }
    return size
}
@@ -1,12 +1,14 @@
package crypt

import (
    "bytes"
    "context"
    "fmt"
    "io"
    stdpath "path"
    "regexp"
    "strings"
    "sync"

    "github.com/OpenListTeam/OpenList/v4/internal/driver"
    "github.com/OpenListTeam/OpenList/v4/internal/errs"
@@ -110,7 +112,7 @@ func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
    //return d.list(ctx, d.RemotePath, path)
    //remoteFull

    objs, err := fs.List(ctx, d.getPathForRemote(path, true), &fs.ListArgs{NoLog: true})
    objs, err := fs.List(ctx, d.getPathForRemote(path, true), &fs.ListArgs{NoLog: true, Refresh: args.Refresh})
    // the obj must implement the model.SetPath interface
    // return objs, err
    if err != nil {
@@ -241,6 +243,9 @@ func (d *Crypt) Get(ctx context.Context, path string) (model.Obj, error) {
    //return nil, errs.ObjectNotFound
}

// https://github.com/rclone/rclone/blob/v1.67.0/backend/crypt/cipher.go#L37
const fileHeaderSize = 32

func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
    dstDirActualPath, err := d.getActualPathForRemote(file.GetPath(), false)
    if err != nil {
@@ -251,61 +256,69 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
        return nil, err
    }

    if remoteLink.RangeReadCloser == nil && remoteLink.MFile == nil && len(remoteLink.URL) == 0 {
    remoteSize := remoteLink.ContentLength
    if remoteSize <= 0 {
        remoteSize = remoteFile.GetSize()
    }
    rrf, err := stream.GetRangeReaderFromLink(remoteSize, remoteLink)
    if err != nil {
        _ = remoteLink.Close()
        return nil, fmt.Errorf("the remote storage driver needs to be enhanced to support encryption")
    }
    remoteFileSize := remoteFile.GetSize()
    remoteClosers := utils.EmptyClosers()
    rangeReaderFunc := func(ctx context.Context, underlyingOffset, underlyingLength int64) (io.ReadCloser, error) {
        length := underlyingLength
        if underlyingLength >= 0 && underlyingOffset+underlyingLength >= remoteFileSize {
            length = -1
        }
        rrc := remoteLink.RangeReadCloser
        if len(remoteLink.URL) > 0 {
            var converted, err = stream.GetRangeReadCloserFromLink(remoteFileSize, remoteLink)
            if err != nil {
                return nil, err

    mu := &sync.Mutex{}
    var fileHeader []byte
    rangeReaderFunc := func(ctx context.Context, offset, limit int64) (io.ReadCloser, error) {
        length := limit
        if offset == 0 && limit > 0 {
            mu.Lock()
            if limit <= fileHeaderSize {
                defer mu.Unlock()
                if fileHeader != nil {
                    return io.NopCloser(bytes.NewReader(fileHeader[:limit])), nil
                }
                length = fileHeaderSize
            } else if fileHeader == nil {
                defer mu.Unlock()
            } else {
                mu.Unlock()
            }
            rrc = converted
        }
        if rrc != nil {
            remoteReader, err := rrc.RangeRead(ctx, http_range.Range{Start: underlyingOffset, Length: length})
            remoteClosers.AddClosers(rrc.GetClosers())
            if err != nil {
                return nil, err
            }
            return remoteReader, nil
        }
        if remoteLink.MFile != nil {
            _, err := remoteLink.MFile.Seek(underlyingOffset, io.SeekStart)
            if err != nil {
                return nil, err
            }
            // keep reusing the same MFile and close it at the end.
            remoteClosers.Add(remoteLink.MFile)
            return io.NopCloser(remoteLink.MFile), nil
        }

        return nil, errs.NotSupport

    }
    resultRangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
        readSeeker, err := d.cipher.DecryptDataSeek(ctx, rangeReaderFunc, httpRange.Start, httpRange.Length)
        remoteReader, err := rrf.RangeRead(ctx, http_range.Range{Start: offset, Length: length})
        if err != nil {
            return nil, err
        }
        return readSeeker, nil

        if offset == 0 && limit > 0 {
            fileHeader = make([]byte, fileHeaderSize)
            n, err := io.ReadFull(remoteReader, fileHeader)
            if n != fileHeaderSize {
                fileHeader = nil
                return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", fileHeaderSize, n, err)
            }
            if limit <= fileHeaderSize {
                remoteReader.Close()
                return io.NopCloser(bytes.NewReader(fileHeader[:limit])), nil
            } else {
                remoteReader = utils.ReadCloser{
                    Reader: io.MultiReader(bytes.NewReader(fileHeader), remoteReader),
                    Closer: remoteReader,
                }
            }
        }
        return remoteReader, nil
    }

    resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: remoteClosers}
    resultLink := &model.Link{
        RangeReadCloser: resultRangeReadCloser,
        Expiration:      remoteLink.Expiration,
    }

    return resultLink, nil

    return &model.Link{
        RangeReader: stream.RangeReaderFunc(func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
            readSeeker, err := d.cipher.DecryptDataSeek(ctx, rangeReaderFunc, httpRange.Start, httpRange.Length)
            if err != nil {
                return nil, err
            }
            return readSeeker, nil
        }),
        SyncClosers: utils.NewSyncClosers(remoteLink),
    }, nil
}
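The new Link implementation caches the 32-byte rclone-crypt file header the first time offset 0 is read, so later header-only probes are served from memory instead of re-opening the remote. A simplified, mutex-guarded sketch of that idea; the fetch function is a stand-in for the remote range read:

package main

import (
    "bytes"
    "fmt"
    "io"
    "sync"
)

const fileHeaderSize = 32

type headerCache struct {
    mu     sync.Mutex
    header []byte
}

// readHeader returns the first n (<= fileHeaderSize) bytes, fetching from the remote at most once.
func (c *headerCache) readHeader(n int, fetch func(length int) ([]byte, error)) (io.ReadCloser, error) {
    c.mu.Lock()
    defer c.mu.Unlock()
    if c.header == nil {
        data, err := fetch(fileHeaderSize)
        if err != nil {
            return nil, err
        }
        c.header = data
    }
    return io.NopCloser(bytes.NewReader(c.header[:n])), nil
}

func main() {
    calls := 0
    fetch := func(length int) ([]byte, error) {
        calls++
        return bytes.Repeat([]byte{0xAA}, length), nil // pretend remote read
    }
    var c headerCache
    c.readHeader(16, fetch)
    c.readHeader(32, fetch)
    fmt.Println("remote reads:", calls) // 1: the second call hit the cache
}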

func (d *Crypt) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
@@ -388,7 +401,6 @@ func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, streamer model.FileSt
        },
        Reader:            wrappedIn,
        Mimetype:          "application/octet-stream",
        WebPutAsTask:      streamer.NeedStore(),
        ForceStreamUpload: true,
        Exist:             streamer.GetExist(),
    }
@@ -399,6 +411,20 @@ func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, streamer model.FileSt
    return nil
}

func (d *Crypt) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    wd, ok := d.remoteStorage.(driver.WithDetails)
    if !ok {
        return nil, errs.NotImplement
    }
    remoteDetails, err := wd.GetDetails(ctx)
    if err != nil {
        return nil, err
    }
    return &model.StorageDetails{
        DiskUsage: remoteDetails.DiskUsage,
    }, nil
}

//func (d *Safe) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
//  return nil, errs.NotSupport
//}

@@ -26,17 +26,12 @@ type Addition struct {
}

var config = driver.Config{
    Name:              "Crypt",
    LocalSort:         true,
    OnlyLocal:         true,
    OnlyProxy:         true,
    NoCache:           true,
    NoUpload:          false,
    NeedMs:            false,
    DefaultRoot:       "/",
    CheckStatus:       false,
    Alert:             "",
    NoOverwriteUpload: false,
    Name:        "Crypt",
    LocalSort:   true,
    OnlyProxy:   true,
    NoCache:     true,
    DefaultRoot: "/",
    NoLinkURL:   true,
}

func init() {
Some files were not shown because too many files have changed in this diff.