Mirror of https://github.com/OpenListTeam/OpenList.git, synced 2025-09-20 04:36:09 +08:00

Compare commits (313 commits)
SHA1
---
55d3827dee
1fbc9427df
bb3d139a47
d227ab85d6
5342ae96d0
273e15a050
13aad2c2fa
368dc65a6e
8b4b6ba970
4d28e838ce
3930d4789a
d0c22a1ecb
57fceabcf4
8c244a984d
df479ba806
5ae8e96237
aa0ced47b0
ab747d9052
93c06213d4
b9b8eed285
317d190b77
52d7d819ad
0483e0f868
08dae4f55f
9ac0484bc0
8cf15183a0
c8f2aaaa55
1208bd0a83
6b096bcad4
58dbf088f9
05ff7908f2
a703b736c9
e458f2ab53
a5a22e7085
9469c95b14
cf912dcf7a
ccd4af26e5
1682e873d6
54ae7e6d9b
991da7d87f
a498091aef
976c82bb2b
5b41a3bdff
19d1a3b785
3c7b0c4999
d6867b4ab6
11cf561307
239b58f63e
7da06655cb
e0b3a611ba
be1ad08a83
4e9c30f49d
0ee31a3f36
23bddf991e
da8d6607cf
6134574dac
b273232f87
358e4d851e
e8a1ed638a
4106e2a996
c2271df64e
d4b8570eb8
bd297e8ccc
923d282c8a
4d8c4d7089
e93ab76036
a9f02ecdac
93849a3b5b
c2e0d0c9ce
4a713363ee
3da8ccb7a7
676b8cff0b
57cf28fc90
8cf90e074d
74c2ed8306
5f03edd683
8b65c918d4
b5f0e3e5ee
179894ff37
e2fc89c637
cacf67b181
afb043e1d6
d9debb81ad
4c069fddd6
b450a2104d
7d0de17daf
bba4fb2203
a20c2020f8
a92b5eb929
6817494a41
5a0d8ee1b8
012e51c551
59ec1dbc9b
6bb28d13f9
811a862288
74d32fd4d7
cedb3d488d
86324d2d6b
648079ae24
e8d45398d6
0c461991f9
2a4c546a8b
750d4eb3f6
cc01b410a4
e5fbe72581
283f3723d1
ad8c7b37a1
a84ffb96e9
19c6b6f930
eed3c0533c
c72ba9828a
4965a1b909
1bba550469
d678322b18
efd8897bdf
7c7cec0993
3838ef0663
9e610af114
0177177238
a77e515c9b
4af16ab009
da35423198
9612d61e60
92f396df10
9557834342
288ba2fcda
f3920b02f7
2ec9dad3db
e11227fe2d
859931b78c
b591524ac3
dc26b4fce5
f8cf02a2da
a214e794f4
54d761b371
bea7a9b0e4
a46f4cff18
8eb2d600c7
ffb6c2a180
8e19a0fb07
79f4f96217
7f53390dce
e83f8e197a
d707f002eb
c0f69f7fa7
adf914115f
c166fe6127
9725e0fd76
5c4cd1b198
44f4658f37
b4997e7a7e
f26892ac3c
aae3851979
a17b3dc405
022614f155
874dc292ae
9442013b37
862b1c3c53
52c93f2046
3d13d5213b
103abc942e
f0236522f3
6a3b8fab06
5c288dc763
6d0d3ac612
1ec97733e5
ded67b746b
4590795cba
060fd36883
76a1f99df1
38766a4cb7
bcc518cf96
3fdb2c79bf
18f7a2ba0e
e32cebb153
02031bd835
7726cb14a0
23cfe8090b
d89d0a05b4
14d57ae2ec
d5f4b687bb
bdb880f9f2
22575a1c61
890297aa27
0fd602bc1b
f6470af971
d695d28e13
ffc14ea14c
25df3daba5
ce3cb2e31e
afe23986d2
0026f0c860
9e69b2aaa3
af71deb407
fe079cf0a3
cf85d49b6c
96cf2f7cf9
b0736d2d02
49213c1321
64dd3cb047
12fd52b6b7
27533d0e20
34a2eeb4a9
652e4ba1cb
639b5cf7c2
b5c1386645
041868dfb8
cfbc157477
5d44806064
fc8b99c862
24560b43c0
39ca385778
ef0531ad40
12540a8abc
0f5ed14fe2
ca55b89322
a3c7cb059d
0f8545133b
72fad1be2e
b7ce7f172b
248c041711
70b937e031
79521db8e0
015d3ecd00
89451b6d98
681cb6c8a4
c2d1316f65
5e8d8d070a
c7c0bfe810
e9c73b52db
7d24a5d45f
3ab309e00e
8822eef97e
7613f886d0
fe02a989bd
2bed40cfce
87ca1b96ae
5a4649c929
2e2cec05fd
b1afadd129
a59ad9a84e
2e889fb07d
d95c4f0127
1c58d11d62
e11c390c4d
2965915bed
da1cfd1945
8a29790327
7cd8f648c8
b8e6083e19
3f821bdcd1
9e05c81d9c
f1552b67a0
20d1d5b479
fdcc2f136e
5feb86ceee
ee783fa1be
0bcb4fe16d
4f57bd3ae6
cf42fe6a40
c4775521c6
ffa03bfda1
630cf30af5
bc5117fa4f
11e7284824
b2b91a9281
f541489d7d
6d9c554f6f
e532ab31ef
bf0705ec17
17b42b9fa4
41bdab49aa
8f89c55aca
b449312da8
52d4e8ec47
28e5b5759e
477c43971f
0a9921fa79
88abb323cb
f0b1aeaf8d
c8470b9a2a
d0ee90cd11
544a7ea022
4f5cabc725
a2f266277c
a4bfbf8a83
ddffacf07b
3375c26c41
ab68faef44
2e21df0661
af18cb138b
31c55a2adf
465dd1703d
a6304285b6
affd0cecd1
37640221c0
e4bd223d1c
0cde4e73d6
7b62dcb88c
c38dc6df7c
5668e4a4ea
1335f80362
704d3854df
44cc71d354
9a9aee9ac6
4fcc3a187e
10a76c701d
6e13923225
32890da29f
758554a40f
4563aea47e
35d6f3b8fc
b4e6ab12d9
81  .github/ISSUE_TEMPLATE/bug_report.yml  (vendored)
@@ -1,81 +0,0 @@
name: "Bug report"
description: Bug report
labels: [bug]
body:
  - type: markdown
    attributes:
      value: |
        Thanks for taking the time to fill out this bug report, please **confirm that your issue is not a duplicate issue and not because of your operation or version issues**
        感谢您花时间填写此错误报告,请**务必确认您的issue不是重复的且不是因为您的操作或版本问题**

  - type: checkboxes
    attributes:
      label: Please make sure of the following things
      description: |
        You must check all the following, otherwise your issue may be closed directly. Or you can go to the [discussions](https://github.com/alist-org/alist/discussions)
        您必须勾选以下所有内容,否则您的issue可能会被直接关闭。或者您可以去[讨论区](https://github.com/alist-org/alist/discussions)
      options:
        - label: |
            I have read the [documentation](https://alist.nn.ci).
            我已经阅读了[文档](https://alist.nn.ci)。
        - label: |
            I'm sure there are no duplicate issues or discussions.
            我确定没有重复的issue或讨论。
        - label: |
            I'm sure it's due to `AList` and not something else(such as [Network](https://alist.nn.ci/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host) ,`Dependencies` or `Operational`).
            我确定是`AList`的问题,而不是其他原因(例如[网络](https://alist.nn.ci/zh/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host),`依赖`或`操作`)。
        - label: |
            I'm sure this issue is not fixed in the latest version.
            我确定这个问题在最新版本中没有被修复。

  - type: input
    id: version
    attributes:
      label: AList Version / AList 版本
      description: |
        What version of our software are you running? Do not use `latest` or `master` as an answer.
        您使用的是哪个版本的软件?请不要使用`latest`或`master`作为答案。
      placeholder: v3.xx.xx
    validations:
      required: true
  - type: input
    id: driver
    attributes:
      label: Driver used / 使用的存储驱动
      description: |
        What storage driver are you using?
        您使用的是哪个存储驱动?
      placeholder: "for example: Onedrive"
    validations:
      required: true
  - type: textarea
    id: bug-description
    attributes:
      label: Describe the bug / 问题描述
    validations:
      required: true
  - type: textarea
    id: reproduction
    attributes:
      label: Reproduction / 复现链接
      description: |
        Please provide a link to a repo that can reproduce the problem you ran into. Please be aware that your issue may be closed directly if you don't provide it.
        请提供能复现此问题的链接,请知悉如果不提供它你的issue可能会被直接关闭。
    validations:
      required: true
  - type: textarea
    id: config
    attributes:
      label: Config / 配置
      description: |
        Please provide the configuration file of your `AList` application and take a screenshot of the relevant storage configuration. (hide privacy field)
        请提供您的`AList`应用的配置文件,并截图相关存储配置。(隐藏隐私字段)
    validations:
      required: true
  - type: textarea
    id: logs
    attributes:
      label: Logs / 日志
      description: |
        Please copy and paste any relevant log output.
        请复制粘贴错误日志,或者截图
5  .github/ISSUE_TEMPLATE/config.yml  (vendored)
@@ -1,5 +0,0 @@
blank_issues_enabled: false
contact_links:
  - name: Questions & Discussions
    url: https://github.com/alist-org/alist/discussions
    about: Use GitHub discussions for message-board style questions and discussions.
33  .github/ISSUE_TEMPLATE/feature_request.yml  (vendored)
@@ -1,33 +0,0 @@
name: "Feature request"
description: Feature request
labels: [enhancement]
body:
  - type: checkboxes
    attributes:
      label: Please make sure of the following things
      description: You may select more than one, even select all.
      options:
        - label: I have read the [documentation](https://alist.nn.ci).
        - label: I'm sure there are no duplicate issues or discussions.
        - label: I'm sure this feature is not implemented.
        - label: I'm sure it's a reasonable and popular requirement.
  - type: textarea
    id: feature-description
    attributes:
      label: Description of the feature / 需求描述
    validations:
      required: true
  - type: textarea
    id: suggested-solution
    attributes:
      label: Suggested solution / 实现思路
      description: |
        Solutions to achieve this requirement.
        实现此需求的解决思路。
  - type: textarea
    id: additional-context
    attributes:
      label: Additional context / 附件
      description: |
        Any other context or screenshots about the feature request here, or information you find helpful.
        相关的任何其他上下文或截图,或者你觉得有帮助的信息
21  .github/config.yml  (vendored)
@@ -1,21 +0,0 @@
# Configuration for welcome - https://github.com/behaviorbot/welcome

# Configuration for new-issue-welcome - https://github.com/behaviorbot/new-issue-welcome

# Comment to be posted to on first time issues
newIssueWelcomeComment: >
  Thanks for opening your first issue here! Be sure to follow the issue template!

# Configuration for new-pr-welcome - https://github.com/behaviorbot/new-pr-welcome

# Comment to be posted to on PRs from first time contributors in your repository
newPRWelcomeComment: >
  Thanks for opening this pull request! Please check out our contributing guidelines.

# Configuration for first-pr-merge - https://github.com/behaviorbot/first-pr-merge

# Comment to be posted to on pull requests merged by a first time user
firstPRMergeComment: >
  Congrats on merging your first pull request! We here at behavior bot are proud of you!

# It is recommend to include as many gifs and emojis as possible
21  .github/stale.yml  (vendored)
@@ -1,21 +0,0 @@
# Number of days of inactivity before an issue becomes stale
daysUntilStale: 44
# Number of days of inactivity before a stale issue is closed
daysUntilClose: 20
# Issues with these labels will never be considered stale
exemptLabels:
  - accepted
  - security
  - working
  - pr-welcome
# Label to use when marking an issue as stale
staleLabel: stale
# Comment to post when marking an issue as stale. Set to `false` to disable
markComment: >
  This issue has been automatically marked as stale because it has not had
  recent activity. It will be closed if no further activity occurs. Thank you
  for your contributions.
# Comment to post when closing a stale issue. Set to `false` to disable
closeComment: >
  This issue was closed due to inactive more than 52 days. You can reopen or
  recreate it if you think it should continue. Thank you for your contributions again.
71  .github/workflows/auto_lang.yml  (vendored)
@@ -1,71 +0,0 @@
name: auto_lang

on:
  push:
    branches:
      - 'main'
    paths:
      - 'drivers/**'
      - 'internal/bootstrap/data/setting.go'
      - 'internal/conf/const.go'
      - 'cmd/lang.go'
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
  auto_lang:
    strategy:
      matrix:
        platform: [ ubuntu-latest ]
        go-version: [ '1.21' ]
    name: auto generate lang.json
    runs-on: ${{ matrix.platform }}
    steps:
      - name: Setup go
        uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go-version }}

      - name: Checkout alist
        uses: actions/checkout@v4
        with:
          path: alist

      - name: Checkout alist-web
        uses: actions/checkout@v4
        with:
          repository: 'alist-org/alist-web'
          ref: main
          persist-credentials: false
          fetch-depth: 0
          path: alist-web

      - name: Generate lang
        run: |
          cd alist
          go run ./main.go lang
          cd ..

      - name: Copy lang file
        run: |
          cp -f ./alist/lang/*.json ./alist-web/src/lang/en/ 2>/dev/null || :

      - name: Commit git
        run: |
          cd alist-web
          git add .
          git config --local user.email "bot@nn.ci"
          git config --local user.name "IlaBot"
          git commit -m "chore: auto update i18n file" -a 2>/dev/null || :
          cd ..

      - name: Push lang files
        uses: ad-m/github-push-action@master
        with:
          github_token: ${{ secrets.MY_TOKEN }}
          branch: main
          directory: alist-web
          repository: alist-org/alist-web
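The core of this workflow is the `Generate lang` / `Copy lang file` pair. For local debugging, the same steps can be run by hand — a minimal sketch, assuming checkouts of alist and alist-web sitting side by side exactly as the workflow lays them out:

    # Regenerate the i18n JSON files locally (mirrors the workflow's
    # "Generate lang" and "Copy lang file" steps; directory layout assumed).
    cd alist
    go run ./main.go lang      # writes the generated JSON under ./lang/
    cd ..
    cp -f ./alist/lang/*.json ./alist-web/src/lang/en/ 2>/dev/null || :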
138  .github/workflows/beta_release.yml  (vendored)
@@ -1,138 +0,0 @@
name: beta release

on:
  push:
    branches: [ 'main' ]

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

permissions:
  contents: write

jobs:
  changelog:
    strategy:
      matrix:
        platform: [ ubuntu-latest ]
        go-version: [ '1.21' ]
    name: Beta Release Changelog
    runs-on: ${{ matrix.platform }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Create or update ref
        id: create-or-update-ref
        uses: ovsds/create-or-update-ref-action@v1
        with:
          ref: tags/beta
          sha: ${{ github.sha }}

      - name: Delete beta tag
        run: git tag -d beta
        continue-on-error: true

      - name: changelog # or changelogithub@0.12 if ensure the stable result
        id: changelog
        run: |
          git tag -l
          npx changelogithub --output CHANGELOG.md
          # npx changelogen@latest --output CHANGELOG.md

      - name: Upload assets
        uses: softprops/action-gh-release@v2
        with:
          body_path: CHANGELOG.md
          files: CHANGELOG.md
          prerelease: true
          tag_name: beta

  release:
    needs:
      - changelog
    strategy:
      matrix:
        include:
          - target: '!(*musl*|*windows-arm64*|*android*|*freebsd*)' # xgo
            hash: "md5"
          - target: 'linux-!(arm*)-musl*' #musl-not-arm
            hash: "md5-linux-musl"
          - target: 'linux-arm*-musl*' #musl-arm
            hash: "md5-linux-musl-arm"
          - target: 'windows-arm64' #win-arm64
            hash: "md5-windows-arm64"
          - target: 'android-*' #android
            hash: "md5-android"
          - target: 'freebsd-*' #freebsd
            hash: "md5-freebsd"

    name: Beta Release
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Setup Go
        uses: actions/setup-go@v5
        with:
          go-version: '1.22'

      - name: Setup web
        run: bash build.sh dev web

      - name: Build
        uses: go-cross/cgo-actions@v1
        with:
          targets: ${{ matrix.target }}
          musl-target-format: $os-$musl-$arch
          out-dir: build
          x-flags: |
            github.com/alist-org/alist/v3/internal/conf.BuiltAt=$built_at
            github.com/alist-org/alist/v3/internal/conf.GitAuthor=Xhofe
            github.com/alist-org/alist/v3/internal/conf.GitCommit=$git_commit
            github.com/alist-org/alist/v3/internal/conf.Version=$tag
            github.com/alist-org/alist/v3/internal/conf.WebVersion=dev

      - name: Compress
        run: |
          bash build.sh zip ${{ matrix.hash }}

      - name: Upload assets
        uses: softprops/action-gh-release@v2
        with:
          files: build/compress/*
          prerelease: true
          tag_name: beta

  desktop:
    needs:
      - release
    name: Beta Release Desktop
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repo
        uses: actions/checkout@v4
        with:
          repository: alist-org/desktop-release
          ref: main
          persist-credentials: false
          fetch-depth: 0

      - name: Commit
        run: |
          git config --local user.email "bot@nn.ci"
          git config --local user.name "IlaBot"
          git commit --allow-empty -m "Trigger build for ${{ github.sha }}"

      - name: Push commit
        uses: ad-m/github-push-action@master
        with:
          github_token: ${{ secrets.MY_TOKEN }}
          branch: main
          repository: alist-org/desktop-release
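The `target` values in the release matrix above are extended-glob patterns, with `!(...)` negating a group of alternatives. A minimal bash sketch of how such patterns partition target names (the sample names are illustrative, `shopt -s extglob` is assumed, and cgo-actions' own matcher may differ in detail):

    #!/usr/bin/env bash
    # Illustrates how the matrix's extended globs bucket build targets.
    shopt -s extglob
    for t in darwin-amd64 linux-amd64-musl linux-arm64-musl windows-arm64 android-arm64 freebsd-amd64; do
      case "$t" in
        linux-!(arm*)-musl*) echo "$t -> md5-linux-musl" ;;
        linux-arm*-musl*)    echo "$t -> md5-linux-musl-arm" ;;
        windows-arm64)       echo "$t -> md5-windows-arm64" ;;
        android-*)           echo "$t -> md5-android" ;;
        freebsd-*)           echo "$t -> md5-freebsd" ;;
        !(*musl*|*windows-arm64*|*android*|*freebsd*)) echo "$t -> md5 (xgo)" ;;
      esac
    done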
61  .github/workflows/build.yml  (vendored)
@@ -1,61 +0,0 @@
name: build

on:
  push:
    branches: [ 'main' ]
  pull_request:
    branches: [ 'main' ]

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
  build:
    strategy:
      matrix:
        platform: [ubuntu-latest]
        target:
          - darwin-amd64
          - darwin-arm64
          - windows-amd64
          - linux-arm64-musl
          - linux-amd64-musl
          - windows-arm64
          - android-arm64
    name: Build
    runs-on: ${{ matrix.platform }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - uses: benjlevesque/short-sha@v3.0
        id: short-sha

      - name: Setup Go
        uses: actions/setup-go@v5
        with:
          go-version: '1.22'

      - name: Setup web
        run: bash build.sh dev web

      - name: Build
        uses: go-cross/cgo-actions@v1
        with:
          targets: ${{ matrix.target }}
          musl-target-format: $os-$musl-$arch
          out-dir: build
          x-flags: |
            github.com/alist-org/alist/v3/internal/conf.BuiltAt=$built_at
            github.com/alist-org/alist/v3/internal/conf.GitAuthor=Xhofe
            github.com/alist-org/alist/v3/internal/conf.GitCommit=$git_commit
            github.com/alist-org/alist/v3/internal/conf.Version=$tag
            github.com/alist-org/alist/v3/internal/conf.WebVersion=dev

      - name: Upload artifact
        uses: actions/upload-artifact@v4
        with:
          name: alist_${{ env.SHA }}_${{ matrix.target }}
          path: build/*
22  .github/workflows/issue_close_question.yml  (vendored)
@@ -1,22 +0,0 @@
name: Close need info

on:
  schedule:
    - cron: "0 0 */1 * *"
  workflow_dispatch:

jobs:
  close-need-info:
    runs-on: ubuntu-latest
    steps:
      - name: close-issues
        uses: actions-cool/issues-helper@v3
        with:
          actions: 'close-issues'
          token: ${{ secrets.GITHUB_TOKEN }}
          labels: 'question'
          inactive-day: 3
          close-reason: 'not_planned'
          body: |
            Hello @${{ github.event.issue.user.login }}, this issue was closed due to no activities in 3 days.
            你好 @${{ github.event.issue.user.login }},此issue因超过3天未回复被关闭。
21  .github/workflows/issue_close_stale.yml  (vendored)
@@ -1,21 +0,0 @@
name: Close inactive

on:
  schedule:
    - cron: "0 0 */7 * *"
  workflow_dispatch:

jobs:
  close-inactive:
    runs-on: ubuntu-latest
    steps:
      - name: close-issues
        uses: actions-cool/issues-helper@v3
        with:
          actions: 'close-issues'
          token: ${{ secrets.GITHUB_TOKEN }}
          labels: 'stale'
          inactive-day: 8
          close-reason: 'not_planned'
          body: |
            Hello @${{ github.event.issue.user.login }}, this issue was closed due to inactive more than 52 days. You can reopen or recreate it if you think it should continue. Thank you for your contributions again.
25  .github/workflows/issue_duplicate.yml  (vendored)
@@ -1,25 +0,0 @@
name: Issue Duplicate

on:
  issues:
    types: [labeled]

jobs:
  create-comment:
    runs-on: ubuntu-latest
    if: github.event.label.name == 'duplicate'
    steps:
      - name: Create comment
        uses: actions-cool/issues-helper@v3
        with:
          actions: 'create-comment'
          token: ${{ secrets.GITHUB_TOKEN }}
          issue-number: ${{ github.event.issue.number }}
          body: |
            Hello @${{ github.event.issue.user.login }}, your issue is a duplicate and will be closed.
            你好 @${{ github.event.issue.user.login }},你的issue是重复的,将被关闭。
      - name: Close issue
        uses: actions-cool/issues-helper@v3
        with:
          actions: 'close-issue'
          token: ${{ secrets.GITHUB_TOKEN }}
25  .github/workflows/issue_invalid.yml  (vendored)
@@ -1,25 +0,0 @@
name: Issue Invalid

on:
  issues:
    types: [labeled]

jobs:
  create-comment:
    runs-on: ubuntu-latest
    if: github.event.label.name == 'invalid'
    steps:
      - name: Create comment
        uses: actions-cool/issues-helper@v3
        with:
          actions: 'create-comment'
          token: ${{ secrets.GITHUB_TOKEN }}
          issue-number: ${{ github.event.issue.number }}
          body: |
            Hello @${{ github.event.issue.user.login }}, your issue is invalid and will be closed.
            你好 @${{ github.event.issue.user.login }},你的issue无效,将被关闭。
      - name: Close issue
        uses: actions-cool/issues-helper@v3
        with:
          actions: 'close-issue'
          token: ${{ secrets.GITHUB_TOKEN }}
17  .github/workflows/issue_on_close.yml  (vendored)
@@ -1,17 +0,0 @@
name: Remove working label when issue closed

on:
  issues:
    types: [closed]

jobs:
  rm-working:
    runs-on: ubuntu-latest
    steps:
      - name: Remove working label
        uses: actions-cool/issues-helper@v3
        with:
          actions: 'remove-labels'
          token: ${{ secrets.GITHUB_TOKEN }}
          issue-number: ${{ github.event.issue.number }}
          labels: 'working,pr-welcome'
20  .github/workflows/issue_question.yml  (vendored)
@@ -1,20 +0,0 @@
name: Issue Question

on:
  issues:
    types: [labeled]

jobs:
  create-comment:
    runs-on: ubuntu-latest
    if: github.event.label.name == 'question'
    steps:
      - name: Create comment
        uses: actions-cool/issues-helper@v3.6.0
        with:
          actions: 'create-comment'
          token: ${{ secrets.GITHUB_TOKEN }}
          issue-number: ${{ github.event.issue.number }}
          body: |
            Hello @${{ github.event.issue.user.login }}, please input issue by template and add detail. Issues labeled by `question` will be closed if no activities in 3 days.
            你好 @${{ github.event.issue.user.login }},请按照issue模板填写, 并详细说明问题/日志记录/复现步骤/复现链接/实现思路或提供更多信息等, 3天内未回复issue自动关闭。
19  .github/workflows/issue_similarity.yml  (vendored)
@@ -1,19 +0,0 @@
name: Issues Similarity Analysis

on:
  issues:
    types: [opened, edited]

jobs:
  similarity-analysis:
    runs-on: ubuntu-latest
    steps:
      - name: analysis
        uses: actions-cool/issues-similarity-analysis@v1
        with:
          filter-threshold: 0.5
          comment-title: '### See'
          comment-body: '${index}. ${similarity} #${number}'
          show-footer: false
          show-mentioned: true
          since-days: 730
13  .github/workflows/issue_translate.yml  (vendored)
@@ -1,13 +0,0 @@
name: Translation Helper

on:
  pull_request_target:
    types: [opened]
  issues:
    types: [opened]

jobs:
  translate:
    runs-on: ubuntu-latest
    steps:
      - uses: actions-cool/translation-helper@v1.2.0
25  .github/workflows/issue_wontfix.yml  (vendored)
@@ -1,25 +0,0 @@
name: Issue Wontfix

on:
  issues:
    types: [labeled]

jobs:
  lock-issue:
    runs-on: ubuntu-latest
    if: github.event.label.name == 'wontfix'
    steps:
      - name: Create comment
        uses: actions-cool/issues-helper@v3
        with:
          actions: 'create-comment'
          token: ${{ secrets.GITHUB_TOKEN }}
          issue-number: ${{ github.event.issue.number }}
          body: |
            Hello @${{ github.event.issue.user.login }}, this issue will not be worked on and will be closed.
            你好 @${{ github.event.issue.user.login }},这不会被处理,将被关闭。
      - name: Close issue
        uses: actions-cool/issues-helper@v3
        with:
          actions: 'close-issue'
          token: ${{ secrets.GITHUB_TOKEN }}
92  .github/workflows/release.yml  (vendored)
@@ -1,92 +0,0 @@
name: release

on:
  release:
    types: [ published ]

jobs:
  release:
    strategy:
      matrix:
        platform: [ ubuntu-latest ]
        go-version: [ '1.21' ]
    name: Release
    runs-on: ${{ matrix.platform }}
    steps:
      - name: Free Disk Space (Ubuntu)
        uses: jlumbroso/free-disk-space@main
        with:
          # this might remove tools that are actually needed,
          # if set to "true" but frees about 6 GB
          tool-cache: false

          # all of these default to true, but feel free to set to
          # "false" if necessary for your workflow
          android: true
          dotnet: true
          haskell: true
          large-packages: true
          docker-images: true
          swap-storage: true

      - name: Prerelease
        uses: irongut/EditRelease@v1.2.0
        with:
          token: ${{ secrets.MY_TOKEN }}
          id: ${{ github.event.release.id }}
          prerelease: true

      - name: Setup Go
        uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go-version }}

      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Install dependencies
        run: |
          sudo snap install zig --classic --beta
          docker pull crazymax/xgo:latest
          go install github.com/crazy-max/xgo@latest
          sudo apt install upx

      - name: Build
        run: |
          bash build.sh release

      - name: Upload assets
        uses: softprops/action-gh-release@v2
        with:
          files: build/compress/*
          prerelease: false

  release_desktop:
    needs: release
    name: Release desktop
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repo
        uses: actions/checkout@v4
        with:
          repository: alist-org/desktop-release
          ref: main
          persist-credentials: false
          fetch-depth: 0

      - name: Add tag
        run: |
          git config --local user.email "bot@nn.ci"
          git config --local user.name "IlaBot"
          version=$(wget -qO- -t1 -T2 "https://api.github.com/repos/alist-org/alist/releases/latest" | grep "tag_name" | head -n 1 | awk -F ":" '{print $2}' | sed 's/\"//g;s/,//g;s/ //g')
          git tag -a $version -m "release $version"

      - name: Push tags
        uses: ad-m/github-push-action@master
        with:
          github_token: ${{ secrets.MY_TOKEN }}
          branch: main
          repository: alist-org/desktop-release
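The `Add tag` step above parses the GitHub API response with a grep/awk/sed chain. For reference, an equivalent extraction using `jq` — an assumption, since the workflow itself does not install or use jq — would be:

    # Fetch the latest alist release tag; jq replaces the manual JSON parsing.
    version=$(curl -fsSL "https://api.github.com/repos/alist-org/alist/releases/latest" | jq -r '.tag_name')
    echo "release $version"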
34  .github/workflows/release_android.yml  (vendored)
@@ -1,34 +0,0 @@
name: release_android

on:
  release:
    types: [ published ]

jobs:
  release_android:
    strategy:
      matrix:
        platform: [ ubuntu-latest ]
        go-version: [ '1.21' ]
    name: Release
    runs-on: ${{ matrix.platform }}
    steps:
      - name: Setup Go
        uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go-version }}

      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Build
        run: |
          bash build.sh release android

      - name: Upload assets
        uses: softprops/action-gh-release@v2
        with:
          files: build/compress/*
34  .github/workflows/release_freebsd.yml  (vendored)
@@ -1,34 +0,0 @@
name: release_freebsd

on:
  release:
    types: [ published ]

jobs:
  release_freebsd:
    strategy:
      matrix:
        platform: [ ubuntu-latest ]
        go-version: [ '1.21' ]
    name: Release
    runs-on: ${{ matrix.platform }}
    steps:
      - name: Setup Go
        uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go-version }}

      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Build
        run: |
          bash build.sh release freebsd

      - name: Upload assets
        uses: softprops/action-gh-release@v2
        with:
          files: build/compress/*
34  .github/workflows/release_linux_musl.yml  (vendored)
@@ -1,34 +0,0 @@
name: release_linux_musl

on:
  release:
    types: [ published ]

jobs:
  release_linux_musl:
    strategy:
      matrix:
        platform: [ ubuntu-latest ]
        go-version: [ '1.21' ]
    name: Release
    runs-on: ${{ matrix.platform }}
    steps:
      - name: Setup Go
        uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go-version }}

      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Build
        run: |
          bash build.sh release linux_musl

      - name: Upload assets
        uses: softprops/action-gh-release@v2
        with:
          files: build/compress/*
34  .github/workflows/release_linux_musl_arm.yml  (vendored)
@@ -1,34 +0,0 @@
name: release_linux_musl_arm

on:
  release:
    types: [ published ]

jobs:
  release_linux_musl_arm:
    strategy:
      matrix:
        platform: [ ubuntu-latest ]
        go-version: [ '1.21' ]
    name: Release
    runs-on: ${{ matrix.platform }}
    steps:
      - name: Setup Go
        uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go-version }}

      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Build
        run: |
          bash build.sh release linux_musl_arm

      - name: Upload assets
        uses: softprops/action-gh-release@v2
        with:
          files: build/compress/*
43  Dockerfile
@@ -1,43 +0,0 @@
FROM alpine:edge as builder
LABEL stage=go-builder
WORKDIR /app/
RUN apk add --no-cache bash curl gcc git go musl-dev
COPY go.mod go.sum ./
RUN go mod download
COPY ./ ./
RUN bash build.sh release docker

FROM alpine:edge

ARG INSTALL_FFMPEG=false
ARG INSTALL_ARIA2=false
LABEL MAINTAINER="i@nn.ci"

WORKDIR /opt/alist/

RUN apk update && \
    apk upgrade --no-cache && \
    apk add --no-cache bash ca-certificates su-exec tzdata; \
    [ "$INSTALL_FFMPEG" = "true" ] && apk add --no-cache ffmpeg; \
    [ "$INSTALL_ARIA2" = "true" ] && apk add --no-cache curl aria2 && \
    mkdir -p /opt/aria2/.aria2 && \
    wget https://github.com/P3TERX/aria2.conf/archive/refs/heads/master.tar.gz -O /tmp/aria-conf.tar.gz && \
    tar -zxvf /tmp/aria-conf.tar.gz -C /opt/aria2/.aria2 --strip-components=1 && rm -f /tmp/aria-conf.tar.gz && \
    sed -i 's|rpc-secret|#rpc-secret|g' /opt/aria2/.aria2/aria2.conf && \
    sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/aria2.conf && \
    sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/script.conf && \
    sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/aria2.conf && \
    sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/script.conf && \
    touch /opt/aria2/.aria2/aria2.session && \
    /opt/aria2/.aria2/tracker.sh ; \
    rm -rf /var/cache/apk/*

COPY --from=builder /app/bin/alist ./
COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /opt/alist/alist && \
    chmod +x /entrypoint.sh && /entrypoint.sh version

ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
VOLUME /opt/alist/data/
EXPOSE 5244 5245
CMD [ "/entrypoint.sh" ]
@@ -1,35 +0,0 @@
FROM alpine:edge

ARG TARGETPLATFORM
ARG INSTALL_FFMPEG=false
ARG INSTALL_ARIA2=false
LABEL MAINTAINER="i@nn.ci"

WORKDIR /opt/alist/

RUN apk update && \
    apk upgrade --no-cache && \
    apk add --no-cache bash ca-certificates su-exec tzdata; \
    [ "$INSTALL_FFMPEG" = "true" ] && apk add --no-cache ffmpeg; \
    [ "$INSTALL_ARIA2" = "true" ] && apk add --no-cache curl aria2 && \
    mkdir -p /opt/aria2/.aria2 && \
    wget https://github.com/P3TERX/aria2.conf/archive/refs/heads/master.tar.gz -O /tmp/aria-conf.tar.gz && \
    tar -zxvf /tmp/aria-conf.tar.gz -C /opt/aria2/.aria2 --strip-components=1 && rm -f /tmp/aria-conf.tar.gz && \
    sed -i 's|rpc-secret|#rpc-secret|g' /opt/aria2/.aria2/aria2.conf && \
    sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/aria2.conf && \
    sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/script.conf && \
    sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/aria2.conf && \
    sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/script.conf && \
    touch /opt/aria2/.aria2/aria2.session && \
    /opt/aria2/.aria2/tracker.sh ; \
    rm -rf /var/cache/apk/*

COPY /build/${TARGETPLATFORM}/alist ./
COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /opt/alist/alist && \
    chmod +x /entrypoint.sh && /entrypoint.sh version

ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
VOLUME /opt/alist/data/
EXPOSE 5244 5245
CMD [ "/entrypoint.sh" ]
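Both Dockerfiles above accept the same `INSTALL_FFMPEG`/`INSTALL_ARIA2` build arguments, expose ports 5244/5245, and keep data under /opt/alist/data/. A hedged local build-and-run sketch (the image tag and host path are illustrative, not from the diff):

    # Build with aria2 support baked in, then run with persistent data.
    docker build --build-arg INSTALL_ARIA2=true -t alist:local .
    docker run -d --name alist -p 5244:5244 \
      -v "$PWD/data:/opt/alist/data" alist:local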
141  README.md
@@ -1,141 +0,0 @@
<div align="center">
  <a href="https://alist.nn.ci"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
  <p><em>🗂️A file list program that supports multiple storages, powered by Gin and Solidjs.</em></p>
  <div>
    <a href="https://goreportcard.com/report/github.com/alist-org/alist/v3">
      <img src="https://goreportcard.com/badge/github.com/alist-org/alist/v3" alt="latest version" />
    </a>
    <a href="https://github.com/alist-org/alist/blob/main/LICENSE">
      <img src="https://img.shields.io/github/license/Xhofe/alist" alt="License" />
    </a>
    <a href="https://github.com/alist-org/alist/actions?query=workflow%3ABuild">
      <img src="https://img.shields.io/github/actions/workflow/status/Xhofe/alist/build.yml?branch=main" alt="Build status" />
    </a>
    <a href="https://github.com/alist-org/alist/releases">
      <img src="https://img.shields.io/github/release/Xhofe/alist" alt="latest version" />
    </a>
    <a title="Crowdin" target="_blank" href="https://crwd.in/alist">
      <img src="https://badges.crowdin.net/alist/localized.svg">
    </a>
  </div>
  <div>
    <a href="https://github.com/alist-org/alist/discussions">
      <img src="https://img.shields.io/github/discussions/Xhofe/alist?color=%23ED8936" alt="discussions" />
    </a>
    <a href="https://discord.gg/F4ymsH4xv2">
      <img src="https://img.shields.io/discord/1018870125102895134?logo=discord" alt="discussions" />
    </a>
    <a href="https://github.com/alist-org/alist/releases">
      <img src="https://img.shields.io/github/downloads/Xhofe/alist/total?color=%239F7AEA&logo=github" alt="Downloads" />
    </a>
    <a href="https://hub.docker.com/r/xhofe/alist">
      <img src="https://img.shields.io/docker/pulls/xhofe/alist?color=%2348BB78&logo=docker&label=pulls" alt="Downloads" />
    </a>
    <a href="https://alist.nn.ci/guide/sponsor.html">
      <img src="https://img.shields.io/badge/%24-sponsor-F87171.svg" alt="sponsor" />
    </a>
  </div>
</div>

---

English | [中文](./README_cn.md) | [日本語](./README_ja.md) | [Contributing](./CONTRIBUTING.md) | [CODE_OF_CONDUCT](./CODE_OF_CONDUCT.md)

## Features

- [x] Multiple storages
  - [x] Local storage
  - [x] [Aliyundrive](https://www.alipan.com/)
  - [x] OneDrive / Sharepoint ([global](https://www.office.com/), [cn](https://portal.partner.microsoftonline.cn),de,us)
  - [x] [189cloud](https://cloud.189.cn) (Personal, Family)
  - [x] [GoogleDrive](https://drive.google.com/)
  - [x] [123pan](https://www.123pan.com/)
  - [x] FTP / SFTP
  - [x] [PikPak](https://www.mypikpak.com/)
  - [x] [S3](https://aws.amazon.com/s3/)
  - [x] [Seafile](https://seafile.com/)
  - [x] [UPYUN Storage Service](https://www.upyun.com/products/file-storage)
  - [x] WebDav(Support OneDrive/SharePoint without API)
  - [x] Teambition([China](https://www.teambition.com/ ),[International](https://us.teambition.com/ ))
  - [x] [Mediatrack](https://www.mediatrack.cn/)
  - [x] [139yun](https://yun.139.com/) (Personal, Family, Group)
  - [x] [YandexDisk](https://disk.yandex.com/)
  - [x] [BaiduNetdisk](http://pan.baidu.com/)
  - [x] [Terabox](https://www.terabox.com/main)
  - [x] [UC](https://drive.uc.cn)
  - [x] [Quark](https://pan.quark.cn)
  - [x] [Thunder](https://pan.xunlei.com)
  - [x] [Lanzou](https://www.lanzou.com/)
  - [x] [ILanzou](https://www.ilanzou.com/)
  - [x] [Aliyundrive share](https://www.alipan.com/)
  - [x] [Google photo](https://photos.google.com/)
  - [x] [Mega.nz](https://mega.nz)
  - [x] [Baidu photo](https://photo.baidu.com/)
  - [x] SMB
  - [x] [115](https://115.com/)
  - [X] Cloudreve
  - [x] [Dropbox](https://www.dropbox.com/)
  - [x] [FeijiPan](https://www.feijipan.com/)
  - [x] [dogecloud](https://www.dogecloud.com/product/oss)
- [x] Easy to deploy and out-of-the-box
- [x] File preview (PDF, markdown, code, plain text, ...)
- [x] Image preview in gallery mode
- [x] Video and audio preview, support lyrics and subtitles
- [x] Office documents preview (docx, pptx, xlsx, ...)
- [x] `README.md` preview rendering
- [x] File permalink copy and direct file download
- [x] Dark mode
- [x] I18n
- [x] Protected routes (password protection and authentication)
- [x] WebDav (see https://alist.nn.ci/guide/webdav.html for details)
- [x] [Docker Deploy](https://hub.docker.com/r/xhofe/alist)
- [x] Cloudflare Workers proxy
- [x] File/Folder package download
- [x] Web upload(Can allow visitors to upload), delete, mkdir, rename, move and copy
- [x] Offline download
- [x] Copy files between two storage
- [x] Multi-thread downloading acceleration for single-thread download/stream

## Document

<https://alistgo.com/>

## Demo

<https://al.nn.ci>

## Discussion

Please go to our [discussion forum](https://github.com/alist-org/alist/discussions) for general questions, **issues are for bug reports and feature requests only.**

## Sponsor

AList is an open-source software, if you happen to like this project and want me to keep going, please consider sponsoring me or providing a single donation! Thanks for all the love and support:
https://alist.nn.ci/guide/sponsor.html

### Special sponsors

- [VidHub](https://apps.apple.com/app/apple-store/id1659622164?pt=118612019&ct=alist&mt=8) - An elegant cloud video player within the Apple ecosystem. Support for iPhone, iPad, Mac, and Apple TV.
- [亚洲云](https://www.asiayun.com/aff/QQCOOQKZ) - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商 (sponsored Chinese API server)
- [找资源](http://zhaoziyuan2.cc/) - 阿里云盘资源搜索引擎

## Contributors

Thanks goes to these wonderful people:

[](https://github.com/alist-org/alist/graphs/contributors)

## License

The `AList` is open-source software licensed under the AGPL-3.0 license.

## Disclaimer

- This program is a free and open source project. It is designed to share files on the network disk, which is convenient for downloading and learning Golang. Please abide by relevant laws and regulations when using it, and do not abuse it;
- This program is implemented by calling the official sdk/interface, without destroying the official interface behavior;
- This program only does 302 redirect/traffic forwarding, and does not intercept, store, or tamper with any user data;
- Before using this program, you should understand and bear the corresponding risks, including but not limited to account ban, download speed limit, etc., which is none of this program's business;
- If there is any infringement, please contact me by [email](mailto:i@nn.ci), and it will be dealt with in time.

---

> [@GitHub](https://github.com/alist-org) · [@TelegramGroup](https://t.me/alist_chat) · [@Discord](https://discord.gg/F4ymsH4xv2)
139  README_cn.md
@@ -1,139 +0,0 @@
<div align="center">
  <a href="https://alist.nn.ci"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
  <p><em>🗂一个支持多存储的文件列表程序,使用 Gin 和 Solidjs。</em></p>
  <div>
    <a href="https://goreportcard.com/report/github.com/alist-org/alist/v3">
      <img src="https://goreportcard.com/badge/github.com/alist-org/alist/v3" alt="latest version" />
    </a>
    <a href="https://github.com/alist-org/alist/blob/main/LICENSE">
      <img src="https://img.shields.io/github/license/Xhofe/alist" alt="License" />
    </a>
    <a href="https://github.com/alist-org/alist/actions?query=workflow%3ABuild">
      <img src="https://img.shields.io/github/actions/workflow/status/Xhofe/alist/build.yml?branch=main" alt="Build status" />
    </a>
    <a href="https://github.com/alist-org/alist/releases">
      <img src="https://img.shields.io/github/release/Xhofe/alist" alt="latest version" />
    </a>
    <a title="Crowdin" target="_blank" href="https://crwd.in/alist">
      <img src="https://badges.crowdin.net/alist/localized.svg">
    </a>
  </div>
  <div>
    <a href="https://github.com/alist-org/alist/discussions">
      <img src="https://img.shields.io/github/discussions/Xhofe/alist?color=%23ED8936" alt="discussions" />
    </a>
    <a href="https://discord.gg/F4ymsH4xv2">
      <img src="https://img.shields.io/discord/1018870125102895134?logo=discord" alt="discussions" />
    </a>
    <a href="https://github.com/alist-org/alist/releases">
      <img src="https://img.shields.io/github/downloads/Xhofe/alist/total?color=%239F7AEA&logo=github" alt="Downloads" />
    </a>
    <a href="https://hub.docker.com/r/xhofe/alist">
      <img src="https://img.shields.io/docker/pulls/xhofe/alist?color=%2348BB78&logo=docker&label=pulls" alt="Downloads" />
    </a>
    <a href="https://alist.nn.ci/zh/guide/sponsor.html">
      <img src="https://img.shields.io/badge/%24-sponsor-F87171.svg" alt="sponsor" />
    </a>
  </div>
</div>

---

[English](./README.md) | 中文 | [日本語](./README_ja.md) | [Contributing](./CONTRIBUTING.md) | [CODE_OF_CONDUCT](./CODE_OF_CONDUCT.md)

## 功能

- [x] 多种存储
  - [x] 本地存储
  - [x] [阿里云盘](https://www.alipan.com/)
  - [x] OneDrive / Sharepoint([国际版](https://www.office.com/), [世纪互联](https://portal.partner.microsoftonline.cn),de,us)
  - [x] [天翼云盘](https://cloud.189.cn) (个人云, 家庭云)
  - [x] [GoogleDrive](https://drive.google.com/)
  - [x] [123云盘](https://www.123pan.com/)
  - [x] FTP / SFTP
  - [x] [PikPak](https://www.mypikpak.com/)
  - [x] [S3](https://aws.amazon.com/cn/s3/)
  - [x] [Seafile](https://seafile.com/)
  - [x] [又拍云对象存储](https://www.upyun.com/products/file-storage)
  - [x] WebDav(支持无API的OneDrive/SharePoint)
  - [x] Teambition([中国](https://www.teambition.com/ ),[国际](https://us.teambition.com/ ))
  - [x] [分秒帧](https://www.mediatrack.cn/)
  - [x] [和彩云](https://yun.139.com/) (个人云, 家庭云,共享群组)
  - [x] [Yandex.Disk](https://disk.yandex.com/)
  - [x] [百度网盘](http://pan.baidu.com/)
  - [x] [UC网盘](https://drive.uc.cn)
  - [x] [夸克网盘](https://pan.quark.cn)
  - [x] [迅雷网盘](https://pan.xunlei.com)
  - [x] [蓝奏云](https://www.lanzou.com/)
  - [x] [蓝奏云优享版](https://www.ilanzou.com/)
  - [x] [阿里云盘分享](https://www.alipan.com/)
  - [x] [谷歌相册](https://photos.google.com/)
  - [x] [Mega.nz](https://mega.nz)
  - [x] [一刻相册](https://photo.baidu.com/)
  - [x] SMB
  - [x] [115](https://115.com/)
  - [X] Cloudreve
  - [x] [Dropbox](https://www.dropbox.com/)
  - [x] [飞机盘](https://www.feijipan.com/)
  - [x] [多吉云](https://www.dogecloud.com/product/oss)
- [x] 部署方便,开箱即用
- [x] 文件预览(PDF、markdown、代码、纯文本……)
- [x] 画廊模式下的图像预览
- [x] 视频和音频预览,支持歌词和字幕
- [x] Office 文档预览(docx、pptx、xlsx、...)
- [x] `README.md` 预览渲染
- [x] 文件永久链接复制和直接文件下载
- [x] 黑暗模式
- [x] 国际化
- [x] 受保护的路由(密码保护和身份验证)
- [x] WebDav (具体见 https://alist.nn.ci/zh/guide/webdav.html)
- [x] [Docker 部署](https://hub.docker.com/r/xhofe/alist)
- [x] Cloudflare workers 中转
- [x] 文件/文件夹打包下载
- [x] 网页上传(可以允许访客上传),删除,新建文件夹,重命名,移动,复制
- [x] 离线下载
- [x] 跨存储复制文件
- [x] 单线程下载/串流的多线程下载加速

## 文档

<https://alist.nn.ci/zh/>

## Demo

<https://al.nn.ci>

## 讨论

一般问题请到[讨论论坛](https://github.com/alist-org/alist/discussions) ,**issue仅针对错误报告和功能请求。**

## 赞助

AList 是一个开源软件,如果你碰巧喜欢这个项目,并希望我继续下去,请考虑赞助我或提供一个单一的捐款!感谢所有的爱和支持:https://alist.nn.ci/zh/guide/sponsor.html

### 特别赞助

- [VidHub](https://apps.apple.com/app/apple-store/id1659622164?pt=118612019&ct=alist&mt=8) - 苹果生态下优雅的网盘视频播放器,iPhone,iPad,Mac,Apple TV全平台支持。
- [亚洲云](https://www.asiayun.com/aff/QQCOOQKZ) - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商 (国内API服务器赞助)
- [找资源](http://zhaoziyuan2.cc/) - 阿里云盘资源搜索引擎

## 贡献者

Thanks goes to these wonderful people:

[](https://github.com/alist-org/alist/graphs/contributors)

## 许可

`AList` 是在 AGPL-3.0 许可下许可的开源软件。

## 免责声明

- 本程序为免费开源项目,旨在分享网盘文件,方便下载以及学习golang,使用时请遵守相关法律法规,请勿滥用;
- 本程序通过调用官方sdk/接口实现,无破坏官方接口行为;
- 本程序仅做302重定向/流量转发,不拦截、存储、篡改任何用户数据;
- 在使用本程序之前,你应了解并承担相应的风险,包括但不限于账号被ban,下载限速等,与本程序无关;
- 如有侵权,请通过[邮件](mailto:i@nn.ci)与我联系,会及时处理。

---

> [@博客](https://nn.ci/) · [@GitHub](https://github.com/alist-org) · [@Telegram群](https://t.me/alist_chat) · [@Discord](https://discord.gg/F4ymsH4xv2)
141  README_ja.md
@@ -1,141 +0,0 @@
<div align="center">
  <a href="https://alist.nn.ci"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
  <p><em>🗂️Gin と Solidjs による、複数のストレージをサポートするファイルリストプログラム。</em></p>
  <div>
    <a href="https://goreportcard.com/report/github.com/alist-org/alist/v3">
      <img src="https://goreportcard.com/badge/github.com/alist-org/alist/v3" alt="latest version" />
    </a>
    <a href="https://github.com/alist-org/alist/blob/main/LICENSE">
      <img src="https://img.shields.io/github/license/Xhofe/alist" alt="License" />
    </a>
    <a href="https://github.com/alist-org/alist/actions?query=workflow%3ABuild">
      <img src="https://img.shields.io/github/actions/workflow/status/Xhofe/alist/build.yml?branch=main" alt="Build status" />
    </a>
    <a href="https://github.com/alist-org/alist/releases">
      <img src="https://img.shields.io/github/release/Xhofe/alist" alt="latest version" />
    </a>
    <a title="Crowdin" target="_blank" href="https://crwd.in/alist">
      <img src="https://badges.crowdin.net/alist/localized.svg">
    </a>
  </div>
  <div>
    <a href="https://github.com/alist-org/alist/discussions">
      <img src="https://img.shields.io/github/discussions/Xhofe/alist?color=%23ED8936" alt="discussions" />
    </a>
    <a href="https://discord.gg/F4ymsH4xv2">
      <img src="https://img.shields.io/discord/1018870125102895134?logo=discord" alt="discussions" />
    </a>
    <a href="https://github.com/alist-org/alist/releases">
      <img src="https://img.shields.io/github/downloads/Xhofe/alist/total?color=%239F7AEA&logo=github" alt="Downloads" />
    </a>
    <a href="https://hub.docker.com/r/xhofe/alist">
      <img src="https://img.shields.io/docker/pulls/xhofe/alist?color=%2348BB78&logo=docker&label=pulls" alt="Downloads" />
    </a>
    <a href="https://alist.nn.ci/guide/sponsor.html">
      <img src="https://img.shields.io/badge/%24-sponsor-F87171.svg" alt="sponsor" />
    </a>
  </div>
</div>

---

[English](./README.md) | [中文](./README_cn.md) | 日本語 | [Contributing](./CONTRIBUTING.md) | [CODE_OF_CONDUCT](./CODE_OF_CONDUCT.md)

## 特徴

- [x] マルチストレージ
  - [x] ローカルストレージ
  - [x] [Aliyundrive](https://www.alipan.com/)
  - [x] OneDrive / Sharepoint ([グローバル](https://www.office.com/), [cn](https://portal.partner.microsoftonline.cn),de,us)
  - [x] [189cloud](https://cloud.189.cn) (Personal, Family)
  - [x] [GoogleDrive](https://drive.google.com/)
  - [x] [123pan](https://www.123pan.com/)
  - [x] FTP / SFTP
  - [x] [PikPak](https://www.mypikpak.com/)
  - [x] [S3](https://aws.amazon.com/s3/)
  - [x] [Seafile](https://seafile.com/)
  - [x] [UPYUN Storage Service](https://www.upyun.com/products/file-storage)
  - [x] WebDav(Support OneDrive/SharePoint without API)
  - [x] Teambition([China](https://www.teambition.com/ ),[International](https://us.teambition.com/ ))
  - [x] [Mediatrack](https://www.mediatrack.cn/)
  - [x] [139yun](https://yun.139.com/) (Personal, Family, Group)
  - [x] [YandexDisk](https://disk.yandex.com/)
  - [x] [BaiduNetdisk](http://pan.baidu.com/)
  - [x] [Terabox](https://www.terabox.com/main)
  - [x] [UC](https://drive.uc.cn)
  - [x] [Quark](https://pan.quark.cn)
  - [x] [Thunder](https://pan.xunlei.com)
  - [x] [Lanzou](https://www.lanzou.com/)
  - [x] [ILanzou](https://www.ilanzou.com/)
  - [x] [Aliyundrive share](https://www.alipan.com/)
  - [x] [Google photo](https://photos.google.com/)
  - [x] [Mega.nz](https://mega.nz)
  - [x] [Baidu photo](https://photo.baidu.com/)
  - [x] SMB
  - [x] [115](https://115.com/)
  - [X] Cloudreve
  - [x] [Dropbox](https://www.dropbox.com/)
  - [x] [FeijiPan](https://www.feijipan.com/)
  - [x] [dogecloud](https://www.dogecloud.com/product/oss)
- [x] デプロイが簡単で、すぐに使える
- [x] ファイルプレビュー (PDF, マークダウン, コード, プレーンテキスト, ...)
- [x] ギャラリーモードでの画像プレビュー
- [x] ビデオとオーディオのプレビュー、歌詞と字幕のサポート
- [x] Office ドキュメントのプレビュー (docx, pptx, xlsx, ...)
- [x] `README.md` のプレビューレンダリング
- [x] ファイルのパーマリンクコピーと直接ダウンロード
- [x] ダークモード
- [x] 国際化
- [x] 保護されたルート (パスワード保護と認証)
- [x] WebDav (詳細は https://alist.nn.ci/guide/webdav.html を参照)
- [x] [Docker デプロイ](https://hub.docker.com/r/xhofe/alist)
- [x] Cloudflare ワーカープロキシ
- [x] ファイル/フォルダパッケージのダウンロード
- [x] ウェブアップロード(訪問者にアップロードを許可できる), 削除, mkdir, 名前変更, 移動, コピー
- [x] オフラインダウンロード
- [x] 二つのストレージ間でファイルをコピー
- [x] シングルスレッドのダウンロード/ストリーム向けのマルチスレッド ダウンロード アクセラレーション

## ドキュメント

<https://alist.nn.ci/>

## デモ

<https://al.nn.ci>

## ディスカッション

一般的なご質問は[ディスカッションフォーラム](https://github.com/alist-org/alist/discussions)をご利用ください。**問題はバグレポートと機能リクエストのみです。**

## スポンサー

AList はオープンソースのソフトウェアです。もしあなたがこのプロジェクトを気に入ってくださり、続けて欲しいと思ってくださるなら、ぜひスポンサーになってくださるか、1口でも寄付をしてくださるようご検討ください!すべての愛とサポートに感謝します:
https://alist.nn.ci/guide/sponsor.html

### スペシャルスポンサー

- [VidHub](https://apps.apple.com/app/apple-store/id1659622164?pt=118612019&ct=alist&mt=8) - An elegant cloud video player within the Apple ecosystem. Support for iPhone, iPad, Mac, and Apple TV.
- [亚洲云](https://www.asiayun.com/aff/QQCOOQKZ) - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商 (sponsored Chinese API server)
- [找资源](http://zhaoziyuan2.cc/) - 阿里云盘资源搜索引擎

## コントリビューター

これらの素晴らしい人々に感謝します:

[](https://github.com/alist-org/alist/graphs/contributors)

## ライセンス

`AList` は AGPL-3.0 ライセンスの下でライセンスされたオープンソースソフトウェアです。

## 免責事項

- このプログラムはフリーでオープンソースのプロジェクトです。ネットワークディスク上でファイルを共有するように設計されており、golang のダウンロードや学習に便利です。利用にあたっては関連法規を遵守し、悪用しないようお願いします;
- このプログラムは、公式インターフェースの動作を破壊することなく、公式 sdk/インターフェースを呼び出すことで実装されています;
- このプログラムは、302リダイレクト/トラフィック転送のみを行い、いかなるユーザーデータも傍受、保存、改ざんしません;
- このプログラムを使用する前に、アカウントの禁止、ダウンロード速度の制限など、対応するリスクを理解し、負担する必要があります;
- もし侵害があれば、[メール](mailto:i@nn.ci)で私に連絡してください。

---

> [@Blog](https://nn.ci/) · [@GitHub](https://github.com/alist-org) · [@TelegramGroup](https://t.me/alist_chat) · [@Discord](https://discord.gg/F4ymsH4xv2)
11  buf.gen.yaml  (new file)
@@ -0,0 +1,11 @@
version: v1
plugins:
  - plugin: buf.build/protocolbuffers/go:v1.36.7
    out: .
    opt:
      - paths=source_relative
  - plugin: buf.build/grpc/go:v1.5.1
    out: .
    opt:
      - paths=source_relative
      - require_unimplemented_servers=false
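This new file configures protobuf/gRPC code generation for Go through the buf CLI. A minimal usage sketch, assuming buf is installed and the repository defines its proto module elsewhere (nothing in this diff shows the .proto files):

    # Generate Go and gRPC stubs according to buf.gen.yaml.
    buf generate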
341
build.sh
341
build.sh
@ -1,341 +0,0 @@
|
appName="alist"
builtAt="$(date +'%F %T %z')"
gitAuthor="Xhofe <i@nn.ci>"
gitCommit=$(git log --pretty=format:"%h" -1)

if [ "$1" = "dev" ]; then
  version="dev"
  webVersion="dev"
elif [ "$1" = "beta" ]; then
  version="beta"
  webVersion="dev"
else
  git tag -d beta
  version=$(git describe --abbrev=0 --tags)
  webVersion=$(wget -qO- -t1 -T2 "https://api.github.com/repos/alist-org/alist-web/releases/latest" | grep "tag_name" | head -n 1 | awk -F ":" '{print $2}' | sed 's/\"//g;s/,//g;s/ //g')
fi

echo "backend version: $version"
echo "frontend version: $webVersion"

ldflags="\
-w -s \
-X 'github.com/alist-org/alist/v3/internal/conf.BuiltAt=$builtAt' \
-X 'github.com/alist-org/alist/v3/internal/conf.GitAuthor=$gitAuthor' \
-X 'github.com/alist-org/alist/v3/internal/conf.GitCommit=$gitCommit' \
-X 'github.com/alist-org/alist/v3/internal/conf.Version=$version' \
-X 'github.com/alist-org/alist/v3/internal/conf.WebVersion=$webVersion' \
"

FetchWebDev() {
  curl -L https://codeload.github.com/alist-org/web-dist/tar.gz/refs/heads/dev -o web-dist-dev.tar.gz
  tar -zxvf web-dist-dev.tar.gz
  rm -rf public/dist
  mv -f web-dist-dev/dist public
  rm -rf web-dist-dev web-dist-dev.tar.gz
}

FetchWebRelease() {
  curl -L https://github.com/alist-org/alist-web/releases/latest/download/dist.tar.gz -o dist.tar.gz
  tar -zxvf dist.tar.gz
  rm -rf public/dist
  mv -f dist public
  rm -rf dist.tar.gz
}

BuildWinArm64() {
  echo building for windows-arm64
  chmod +x ./wrapper/zcc-arm64
  chmod +x ./wrapper/zcxx-arm64
  export GOOS=windows
  export GOARCH=arm64
  export CC=$(pwd)/wrapper/zcc-arm64
  export CXX=$(pwd)/wrapper/zcxx-arm64
  export CGO_ENABLED=1
  go build -o "$1" -ldflags="$ldflags" -tags=jsoniter .
}

BuildDev() {
  rm -rf .git/
  mkdir -p "dist"
  muslflags="--extldflags '-static -fpic' $ldflags"
  BASE="https://musl.nn.ci/"
  FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross)
  for i in "${FILES[@]}"; do
    url="${BASE}${i}.tgz"
    curl -L -o "${i}.tgz" "${url}"
    sudo tar xf "${i}.tgz" --strip-components 1 -C /usr/local
  done
  OS_ARCHES=(linux-musl-amd64 linux-musl-arm64)
  CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc)
  for i in "${!OS_ARCHES[@]}"; do
    os_arch=${OS_ARCHES[$i]}
    cgo_cc=${CGO_ARGS[$i]}
    echo building for ${os_arch}
    export GOOS=${os_arch%%-*}
    export GOARCH=${os_arch##*-}
    export CC=${cgo_cc}
    export CGO_ENABLED=1
    go build -o ./dist/$appName-$os_arch -ldflags="$muslflags" -tags=jsoniter .
  done
  xgo -targets=windows/amd64,darwin/amd64,darwin/arm64 -out "$appName" -ldflags="$ldflags" -tags=jsoniter .
  mv alist-* dist
  cd dist
  cp ./alist-windows-amd64.exe ./alist-windows-amd64-upx.exe
  upx -9 ./alist-windows-amd64-upx.exe
  find . -type f -print0 | xargs -0 md5sum >md5.txt
  cat md5.txt
}

BuildDocker() {
  go build -o ./bin/alist -ldflags="$ldflags" -tags=jsoniter .
}

PrepareBuildDockerMusl() {
  mkdir -p build/musl-libs
  BASE="https://musl.cc/"
  FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross i486-linux-musl-cross s390x-linux-musl-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross riscv64-linux-musl-cross powerpc64le-linux-musl-cross)
  for i in "${FILES[@]}"; do
    url="${BASE}${i}.tgz"
    lib_tgz="build/${i}.tgz"
    curl -L -o "${lib_tgz}" "${url}"
    tar xf "${lib_tgz}" --strip-components 1 -C build/musl-libs
    rm -f "${lib_tgz}"
  done
}

BuildDockerMultiplatform() {
  go mod download

  # run PrepareBuildDockerMusl before build
  export PATH=$PATH:$PWD/build/musl-libs/bin

  docker_lflags="--extldflags '-static -fpic' $ldflags"
  export CGO_ENABLED=1

  OS_ARCHES=(linux-amd64 linux-arm64 linux-386 linux-s390x linux-riscv64 linux-ppc64le)
  CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc i486-linux-musl-gcc s390x-linux-musl-gcc riscv64-linux-musl-gcc powerpc64le-linux-musl-gcc)
  for i in "${!OS_ARCHES[@]}"; do
    os_arch=${OS_ARCHES[$i]}
    cgo_cc=${CGO_ARGS[$i]}
    os=${os_arch%%-*}
    arch=${os_arch##*-}
    export GOOS=$os
    export GOARCH=$arch
    export CC=${cgo_cc}
    echo "building for $os_arch"
    go build -o build/$os/$arch/alist -ldflags="$docker_lflags" -tags=jsoniter .
  done

  DOCKER_ARM_ARCHES=(linux-arm/v6 linux-arm/v7)
  CGO_ARGS=(armv6-linux-musleabihf-gcc armv7l-linux-musleabihf-gcc)
  GO_ARM=(6 7)
  export GOOS=linux
  export GOARCH=arm
  for i in "${!DOCKER_ARM_ARCHES[@]}"; do
    docker_arch=${DOCKER_ARM_ARCHES[$i]}
    cgo_cc=${CGO_ARGS[$i]}
    export GOARM=${GO_ARM[$i]}
    export CC=${cgo_cc}
    echo "building for $docker_arch"
    go build -o build/${docker_arch%%-*}/${docker_arch##*-}/alist -ldflags="$docker_lflags" -tags=jsoniter .
  done
}

BuildRelease() {
  rm -rf .git/
  mkdir -p "build"
  BuildWinArm64 ./build/alist-windows-arm64.exe
  xgo -out "$appName" -ldflags="$ldflags" -tags=jsoniter .
  # why? Because some target platforms seem to have issues with upx compression
  upx -9 ./alist-linux-amd64
  cp ./alist-windows-amd64.exe ./alist-windows-amd64-upx.exe
  upx -9 ./alist-windows-amd64-upx.exe
  mv alist-* build
}

BuildReleaseLinuxMusl() {
  rm -rf .git/
  mkdir -p "build"
  muslflags="--extldflags '-static -fpic' $ldflags"
  BASE="https://musl.nn.ci/"
  FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross mips-linux-musl-cross mips64-linux-musl-cross mips64el-linux-musl-cross mipsel-linux-musl-cross powerpc64le-linux-musl-cross s390x-linux-musl-cross)
  for i in "${FILES[@]}"; do
    url="${BASE}${i}.tgz"
    curl -L -o "${i}.tgz" "${url}"
    sudo tar xf "${i}.tgz" --strip-components 1 -C /usr/local
    rm -f "${i}.tgz"
  done
  OS_ARCHES=(linux-musl-amd64 linux-musl-arm64 linux-musl-mips linux-musl-mips64 linux-musl-mips64le linux-musl-mipsle linux-musl-ppc64le linux-musl-s390x)
  CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc mips-linux-musl-gcc mips64-linux-musl-gcc mips64el-linux-musl-gcc mipsel-linux-musl-gcc powerpc64le-linux-musl-gcc s390x-linux-musl-gcc)
  for i in "${!OS_ARCHES[@]}"; do
    os_arch=${OS_ARCHES[$i]}
    cgo_cc=${CGO_ARGS[$i]}
    echo building for ${os_arch}
    export GOOS=${os_arch%%-*}
    export GOARCH=${os_arch##*-}
    export CC=${cgo_cc}
    export CGO_ENABLED=1
    go build -o ./build/$appName-$os_arch -ldflags="$muslflags" -tags=jsoniter .
  done
}

BuildReleaseLinuxMuslArm() {
  rm -rf .git/
  mkdir -p "build"
  muslflags="--extldflags '-static -fpic' $ldflags"
  BASE="https://musl.nn.ci/"
  # FILES=(arm-linux-musleabi-cross arm-linux-musleabihf-cross armeb-linux-musleabi-cross armeb-linux-musleabihf-cross armel-linux-musleabi-cross armel-linux-musleabihf-cross armv5l-linux-musleabi-cross armv5l-linux-musleabihf-cross armv6-linux-musleabi-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross armv7m-linux-musleabi-cross armv7r-linux-musleabihf-cross)
  FILES=(arm-linux-musleabi-cross arm-linux-musleabihf-cross armel-linux-musleabi-cross armel-linux-musleabihf-cross armv5l-linux-musleabi-cross armv5l-linux-musleabihf-cross armv6-linux-musleabi-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross armv7m-linux-musleabi-cross armv7r-linux-musleabihf-cross)
  for i in "${FILES[@]}"; do
    url="${BASE}${i}.tgz"
    curl -L -o "${i}.tgz" "${url}"
    sudo tar xf "${i}.tgz" --strip-components 1 -C /usr/local
    rm -f "${i}.tgz"
  done
  # OS_ARCHES=(linux-musleabi-arm linux-musleabihf-arm linux-musleabi-armeb linux-musleabihf-armeb linux-musleabi-armel linux-musleabihf-armel linux-musleabi-armv5l linux-musleabihf-armv5l linux-musleabi-armv6 linux-musleabihf-armv6 linux-musleabihf-armv7l linux-musleabi-armv7m linux-musleabihf-armv7r)
  # CGO_ARGS=(arm-linux-musleabi-gcc arm-linux-musleabihf-gcc armeb-linux-musleabi-gcc armeb-linux-musleabihf-gcc armel-linux-musleabi-gcc armel-linux-musleabihf-gcc armv5l-linux-musleabi-gcc armv5l-linux-musleabihf-gcc armv6-linux-musleabi-gcc armv6-linux-musleabihf-gcc armv7l-linux-musleabihf-gcc armv7m-linux-musleabi-gcc armv7r-linux-musleabihf-gcc)
  # GOARMS=('' '' '' '' '' '' '5' '5' '6' '6' '7' '7' '7')
  OS_ARCHES=(linux-musleabi-arm linux-musleabihf-arm linux-musleabi-armel linux-musleabihf-armel linux-musleabi-armv5l linux-musleabihf-armv5l linux-musleabi-armv6 linux-musleabihf-armv6 linux-musleabihf-armv7l linux-musleabi-armv7m linux-musleabihf-armv7r)
  CGO_ARGS=(arm-linux-musleabi-gcc arm-linux-musleabihf-gcc armel-linux-musleabi-gcc armel-linux-musleabihf-gcc armv5l-linux-musleabi-gcc armv5l-linux-musleabihf-gcc armv6-linux-musleabi-gcc armv6-linux-musleabihf-gcc armv7l-linux-musleabihf-gcc armv7m-linux-musleabi-gcc armv7r-linux-musleabihf-gcc)
  GOARMS=('' '' '' '' '5' '5' '6' '6' '7' '7' '7')
  for i in "${!OS_ARCHES[@]}"; do
    os_arch=${OS_ARCHES[$i]}
    cgo_cc=${CGO_ARGS[$i]}
    arm=${GOARMS[$i]}
    echo building for ${os_arch}
    export GOOS=linux
    export GOARCH=arm
    export CC=${cgo_cc}
    export CGO_ENABLED=1
    export GOARM=${arm}
    go build -o ./build/$appName-$os_arch -ldflags="$muslflags" -tags=jsoniter .
  done
}

BuildReleaseAndroid() {
  rm -rf .git/
  mkdir -p "build"
  wget https://dl.google.com/android/repository/android-ndk-r26b-linux.zip
  unzip android-ndk-r26b-linux.zip
  rm android-ndk-r26b-linux.zip
  OS_ARCHES=(amd64 arm64 386 arm)
  CGO_ARGS=(x86_64-linux-android24-clang aarch64-linux-android24-clang i686-linux-android24-clang armv7a-linux-androideabi24-clang)
  for i in "${!OS_ARCHES[@]}"; do
    os_arch=${OS_ARCHES[$i]}
    cgo_cc=$(realpath android-ndk-r26b/toolchains/llvm/prebuilt/linux-x86_64/bin/${CGO_ARGS[$i]})
    echo building for android-${os_arch}
    export GOOS=android
    export GOARCH=${os_arch##*-}
    export CC=${cgo_cc}
    export CGO_ENABLED=1
    go build -o ./build/$appName-android-$os_arch -ldflags="$ldflags" -tags=jsoniter .
    android-ndk-r26b/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip ./build/$appName-android-$os_arch
  done
}

BuildReleaseFreeBSD() {
  rm -rf .git/
  mkdir -p "build/freebsd"
  OS_ARCHES=(amd64 arm64 i386)
  GO_ARCHES=(amd64 arm64 386)
  CGO_ARGS=(x86_64-unknown-freebsd14.1 aarch64-unknown-freebsd14.1 i386-unknown-freebsd14.1)
  for i in "${!OS_ARCHES[@]}"; do
    os_arch=${OS_ARCHES[$i]}
    cgo_cc="clang --target=${CGO_ARGS[$i]} --sysroot=/opt/freebsd/${os_arch}"
    echo building for freebsd-${os_arch}
    sudo mkdir -p "/opt/freebsd/${os_arch}"
    wget -q https://download.freebsd.org/releases/${os_arch}/14.1-RELEASE/base.txz
    sudo tar -xf ./base.txz -C /opt/freebsd/${os_arch}
    rm base.txz
    export GOOS=freebsd
    export GOARCH=${GO_ARCHES[$i]}
    export CC=${cgo_cc}
    export CGO_ENABLED=1
    export CGO_LDFLAGS="-fuse-ld=lld"
    go build -o ./build/$appName-freebsd-$os_arch -ldflags="$ldflags" -tags=jsoniter .
  done
}

MakeRelease() {
  cd build
  mkdir compress
  for i in $(find . -type f -name "$appName-linux-*"); do
    cp "$i" alist
    tar -czvf compress/"$i".tar.gz alist
    rm -f alist
  done
  for i in $(find . -type f -name "$appName-android-*"); do
    cp "$i" alist
    tar -czvf compress/"$i".tar.gz alist
    rm -f alist
  done
  for i in $(find . -type f -name "$appName-darwin-*"); do
    cp "$i" alist
    tar -czvf compress/"$i".tar.gz alist
    rm -f alist
  done
  for i in $(find . -type f -name "$appName-freebsd-*"); do
    cp "$i" alist
    tar -czvf compress/"$i".tar.gz alist
    rm -f alist
  done
  for i in $(find . -type f -name "$appName-windows-*"); do
    cp "$i" alist.exe
    zip compress/$(echo $i | sed 's/\.[^.]*$//').zip alist.exe
    rm -f alist.exe
  done
  cd compress
  find . -type f -print0 | xargs -0 md5sum >"$1"
  cat "$1"
  cd ../..
}

if [ "$1" = "dev" ]; then
  FetchWebDev
  if [ "$2" = "docker" ]; then
    BuildDocker
  elif [ "$2" = "docker-multiplatform" ]; then
    BuildDockerMultiplatform
  elif [ "$2" = "web" ]; then
    echo "web only"
  else
    BuildDev
  fi
elif [ "$1" = "release" -o "$1" = "beta" ]; then
  if [ "$1" = "beta" ]; then
    FetchWebDev
  else
    FetchWebRelease
  fi
  if [ "$2" = "docker" ]; then
    BuildDocker
  elif [ "$2" = "docker-multiplatform" ]; then
    BuildDockerMultiplatform
  elif [ "$2" = "linux_musl_arm" ]; then
    BuildReleaseLinuxMuslArm
    MakeRelease "md5-linux-musl-arm.txt"
  elif [ "$2" = "linux_musl" ]; then
    BuildReleaseLinuxMusl
    MakeRelease "md5-linux-musl.txt"
  elif [ "$2" = "android" ]; then
    BuildReleaseAndroid
    MakeRelease "md5-android.txt"
  elif [ "$2" = "freebsd" ]; then
    BuildReleaseFreeBSD
    MakeRelease "md5-freebsd.txt"
  elif [ "$2" = "web" ]; then
    echo "web only"
  else
    BuildRelease
    MakeRelease "md5.txt"
  fi
elif [ "$1" = "prepare" ]; then
  if [ "$2" = "docker-multiplatform" ]; then
    PrepareBuildDockerMusl
  fi
elif [ "$1" = "zip" ]; then
  MakeRelease "$2".txt
else
  echo -e "Parameter error"
fi
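The version metadata in this script is injected at link time: each `-X` flag overwrites a package-level string variable in `internal/conf` when the Go binary is linked, so no source file has to be regenerated per build. A minimal self-contained sketch of the mechanism (the package path and variable names here are illustrative, not the project's):

// version.go — a stand-in for the conf package whose variables build.sh overwrites.
package main

import "fmt"

// Defaults used when the binary is built without ldflags.
var (
    Version = "dev"
    BuiltAt = "unknown"
)

func main() {
    // Building with:
    //   go build -ldflags="-X 'main.Version=v3.0.0' -X 'main.BuiltAt=2024-01-01'" .
    // prints the injected values instead of the defaults above.
    fmt.Printf("version=%s builtAt=%s\n", Version, BuiltAt)
}

Note that `-X` only works on package-level string variables that are not computed at init time, which is why the script assigns plain string defaults.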
@@ -1,51 +1,42 @@
package cmd

import (
    "os"
    "path/filepath"
    "strconv"
    "context"

    "github.com/alist-org/alist/v3/internal/bootstrap"
    "github.com/alist-org/alist/v3/internal/bootstrap/data"
    "github.com/alist-org/alist/v3/internal/db"
    "github.com/alist-org/alist/v3/pkg/utils"
    log "github.com/sirupsen/logrus"
    "github.com/OpenListTeam/OpenList/v5/cmd/flags"
    "github.com/OpenListTeam/OpenList/v5/internal/bootstrap"
    "github.com/sirupsen/logrus"
)

func Init() {
func Init(ctx context.Context) {
    if flags.Dev {
        flags.Debug = true
    }
    initLogrus()
    bootstrap.InitConfig()
    bootstrap.Log()
    bootstrap.InitDB()
    data.InitData()
    bootstrap.InitStreamLimit()
    bootstrap.InitIndex()
    bootstrap.InitUpgradePatch()
    bootstrap.InitDriverPlugins()
}

func Release() {
    db.Close()

}

var pid = -1
var pidFile string

func initDaemon() {
    ex, err := os.Executable()
    if err != nil {
        log.Fatal(err)
    }
    exPath := filepath.Dir(ex)
    _ = os.MkdirAll(filepath.Join(exPath, "daemon"), 0700)
    pidFile = filepath.Join(exPath, "daemon/pid")
    if utils.Exists(pidFile) {
        bytes, err := os.ReadFile(pidFile)
        if err != nil {
            log.Fatal("failed to read pid file", err)
        }
        id, err := strconv.Atoi(string(bytes))
        if err != nil {
            log.Fatal("failed to parse pid data", err)
        }
        pid = id
func initLog(l *logrus.Logger) {
    if flags.Debug {
        l.SetLevel(logrus.DebugLevel)
        l.SetReportCaller(true)
    } else {
        l.SetLevel(logrus.InfoLevel)
        l.SetReportCaller(false)
    }
}

func initLogrus() {
    formatter := logrus.TextFormatter{
        ForceColors:               true,
        EnvironmentOverrideColors: true,
        TimestampFormat:           "2006-01-02 15:04:05",
        FullTimestamp:             true,
    }
    logrus.SetFormatter(&formatter)
    initLog(logrus.StandardLogger())
}
@@ -1,10 +1,40 @@
package flags

import (
    "os"
    "path/filepath"

    "github.com/sirupsen/logrus"
)

var (
    DataDir     string
    ConfigFile  string
    Debug       bool
    NoPrefix    bool
    Dev         bool
    ForceBinDir bool
    LogStd      bool

    pwd string
)

// Program working directory
func PWD() string {
    if pwd != "" {
        return pwd
    }
    if ForceBinDir {
        ex, err := os.Executable()
        if err != nil {
            logrus.Fatal(err)
        }
        pwd = filepath.Dir(ex)
        return pwd
    }
    d, err := os.Getwd()
    if err != nil {
        logrus.Fatal(err)
    }
    pwd = d
    return d
}
cmd/root.go (17 lines changed)
@@ -4,19 +4,16 @@ import (
    "fmt"
    "os"

    "github.com/alist-org/alist/v3/cmd/flags"
    _ "github.com/alist-org/alist/v3/drivers"
    _ "github.com/alist-org/alist/v3/internal/archive"
    _ "github.com/alist-org/alist/v3/internal/offline_download"
    "github.com/OpenListTeam/OpenList/v5/cmd/flags"
    "github.com/spf13/cobra"
)

var RootCmd = &cobra.Command{
    Use:   "alist",
    Use:   "openlist",
    Short: "A file list program that supports multiple storage.",
    Long: `A file list program that supports multiple storage,
built with love by Xhofe and friends in Go/Solid.js.
Complete documentation is available at https://alist.nn.ci/`,
built with love by OpenListTeam.
Complete documentation is available at https://doc.oplist.org/`,
}

func Execute() {
@@ -27,10 +24,10 @@ func Execute() {
}

func init() {
    RootCmd.PersistentFlags().StringVar(&flags.DataDir, "data", "data", "data folder")
    RootCmd.PersistentFlags().StringVarP(&flags.ConfigFile, "config", "c", "data/config.json", "config file")
    RootCmd.PersistentFlags().BoolVar(&flags.Debug, "debug", false, "start with debug mode")
    RootCmd.PersistentFlags().BoolVar(&flags.NoPrefix, "no-prefix", false, "disable env prefix")
    RootCmd.PersistentFlags().BoolVar(&flags.Dev, "dev", false, "start with dev mode")
    RootCmd.PersistentFlags().BoolVar(&flags.ForceBinDir, "force-bin-dir", false, "Force to use the directory where the binary file is located as data directory")
    RootCmd.PersistentFlags().BoolVar(&flags.LogStd, "log-std", false, "Force to log to std")
    RootCmd.PersistentFlags().BoolVarP(&flags.ForceBinDir, "force-bin-dir", "f", false, "force to use the directory where the binary file is located as data directory")
    RootCmd.PersistentFlags().BoolVar(&flags.LogStd, "log-std", false, "force to log to std")
}
cmd/server.go (198 lines changed)
@@ -4,9 +4,6 @@ import (
    "context"
    "errors"
    "fmt"
    ftpserver "github.com/KirCute/ftpserverlib-pasvportmap"
    "github.com/KirCute/sftpd-alist"
    "github.com/alist-org/alist/v3/internal/fs"
    "net"
    "net/http"
    "os"
@@ -16,14 +13,14 @@ import (
    "syscall"
    "time"

    "github.com/alist-org/alist/v3/cmd/flags"
    "github.com/alist-org/alist/v3/internal/bootstrap"
    "github.com/alist-org/alist/v3/internal/conf"
    "github.com/alist-org/alist/v3/pkg/utils"
    "github.com/alist-org/alist/v3/server"
    "github.com/OpenListTeam/OpenList/v5/cmd/flags"
    "github.com/OpenListTeam/OpenList/v5/internal/conf"
    "github.com/OpenListTeam/OpenList/v5/server"
    "github.com/gin-gonic/gin"
    log "github.com/sirupsen/logrus"
    "github.com/spf13/cobra"
    "golang.org/x/net/http2"
    "golang.org/x/net/http2/h2c"
)

// ServerCmd represents the server command
@@ -32,206 +29,131 @@ var ServerCmd = &cobra.Command{
    Short: "Start the server at the specified address",
    Long: `Start the server at the specified address
the address is defined in config file`,
    Run: func(cmd *cobra.Command, args []string) {
        Init()
        if conf.Conf.DelayedStart != 0 {
            utils.Log.Infof("delayed start for %d seconds", conf.Conf.DelayedStart)
            time.Sleep(time.Duration(conf.Conf.DelayedStart) * time.Second)
        }
        bootstrap.InitOfflineDownloadTools()
        bootstrap.LoadStorages()
        bootstrap.InitTaskManager()
        if !flags.Debug && !flags.Dev {
    Run: func(_ *cobra.Command, args []string) {
        serverCtx, serverCancel := context.WithCancel(context.Background())
        defer serverCancel()
        Init(serverCtx)

        if !flags.Debug {
            gin.SetMode(gin.ReleaseMode)
        }
        r := gin.New()
        r.Use(gin.LoggerWithWriter(log.StandardLogger().Out), gin.RecoveryWithWriter(log.StandardLogger().Out))
        r.Use(gin.LoggerWithWriter(log.StandardLogger().Out))
        r.Use(gin.RecoveryWithWriter(log.StandardLogger().Out))
        server.Init(r)

        var httpHandler http.Handler = r
        if conf.Conf.Scheme.EnableH2c {
            httpHandler = h2c.NewHandler(r, &http2.Server{})
        }
        var httpSrv, httpsSrv, unixSrv *http.Server
        if conf.Conf.Scheme.HttpPort != -1 {
        if conf.Conf.Scheme.HttpPort > 0 {
            httpBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpPort)
            utils.Log.Infof("start HTTP server @ %s", httpBase)
            httpSrv = &http.Server{Addr: httpBase, Handler: r}
            log.Infoln("start HTTP server", "@", httpBase)
            httpSrv = &http.Server{Addr: httpBase, Handler: httpHandler}
            go func() {
                err := httpSrv.ListenAndServe()
                if err != nil && !errors.Is(err, http.ErrServerClosed) {
                    utils.Log.Fatalf("failed to start http: %s", err.Error())
                    log.Errorln("start HTTP server", ":", err)
                    serverCancel()
                }
            }()
        }
        if conf.Conf.Scheme.HttpsPort != -1 {
        if conf.Conf.Scheme.HttpsPort > 0 {
            httpsBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpsPort)
            utils.Log.Infof("start HTTPS server @ %s", httpsBase)
            log.Infoln("start HTTPS server", "@", httpsBase)
            httpsSrv = &http.Server{Addr: httpsBase, Handler: r}
            go func() {
                err := httpsSrv.ListenAndServeTLS(conf.Conf.Scheme.CertFile, conf.Conf.Scheme.KeyFile)
                if err != nil && !errors.Is(err, http.ErrServerClosed) {
                    utils.Log.Fatalf("failed to start https: %s", err.Error())
                    log.Errorln("start HTTPS server", ":", err)
                    serverCancel()
                }
            }()
        }
        if conf.Conf.Scheme.UnixFile != "" {
            utils.Log.Infof("start unix server @ %s", conf.Conf.Scheme.UnixFile)
            unixSrv = &http.Server{Handler: r}
            log.Infoln("start Unix server", "@", conf.Conf.Scheme.UnixFile)
            unixSrv = &http.Server{Handler: httpHandler}
            go func() {
                listener, err := net.Listen("unix", conf.Conf.Scheme.UnixFile)
                if err != nil {
                    utils.Log.Fatalf("failed to listen unix: %+v", err)
                    log.Errorln("start Unix server", ":", err)
                    serverCancel()
                    return
                }
                // set socket file permission

                mode, err := strconv.ParseUint(conf.Conf.Scheme.UnixFilePerm, 8, 32)
                if err != nil {
                    utils.Log.Errorf("failed to parse socket file permission: %+v", err)
                    log.Errorln("parse unix_file_perm", ":", err)
                } else {
                    err = os.Chmod(conf.Conf.Scheme.UnixFile, os.FileMode(mode))
                    if err != nil {
                        utils.Log.Errorf("failed to chmod socket file: %+v", err)
                        log.Errorln("chmod socket file", ":", err)
                    }
                }

                err = unixSrv.Serve(listener)
                if err != nil && !errors.Is(err, http.ErrServerClosed) {
                    utils.Log.Fatalf("failed to start unix: %s", err.Error())
                    log.Errorln("start Unix server", ":", err)
                    serverCancel()
                }
            }()
        }
        if conf.Conf.S3.Port != -1 && conf.Conf.S3.Enable {
            s3r := gin.New()
            s3r.Use(gin.LoggerWithWriter(log.StandardLogger().Out), gin.RecoveryWithWriter(log.StandardLogger().Out))
            server.InitS3(s3r)
            s3Base := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.S3.Port)
            utils.Log.Infof("start S3 server @ %s", s3Base)
            go func() {
                var err error
                if conf.Conf.S3.SSL {
                    httpsSrv = &http.Server{Addr: s3Base, Handler: s3r}
                    err = httpsSrv.ListenAndServeTLS(conf.Conf.Scheme.CertFile, conf.Conf.Scheme.KeyFile)
                }
                if !conf.Conf.S3.SSL {
                    httpSrv = &http.Server{Addr: s3Base, Handler: s3r}
                    err = httpSrv.ListenAndServe()
                }
                if err != nil && !errors.Is(err, http.ErrServerClosed) {
                    utils.Log.Fatalf("failed to start s3 server: %s", err.Error())
                }
            }()
        }
        var ftpDriver *server.FtpMainDriver
        var ftpServer *ftpserver.FtpServer
        if conf.Conf.FTP.Listen != "" && conf.Conf.FTP.Enable {
            var err error
            ftpDriver, err = server.NewMainDriver()
            if err != nil {
                utils.Log.Fatalf("failed to start ftp driver: %s", err.Error())
            } else {
                utils.Log.Infof("start ftp server on %s", conf.Conf.FTP.Listen)
                go func() {
                    ftpServer = ftpserver.NewFtpServer(ftpDriver)
                    err = ftpServer.ListenAndServe()
                    if err != nil {
                        utils.Log.Fatalf("problem ftp server listening: %s", err.Error())
                    }
                }()
            }
        }
        var sftpDriver *server.SftpDriver
        var sftpServer *sftpd.SftpServer
        if conf.Conf.SFTP.Listen != "" && conf.Conf.SFTP.Enable {
            var err error
            sftpDriver, err = server.NewSftpDriver()
            if err != nil {
                utils.Log.Fatalf("failed to start sftp driver: %s", err.Error())
            } else {
                utils.Log.Infof("start sftp server on %s", conf.Conf.SFTP.Listen)
                go func() {
                    sftpServer = sftpd.NewSftpServer(sftpDriver)
                    err = sftpServer.RunServer()
                    if err != nil {
                        utils.Log.Fatalf("problem sftp server listening: %s", err.Error())
                    }
                }()
            }
        }
        // Wait for an interrupt signal to gracefully shut down the server,
        // with a timeout of 1 second.

        quit := make(chan os.Signal, 1)
        // kill (no param) sends syscall.SIGTERM by default
        // kill -2 is syscall.SIGINT
        // kill -9 is syscall.SIGKILL, but it can't be caught, so there is no need to add it
        signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
        <-quit
        utils.Log.Println("Shutdown server...")
        fs.ArchiveContentUploadTaskManager.RemoveAll()
        select {
        case <-quit:
        case <-serverCtx.Done():
        }

        log.Println("shutdown server...")
        Release()
        ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
        defer cancel()

        quitCtx, quitCancel := context.WithTimeout(context.Background(), time.Second)
        defer quitCancel()
        var wg sync.WaitGroup
        if conf.Conf.Scheme.HttpPort != -1 {
        if httpSrv != nil {
            wg.Add(1)
            go func() {
                defer wg.Done()
                if err := httpSrv.Shutdown(ctx); err != nil {
                    utils.Log.Fatal("HTTP server shutdown err: ", err)
                if err := httpSrv.Shutdown(quitCtx); err != nil {
                    log.Errorln("shutdown HTTP server", ":", err)
                }
            }()
        }
        if conf.Conf.Scheme.HttpsPort != -1 {
        if httpsSrv != nil {
            wg.Add(1)
            go func() {
                defer wg.Done()
                if err := httpsSrv.Shutdown(ctx); err != nil {
                    utils.Log.Fatal("HTTPS server shutdown err: ", err)
                if err := httpsSrv.Shutdown(quitCtx); err != nil {
                    log.Errorln("shutdown HTTPS server", ":", err)
                }
            }()
        }
        if conf.Conf.Scheme.UnixFile != "" {
        if unixSrv != nil {
            wg.Add(1)
            go func() {
                defer wg.Done()
                if err := unixSrv.Shutdown(ctx); err != nil {
                    utils.Log.Fatal("Unix server shutdown err: ", err)
                }
            }()
        }
        if conf.Conf.FTP.Listen != "" && conf.Conf.FTP.Enable && ftpServer != nil && ftpDriver != nil {
            wg.Add(1)
            go func() {
                defer wg.Done()
                ftpDriver.Stop()
                if err := ftpServer.Stop(); err != nil {
                    utils.Log.Fatal("FTP server shutdown err: ", err)
                }
            }()
        }
        if conf.Conf.SFTP.Listen != "" && conf.Conf.SFTP.Enable && sftpServer != nil && sftpDriver != nil {
            wg.Add(1)
            go func() {
                defer wg.Done()
                if err := sftpServer.Close(); err != nil {
                    utils.Log.Fatal("SFTP server shutdown err: ", err)
                if err := unixSrv.Shutdown(quitCtx); err != nil {
                    log.Errorln("shutdown Unix server", ":", err)
                }
            }()
        }
        wg.Wait()
        utils.Log.Println("Server exit")
        log.Println("server exit")
    },
}

func init() {
    RootCmd.AddCommand(ServerCmd)

    // Here you will define your flags and configuration settings.

    // Cobra supports Persistent Flags which will work for this command
    // and all subcommands, e.g.:
    // serverCmd.PersistentFlags().String("foo", "", "A help for foo")

    // Cobra supports local flags which will only run when this command
    // is called directly, e.g.:
    // serverCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}

// OutAlistInit is exposed so the server can be started by external callers
func OutAlistInit() {
// OutOpenListInit is exposed so the server can be started by external callers
func OutOpenListInit() {
    var (
        cmd  *cobra.Command
        args []string
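The rewritten command above replaces `utils.Log.Fatalf` calls with a cancellable context: any listener failure cancels `serverCtx`, and the main goroutine waits on either a signal or that cancellation before doing a bounded shutdown. A condensed, self-contained sketch of the same pattern under those assumptions (one HTTP listener only; names are illustrative, not the project's):

package main

import (
    "context"
    "errors"
    "log"
    "net/http"
    "os"
    "os/signal"
    "sync"
    "syscall"
    "time"
)

func main() {
    serverCtx, serverCancel := context.WithCancel(context.Background())
    defer serverCancel()

    srv := &http.Server{Addr: ":8080", Handler: http.NewServeMux()}
    go func() {
        // A listener error (other than a clean shutdown) cancels the
        // server context so the main goroutine can exit.
        if err := srv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
            log.Println("start HTTP server:", err)
            serverCancel()
        }
    }()

    // Block until either a signal arrives or a listener failed.
    quit := make(chan os.Signal, 1)
    signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
    select {
    case <-quit:
    case <-serverCtx.Done():
    }

    // Bounded shutdown: give in-flight requests one second to finish.
    quitCtx, quitCancel := context.WithTimeout(context.Background(), time.Second)
    defer quitCancel()
    var wg sync.WaitGroup
    wg.Add(1)
    go func() {
        defer wg.Done()
        if err := srv.Shutdown(quitCtx); err != nil {
            log.Println("shutdown HTTP server:", err)
        }
    }()
    wg.Wait()
}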
@@ -1,36 +0,0 @@
package _115_open

import (
    "github.com/alist-org/alist/v3/internal/driver"
    "github.com/alist-org/alist/v3/internal/op"
)

type Addition struct {
    // Usually one of two
    driver.RootID
    // define other
    RefreshToken   string `json:"refresh_token" required:"true"`
    OrderBy        string `json:"order_by" type:"select" options:"file_name,file_size,user_utime,file_type"`
    OrderDirection string `json:"order_direction" type:"select" options:"asc,desc"`
    AccessToken    string
}

var config = driver.Config{
    Name:              "115 Open",
    LocalSort:         false,
    OnlyLocal:         false,
    OnlyProxy:         false,
    NoCache:           false,
    NoUpload:          false,
    NeedMs:            false,
    DefaultRoot:       "0",
    CheckStatus:       false,
    Alert:             "",
    NoOverwriteUpload: false,
}

func init() {
    op.RegisterDriver(func() driver.Driver {
        return &Open115{}
    })
}
@@ -1,3 +0,0 @@
package _115_open

// do other things not defined in the Driver interface
@@ -1,156 +0,0 @@
package _123

import (
    "context"
    "fmt"
    "io"
    "math"
    "net/http"
    "strconv"

    "github.com/alist-org/alist/v3/drivers/base"
    "github.com/alist-org/alist/v3/internal/driver"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/pkg/utils"
    "github.com/go-resty/resty/v2"
)

func (d *Pan123) getS3PreSignedUrls(ctx context.Context, upReq *UploadResp, start, end int) (*S3PreSignedURLs, error) {
    data := base.Json{
        "bucket":          upReq.Data.Bucket,
        "key":             upReq.Data.Key,
        "partNumberEnd":   end,
        "partNumberStart": start,
        "uploadId":        upReq.Data.UploadId,
        "StorageNode":     upReq.Data.StorageNode,
    }
    var s3PreSignedUrls S3PreSignedURLs
    _, err := d.Request(S3PreSignedUrls, http.MethodPost, func(req *resty.Request) {
        req.SetBody(data).SetContext(ctx)
    }, &s3PreSignedUrls)
    if err != nil {
        return nil, err
    }
    return &s3PreSignedUrls, nil
}

func (d *Pan123) getS3Auth(ctx context.Context, upReq *UploadResp, start, end int) (*S3PreSignedURLs, error) {
    data := base.Json{
        "StorageNode":     upReq.Data.StorageNode,
        "bucket":          upReq.Data.Bucket,
        "key":             upReq.Data.Key,
        "partNumberEnd":   end,
        "partNumberStart": start,
        "uploadId":        upReq.Data.UploadId,
    }
    var s3PreSignedUrls S3PreSignedURLs
    _, err := d.Request(S3Auth, http.MethodPost, func(req *resty.Request) {
        req.SetBody(data).SetContext(ctx)
    }, &s3PreSignedUrls)
    if err != nil {
        return nil, err
    }
    return &s3PreSignedUrls, nil
}

func (d *Pan123) completeS3(ctx context.Context, upReq *UploadResp, file model.FileStreamer, isMultipart bool) error {
    data := base.Json{
        "StorageNode": upReq.Data.StorageNode,
        "bucket":      upReq.Data.Bucket,
        "fileId":      upReq.Data.FileId,
        "fileSize":    file.GetSize(),
        "isMultipart": isMultipart,
        "key":         upReq.Data.Key,
        "uploadId":    upReq.Data.UploadId,
    }
    _, err := d.Request(UploadCompleteV2, http.MethodPost, func(req *resty.Request) {
        req.SetBody(data).SetContext(ctx)
    }, nil)
    return err
}

func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, up driver.UpdateProgress) error {
    chunkSize := int64(1024 * 1024 * 16)
    // fetch s3 pre signed urls
    chunkCount := int(math.Ceil(float64(file.GetSize()) / float64(chunkSize)))
    // only 1 batch is allowed
    isMultipart := chunkCount > 1
    batchSize := 1
    getS3UploadUrl := d.getS3Auth
    if isMultipart {
        batchSize = 10
        getS3UploadUrl = d.getS3PreSignedUrls
    }
    limited := driver.NewLimitedUploadStream(ctx, file)
    for i := 1; i <= chunkCount; i += batchSize {
        if utils.IsCanceled(ctx) {
            return ctx.Err()
        }
        start := i
        end := i + batchSize
        if end > chunkCount+1 {
            end = chunkCount + 1
        }
        s3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, start, end)
        if err != nil {
            return err
        }
        // upload each chunk
        for j := start; j < end; j++ {
            if utils.IsCanceled(ctx) {
                return ctx.Err()
            }
            curSize := chunkSize
            if j == chunkCount {
                curSize = file.GetSize() - (int64(chunkCount)-1)*chunkSize
            }
            err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.LimitReader(limited, chunkSize), curSize, false, getS3UploadUrl)
            if err != nil {
                return err
            }
            up(float64(j) * 100 / float64(chunkCount))
        }
    }
    // complete s3 upload
    return d.completeS3(ctx, upReq, file, chunkCount > 1)
}

func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSignedUrls *S3PreSignedURLs, cur, end int, reader io.Reader, curSize int64, retry bool, getS3UploadUrl func(ctx context.Context, upReq *UploadResp, start int, end int) (*S3PreSignedURLs, error)) error {
    uploadUrl := s3PreSignedUrls.Data.PreSignedUrls[strconv.Itoa(cur)]
    if uploadUrl == "" {
        return fmt.Errorf("upload url is empty, s3PreSignedUrls: %+v", s3PreSignedUrls)
    }
    req, err := http.NewRequest("PUT", uploadUrl, reader)
    if err != nil {
        return err
    }
    req = req.WithContext(ctx)
    req.ContentLength = curSize
    //req.Header.Set("Content-Length", strconv.FormatInt(curSize, 10))
    res, err := base.HttpClient.Do(req)
    if err != nil {
        return err
    }
    defer res.Body.Close()
    if res.StatusCode == http.StatusForbidden {
        if retry {
            return fmt.Errorf("upload s3 chunk %d failed, status code: %d", cur, res.StatusCode)
        }
        // refresh s3 pre signed urls
        newS3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, cur, end)
        if err != nil {
            return err
        }
        s3PreSignedUrls.Data.PreSignedUrls = newS3PreSignedUrls.Data.PreSignedUrls
        // retry
        return d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, cur, end, reader, curSize, true, getS3UploadUrl)
    }
    if res.StatusCode != http.StatusOK {
        body, err := io.ReadAll(res.Body)
        if err != nil {
            return err
        }
        return fmt.Errorf("upload s3 chunk %d failed, status code: %d, body: %s", cur, res.StatusCode, body)
    }
    return nil
}
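`newUpload` above derives everything from a fixed 16 MiB chunk size: the chunk count is `ceil(size/chunkSize)`, pre-signed URLs are fetched in batches of 10 once more than one chunk is needed, and only the final chunk can be short. A standalone sketch of that arithmetic:

package main

import (
    "fmt"
    "math"
)

func main() {
    const chunkSize = int64(16 * 1024 * 1024) // 16 MiB, as in newUpload
    fileSize := int64(100 * 1024 * 1024)      // example: a 100 MiB upload

    chunkCount := int(math.Ceil(float64(fileSize) / float64(chunkSize)))
    batchSize := 1
    if chunkCount > 1 {
        batchSize = 10 // multipart uploads fetch pre-signed URLs 10 at a time
    }
    // Every chunk is full-sized except possibly the last one.
    lastChunk := fileSize - int64(chunkCount-1)*chunkSize
    fmt.Printf("chunks=%d batch=%d lastChunk=%d bytes\n", chunkCount, batchSize, lastChunk)
    // For 100 MiB: chunks=7, batch=10, lastChunk=4194304 (4 MiB).
}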
@@ -1,151 +0,0 @@
package alias

import (
    "context"
    "errors"
    "strings"

    "github.com/alist-org/alist/v3/internal/driver"
    "github.com/alist-org/alist/v3/internal/errs"
    "github.com/alist-org/alist/v3/internal/fs"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/pkg/utils"
)

type Alias struct {
    model.Storage
    Addition
    pathMap     map[string][]string
    autoFlatten bool
    oneKey      string
}

func (d *Alias) Config() driver.Config {
    return config
}

func (d *Alias) GetAddition() driver.Additional {
    return &d.Addition
}

func (d *Alias) Init(ctx context.Context) error {
    if d.Paths == "" {
        return errors.New("paths is required")
    }
    d.pathMap = make(map[string][]string)
    for _, path := range strings.Split(d.Paths, "\n") {
        path = strings.TrimSpace(path)
        if path == "" {
            continue
        }
        k, v := getPair(path)
        d.pathMap[k] = append(d.pathMap[k], v)
    }
    if len(d.pathMap) == 1 {
        for k := range d.pathMap {
            d.oneKey = k
        }
        d.autoFlatten = true
    } else {
        d.oneKey = ""
        d.autoFlatten = false
    }
    return nil
}

func (d *Alias) Drop(ctx context.Context) error {
    d.pathMap = nil
    return nil
}

func (d *Alias) Get(ctx context.Context, path string) (model.Obj, error) {
    if utils.PathEqual(path, "/") {
        return &model.Object{
            Name:     "Root",
            IsFolder: true,
            Path:     "/",
        }, nil
    }
    root, sub := d.getRootAndPath(path)
    dsts, ok := d.pathMap[root]
    if !ok {
        return nil, errs.ObjectNotFound
    }
    for _, dst := range dsts {
        obj, err := d.get(ctx, path, dst, sub)
        if err == nil {
            return obj, nil
        }
    }
    return nil, errs.ObjectNotFound
}

func (d *Alias) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
    path := dir.GetPath()
    if utils.PathEqual(path, "/") && !d.autoFlatten {
        return d.listRoot(), nil
    }
    root, sub := d.getRootAndPath(path)
    dsts, ok := d.pathMap[root]
    if !ok {
        return nil, errs.ObjectNotFound
    }
    var objs []model.Obj
    fsArgs := &fs.ListArgs{NoLog: true, Refresh: args.Refresh}
    for _, dst := range dsts {
        tmp, err := d.list(ctx, dst, sub, fsArgs)
        if err == nil {
            objs = append(objs, tmp...)
        }
    }
    return objs, nil
}

func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
    root, sub := d.getRootAndPath(file.GetPath())
    dsts, ok := d.pathMap[root]
    if !ok {
        return nil, errs.ObjectNotFound
    }
    for _, dst := range dsts {
        link, err := d.link(ctx, dst, sub, args)
        if err == nil {
            if !args.Redirect && len(link.URL) > 0 {
                // Normally, concurrent downloads are only supported by drivers that return a URL.
                // Nesting an alias inside another alias lets drivers that do not return a URL (crypt, mega, etc.) support concurrency.
                if d.DownloadConcurrency > 0 {
                    link.Concurrency = d.DownloadConcurrency
                }
                if d.DownloadPartSize > 0 {
                    link.PartSize = d.DownloadPartSize * utils.KB
                }
            }
            return link, nil
        }
    }
    return nil, errs.ObjectNotFound
}

func (d *Alias) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
    reqPath, err := d.getReqPath(ctx, srcObj)
    if err == nil {
        return fs.Rename(ctx, *reqPath, newName)
    }
    if errs.IsNotImplement(err) {
        return errors.New("same-name files cannot be renamed")
    }
    return err
}

func (d *Alias) Remove(ctx context.Context, obj model.Obj) error {
    reqPath, err := d.getReqPath(ctx, obj)
    if err == nil {
        return fs.Remove(ctx, *reqPath)
    }
    if errs.IsNotImplement(err) {
        return errors.New("same-name files cannot be deleted")
    }
    return err
}

var _ driver.Driver = (*Alias)(nil)
@@ -1,158 +0,0 @@
package alias

import (
    "context"
    "fmt"
    stdpath "path"
    "strings"

    "github.com/alist-org/alist/v3/internal/errs"
    "github.com/alist-org/alist/v3/internal/fs"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/internal/op"
    "github.com/alist-org/alist/v3/internal/sign"
    "github.com/alist-org/alist/v3/pkg/utils"
    "github.com/alist-org/alist/v3/server/common"
)

func (d *Alias) listRoot() []model.Obj {
    var objs []model.Obj
    for k := range d.pathMap {
        obj := model.Object{
            Name:     k,
            IsFolder: true,
            Modified: d.Modified,
        }
        objs = append(objs, &obj)
    }
    return objs
}

// do other things not defined in the Driver interface
func getPair(path string) (string, string) {
    //path = strings.TrimSpace(path)
    if strings.Contains(path, ":") {
        pair := strings.SplitN(path, ":", 2)
        if !strings.Contains(pair[0], "/") {
            return pair[0], pair[1]
        }
    }
    return stdpath.Base(path), path
}

func (d *Alias) getRootAndPath(path string) (string, string) {
    if d.autoFlatten {
        return d.oneKey, path
    }
    path = strings.TrimPrefix(path, "/")
    parts := strings.SplitN(path, "/", 2)
    if len(parts) == 1 {
        return parts[0], ""
    }
    return parts[0], parts[1]
}

func (d *Alias) get(ctx context.Context, path string, dst, sub string) (model.Obj, error) {
    obj, err := fs.Get(ctx, stdpath.Join(dst, sub), &fs.GetArgs{NoLog: true})
    if err != nil {
        return nil, err
    }
    return &model.Object{
        Path:     path,
        Name:     obj.GetName(),
        Size:     obj.GetSize(),
        Modified: obj.ModTime(),
        IsFolder: obj.IsDir(),
        HashInfo: obj.GetHash(),
    }, nil
}

func (d *Alias) list(ctx context.Context, dst, sub string, args *fs.ListArgs) ([]model.Obj, error) {
    objs, err := fs.List(ctx, stdpath.Join(dst, sub), args)
    // the obj must implement the model.SetPath interface
    // return objs, err
    if err != nil {
        return nil, err
    }
    return utils.SliceConvert(objs, func(obj model.Obj) (model.Obj, error) {
        thumb, ok := model.GetThumb(obj)
        objRes := model.Object{
            Name:     obj.GetName(),
            Size:     obj.GetSize(),
            Modified: obj.ModTime(),
            IsFolder: obj.IsDir(),
        }
        if !ok {
            return &objRes, nil
        }
        return &model.ObjThumb{
            Object: objRes,
            Thumbnail: model.Thumbnail{
                Thumbnail: thumb,
            },
        }, nil
    })
}

func (d *Alias) link(ctx context.Context, dst, sub string, args model.LinkArgs) (*model.Link, error) {
    reqPath := stdpath.Join(dst, sub)
    // modeled on the crypt driver
    storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
    if err != nil {
        return nil, err
    }
    if _, ok := storage.(*Alias); !ok && !args.Redirect {
        link, _, err := op.Link(ctx, storage, reqActualPath, args)
        return link, err
    }
    _, err = fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
    if err != nil {
        return nil, err
    }
    if common.ShouldProxy(storage, stdpath.Base(sub)) {
        link := &model.Link{
            URL: fmt.Sprintf("%s/p%s?sign=%s",
                common.GetApiUrl(args.HttpReq),
                utils.EncodePath(reqPath, true),
                sign.Sign(reqPath)),
        }
        if args.HttpReq != nil && d.ProxyRange {
            link.RangeReadCloser = common.NoProxyRange
        }
        return link, nil
    }
    link, _, err := op.Link(ctx, storage, reqActualPath, args)
    return link, err
}

func (d *Alias) getReqPath(ctx context.Context, obj model.Obj) (*string, error) {
    root, sub := d.getRootAndPath(obj.GetPath())
    if sub == "" {
        return nil, errs.NotSupport
    }
    dsts, ok := d.pathMap[root]
    if !ok {
        return nil, errs.ObjectNotFound
    }
    var reqPath *string
    for _, dst := range dsts {
        path := stdpath.Join(dst, sub)
        _, err := fs.Get(ctx, path, &fs.GetArgs{NoLog: true})
        if err != nil {
            continue
        }
        if !d.ProtectSameName {
            return &path, nil
        }
        if ok {
            ok = false
        } else {
            return nil, errs.NotImplement
        }
        reqPath = &path
    }
    if reqPath == nil {
        return nil, errs.ObjectNotFound
    }
    return reqPath, nil
}
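`getPair` above accepts either a bare path or a `name:path` pair (a colon only counts as a separator when the part before it contains no slash), and `Init` folds duplicates into `pathMap` so one alias name can fan out to several destinations. A small sketch of that parsing rule, standalone and mirroring the logic rather than importing the driver:

package main

import (
    "fmt"
    stdpath "path"
    "strings"
)

// getPair mirrors the alias driver's rule: "name:/dst" maps name -> /dst,
// while a bare "/some/dir" maps its base name ("dir") -> /some/dir.
func getPair(p string) (string, string) {
    if strings.Contains(p, ":") {
        pair := strings.SplitN(p, ":", 2)
        if !strings.Contains(pair[0], "/") {
            return pair[0], pair[1]
        }
    }
    return stdpath.Base(p), p
}

func main() {
    pathMap := make(map[string][]string)
    for _, line := range []string{"movies:/storage1/media", "movies:/storage2/media", "/docs"} {
        k, v := getPair(line)
        pathMap[k] = append(pathMap[k], v)
    }
    fmt.Println(pathMap) // map[docs:[/docs] movies:[/storage1/media /storage2/media]]
}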
@@ -1,118 +0,0 @@
package alist_v2

import (
    "context"

    "github.com/alist-org/alist/v3/drivers/base"
    "github.com/alist-org/alist/v3/internal/driver"
    "github.com/alist-org/alist/v3/internal/errs"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/server/common"
)

type AListV2 struct {
    model.Storage
    Addition
}

func (d *AListV2) Config() driver.Config {
    return config
}

func (d *AListV2) GetAddition() driver.Additional {
    return &d.Addition
}

func (d *AListV2) Init(ctx context.Context) error {
    if len(d.Addition.Address) > 0 && string(d.Addition.Address[len(d.Addition.Address)-1]) == "/" {
        d.Addition.Address = d.Addition.Address[0 : len(d.Addition.Address)-1]
    }
    // TODO login / refresh token
    //op.MustSaveDriverStorage(d)
    return nil
}

func (d *AListV2) Drop(ctx context.Context) error {
    return nil
}

func (d *AListV2) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
    url := d.Address + "/api/public/path"
    var resp common.Resp[PathResp]
    _, err := base.RestyClient.R().
        SetResult(&resp).
        SetHeader("Authorization", d.AccessToken).
        SetBody(PathReq{
            PageNum:  0,
            PageSize: 0,
            Path:     dir.GetPath(),
            Password: d.Password,
        }).Post(url)
    if err != nil {
        return nil, err
    }
    var files []model.Obj
    for _, f := range resp.Data.Files {
        file := model.ObjThumb{
            Object: model.Object{
                Name:     f.Name,
                Modified: *f.UpdatedAt,
                Size:     f.Size,
                IsFolder: f.Type == 1,
            },
            Thumbnail: model.Thumbnail{Thumbnail: f.Thumbnail},
        }
        files = append(files, &file)
    }
    return files, nil
}

func (d *AListV2) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
    url := d.Address + "/api/public/path"
    var resp common.Resp[PathResp]
    _, err := base.RestyClient.R().
        SetResult(&resp).
        SetHeader("Authorization", d.AccessToken).
        SetBody(PathReq{
            PageNum:  0,
            PageSize: 0,
            Path:     file.GetPath(),
            Password: d.Password,
        }).Post(url)
    if err != nil {
        return nil, err
    }
    return &model.Link{
        URL: resp.Data.Files[0].Url,
    }, nil
}

func (d *AListV2) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
    return errs.NotImplement
}

func (d *AListV2) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
    return errs.NotImplement
}

func (d *AListV2) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
    return errs.NotImplement
}

func (d *AListV2) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
    return errs.NotImplement
}

func (d *AListV2) Remove(ctx context.Context, obj model.Obj) error {
    return errs.NotImplement
}

func (d *AListV2) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
    return errs.NotImplement
}

//func (d *AList) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
//	return nil, errs.NotSupport
//}

var _ driver.Driver = (*AListV2)(nil)
@@ -1,26 +0,0 @@
package alist_v2

import (
    "github.com/alist-org/alist/v3/internal/driver"
    "github.com/alist-org/alist/v3/internal/op"
)

type Addition struct {
    driver.RootPath
    Address     string `json:"url" required:"true"`
    Password    string `json:"password"`
    AccessToken string `json:"access_token"`
}

var config = driver.Config{
    Name:        "AList V2",
    LocalSort:   true,
    NoUpload:    true,
    DefaultRoot: "/",
}

func init() {
    op.RegisterDriver(func() driver.Driver {
        return &AListV2{}
    })
}
@@ -1,31 +0,0 @@
package alist_v2

import (
    "time"
)

type File struct {
    Id        string     `json:"-"`
    Name      string     `json:"name"`
    Size      int64      `json:"size"`
    Type      int        `json:"type"`
    Driver    string     `json:"driver"`
    UpdatedAt *time.Time `json:"updated_at"`
    Thumbnail string     `json:"thumbnail"`
    Url       string     `json:"url"`
    SizeStr   string     `json:"size_str"`
    TimeStr   string     `json:"time_str"`
}

type PathResp struct {
    Type string `json:"type"`
    //Meta  Meta   `json:"meta"`
    Files []File `json:"files"`
}

type PathReq struct {
    PageNum  int    `json:"page_num"`
    PageSize int    `json:"page_size"`
    Password string `json:"password"`
    Path     string `json:"path"`
}
@@ -1 +0,0 @@
package alist_v2
@ -1,239 +0,0 @@
|
||||
package alist_v3
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/alist-org/alist/v3/drivers/base"
|
||||
"github.com/alist-org/alist/v3/internal/conf"
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/alist-org/alist/v3/server/common"
|
||||
"github.com/go-resty/resty/v2"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type AListV3 struct {
|
||||
model.Storage
|
||||
Addition
|
||||
}
|
||||
|
||||
func (d *AListV3) Config() driver.Config {
|
||||
return config
|
||||
}
|
||||
|
||||
func (d *AListV3) GetAddition() driver.Additional {
|
||||
return &d.Addition
|
||||
}
|
||||
|
||||
func (d *AListV3) Init(ctx context.Context) error {
|
||||
d.Addition.Address = strings.TrimSuffix(d.Addition.Address, "/")
|
||||
var resp common.Resp[MeResp]
|
||||
_, err := d.request("/me", http.MethodGet, func(req *resty.Request) {
|
||||
req.SetResult(&resp)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// if the username is not empty and the username is not the same as the current username, then login again
|
||||
if d.Username != resp.Data.Username {
|
||||
err = d.login()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// re-get the user info
|
||||
_, err = d.request("/me", http.MethodGet, func(req *resty.Request) {
|
||||
req.SetResult(&resp)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resp.Data.Role == model.GUEST {
|
||||
url := d.Address + "/api/public/settings"
|
||||
res, err := base.RestyClient.R().Get(url)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
allowMounted := utils.Json.Get(res.Body(), "data", conf.AllowMounted).ToString() == "true"
|
||||
if !allowMounted {
|
||||
return fmt.Errorf("the site does not allow mounted")
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *AListV3) Drop(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *AListV3) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
||||
var resp common.Resp[FsListResp]
|
||||
	_, err := d.request("/fs/list", http.MethodPost, func(req *resty.Request) {
		req.SetResult(&resp).SetBody(ListReq{
			PageReq: model.PageReq{
				Page:    1,
				PerPage: 0,
			},
			Path:     dir.GetPath(),
			Password: d.MetaPassword,
			Refresh:  false,
		})
	})
	if err != nil {
		return nil, err
	}
	var files []model.Obj
	for _, f := range resp.Data.Content {
		file := model.ObjThumb{
			Object: model.Object{
				Name:     f.Name,
				Modified: f.Modified,
				Ctime:    f.Created,
				Size:     f.Size,
				IsFolder: f.IsDir,
				HashInfo: utils.FromString(f.HashInfo),
			},
			Thumbnail: model.Thumbnail{Thumbnail: f.Thumb},
		}
		files = append(files, &file)
	}
	return files, nil
}

func (d *AListV3) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	var resp common.Resp[FsGetResp]
	// if PassUAToUpsteam is enabled, forward the client's user-agent to the upstream server
	userAgent := base.UserAgent
	if d.PassUAToUpsteam {
		userAgent = args.Header.Get("user-agent")
		if userAgent == "" {
			userAgent = base.UserAgent
		}
	}
	_, err := d.request("/fs/get", http.MethodPost, func(req *resty.Request) {
		req.SetResult(&resp).SetBody(FsGetReq{
			Path:     file.GetPath(),
			Password: d.MetaPassword,
		}).SetHeader("user-agent", userAgent)
	})
	if err != nil {
		return nil, err
	}
	return &model.Link{
		URL: resp.Data.RawURL,
	}, nil
}

func (d *AListV3) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
	_, err := d.request("/fs/mkdir", http.MethodPost, func(req *resty.Request) {
		req.SetBody(MkdirOrLinkReq{
			Path: path.Join(parentDir.GetPath(), dirName),
		})
	})
	return err
}

func (d *AListV3) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
	_, err := d.request("/fs/move", http.MethodPost, func(req *resty.Request) {
		req.SetBody(MoveCopyReq{
			SrcDir: path.Dir(srcObj.GetPath()),
			DstDir: dstDir.GetPath(),
			Names:  []string{srcObj.GetName()},
		})
	})
	return err
}

func (d *AListV3) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
	_, err := d.request("/fs/rename", http.MethodPost, func(req *resty.Request) {
		req.SetBody(RenameReq{
			Path: srcObj.GetPath(),
			Name: newName,
		})
	})
	return err
}

func (d *AListV3) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
	_, err := d.request("/fs/copy", http.MethodPost, func(req *resty.Request) {
		req.SetBody(MoveCopyReq{
			SrcDir: path.Dir(srcObj.GetPath()),
			DstDir: dstDir.GetPath(),
			Names:  []string{srcObj.GetName()},
		})
	})
	return err
}

func (d *AListV3) Remove(ctx context.Context, obj model.Obj) error {
	_, err := d.request("/fs/remove", http.MethodPost, func(req *resty.Request) {
		req.SetBody(RemoveReq{
			Dir:   path.Dir(obj.GetPath()),
			Names: []string{obj.GetName()},
		})
	})
	return err
}

func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
	reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
		Reader:         s,
		UpdateProgress: up,
	})
	req, err := http.NewRequestWithContext(ctx, http.MethodPut, d.Address+"/api/fs/put", reader)
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", d.Token)
	req.Header.Set("File-Path", path.Join(dstDir.GetPath(), s.GetName()))
	req.Header.Set("Password", d.MetaPassword)
	if md5 := s.GetHash().GetHash(utils.MD5); len(md5) > 0 {
		req.Header.Set("X-File-Md5", md5)
	}
	if sha1 := s.GetHash().GetHash(utils.SHA1); len(sha1) > 0 {
		req.Header.Set("X-File-Sha1", sha1)
	}
	if sha256 := s.GetHash().GetHash(utils.SHA256); len(sha256) > 0 {
		req.Header.Set("X-File-Sha256", sha256)
	}

	req.ContentLength = s.GetSize()
	// client := base.NewHttpClient()
	// client.Timeout = time.Hour * 6
	res, err := base.HttpClient.Do(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()

	bytes, err := io.ReadAll(res.Body)
	if err != nil {
		return err
	}
	log.Debugf("[alist_v3] response body: %s", string(bytes))
	if res.StatusCode >= 400 {
		return fmt.Errorf("request failed, status: %s", res.Status)
	}
	code := utils.Json.Get(bytes, "code").ToInt()
	if code != 200 {
		// re-login on an auth failure so the next call can succeed
		if code == 401 || code == 403 {
			err = d.login()
			if err != nil {
				return err
			}
		}
		return fmt.Errorf("request failed, code: %d, message: %s", code, utils.Json.Get(bytes, "message").ToString())
	}
	return nil
}

//func (d *AList) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
//	return nil, errs.NotSupport
//}

var _ driver.Driver = (*AListV3)(nil)
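For reference, Put above boils down to one raw HTTP PUT against /api/fs/put carrying the token, the target path, and optional hash headers. A minimal standalone sketch of that protocol; the server address, token, and file name are placeholders, not values from this diff:

package main

import (
	"fmt"
	"net/http"
	"os"
	"path"
)

func main() {
	f, err := os.Open("local.bin") // placeholder file
	if err != nil {
		panic(err)
	}
	defer f.Close()
	st, _ := f.Stat()

	req, err := http.NewRequest(http.MethodPut, "http://127.0.0.1:5244/api/fs/put", f)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "alist-xxxx")                    // placeholder token
	req.Header.Set("File-Path", path.Join("/dest/dir", "local.bin")) // same header Put sets
	req.ContentLength = st.Size()

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
	fmt.Println(res.Status)
}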
@ -1,30 +0,0 @@
package alist_v3

import (
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/op"
)

type Addition struct {
	driver.RootPath
	Address         string `json:"url" required:"true"`
	MetaPassword    string `json:"meta_password"`
	Username        string `json:"username"`
	Password        string `json:"password"`
	Token           string `json:"token"`
	PassUAToUpsteam bool   `json:"pass_ua_to_upsteam" default:"true"`
}

var config = driver.Config{
	Name:             "AList V3",
	LocalSort:        true,
	DefaultRoot:      "/",
	CheckStatus:      true,
	ProxyRangeOption: true,
}

func init() {
	op.RegisterDriver(func() driver.Driver {
		return &AListV3{}
	})
}
@ -1,83 +0,0 @@
package alist_v3

import (
	"time"

	"github.com/alist-org/alist/v3/internal/model"
)

type ListReq struct {
	model.PageReq
	Path     string `json:"path" form:"path"`
	Password string `json:"password" form:"password"`
	Refresh  bool   `json:"refresh"`
}

type ObjResp struct {
	Name     string    `json:"name"`
	Size     int64     `json:"size"`
	IsDir    bool      `json:"is_dir"`
	Modified time.Time `json:"modified"`
	Created  time.Time `json:"created"`
	Sign     string    `json:"sign"`
	Thumb    string    `json:"thumb"`
	Type     int       `json:"type"`
	HashInfo string    `json:"hashinfo"`
}

type FsListResp struct {
	Content  []ObjResp `json:"content"`
	Total    int64     `json:"total"`
	Readme   string    `json:"readme"`
	Write    bool      `json:"write"`
	Provider string    `json:"provider"`
}

type FsGetReq struct {
	Path     string `json:"path" form:"path"`
	Password string `json:"password" form:"password"`
}

type FsGetResp struct {
	ObjResp
	RawURL   string    `json:"raw_url"`
	Readme   string    `json:"readme"`
	Provider string    `json:"provider"`
	Related  []ObjResp `json:"related"`
}

type MkdirOrLinkReq struct {
	Path string `json:"path" form:"path"`
}

type MoveCopyReq struct {
	SrcDir string   `json:"src_dir"`
	DstDir string   `json:"dst_dir"`
	Names  []string `json:"names"`
}

type RenameReq struct {
	Path string `json:"path"`
	Name string `json:"name"`
}

type RemoveReq struct {
	Dir   string   `json:"dir"`
	Names []string `json:"names"`
}

type LoginResp struct {
	Token string `json:"token"`
}

type MeResp struct {
	Id         int    `json:"id"`
	Username   string `json:"username"`
	Password   string `json:"password"`
	BasePath   string `json:"base_path"`
	Role       int    `json:"role"`
	Disabled   bool   `json:"disabled"`
	Permission int    `json:"permission"`
	SsoId      string `json:"sso_id"`
	Otp        bool   `json:"otp"`
}
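Given the struct tags above, the request bodies are plain JSON. A minimal sketch of what a ListReq marshals to; model.PageReq is inlined here so the snippet is self-contained, and its page/per_page tags are an assumption:

package main

import (
	"encoding/json"
	"fmt"
)

// mirrors ListReq above, with the embedded page request inlined
type pageReq struct {
	Page    int `json:"page"`
	PerPage int `json:"per_page"`
}

type listReq struct {
	pageReq
	Path     string `json:"path"`
	Password string `json:"password"`
	Refresh  bool   `json:"refresh"`
}

func main() {
	b, _ := json.Marshal(listReq{pageReq{1, 0}, "/movies", "secret", false})
	fmt.Println(string(b))
	// {"page":1,"per_page":0,"path":"/movies","password":"secret","refresh":false}
}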
@ -1,61 +0,0 @@
package alist_v3

import (
	"fmt"
	"net/http"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/op"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/alist-org/alist/v3/server/common"
	"github.com/go-resty/resty/v2"
	log "github.com/sirupsen/logrus"
)

func (d *AListV3) login() error {
	if d.Username == "" {
		return nil
	}
	var resp common.Resp[LoginResp]
	_, err := d.request("/auth/login", http.MethodPost, func(req *resty.Request) {
		req.SetResult(&resp).SetBody(base.Json{
			"username": d.Username,
			"password": d.Password,
		})
	})
	if err != nil {
		return err
	}
	d.Token = resp.Data.Token
	op.MustSaveDriverStorage(d)
	return nil
}

func (d *AListV3) request(api, method string, callback base.ReqCallback, retry ...bool) ([]byte, error) {
	url := d.Address + "/api" + api
	req := base.RestyClient.R()
	req.SetHeader("Authorization", d.Token)
	if callback != nil {
		callback(req)
	}
	res, err := req.Execute(method, url)
	if err != nil {
		return nil, err
	}
	log.Debugf("[alist_v3] response body: %s", res.String())
	if res.StatusCode() >= 400 {
		return nil, fmt.Errorf("request failed, status: %s", res.Status())
	}
	code := utils.Json.Get(res.Body(), "code").ToInt()
	if code != 200 {
		// on an auth failure, log in again and retry the request exactly once
		if (code == 401 || code == 403) && !utils.IsBool(retry...) {
			err = d.login()
			if err != nil {
				return nil, err
			}
			return d.request(api, method, callback, true)
		}
		return nil, fmt.Errorf("request failed, code: %d, message: %s", code, utils.Json.Get(res.Body(), "message").ToString())
	}
	return res.Body(), nil
}
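Every driver method above goes through this helper, so a call site only supplies a path, a method, and a callback that fills in the resty request; the auth header and the single relogin-and-retry are handled centrally. A hypothetical call site in the same driver (the /me endpoint paired with MeResp is an assumption for illustration):

func (d *AListV3) me() (*MeResp, error) {
	var resp common.Resp[MeResp]
	_, err := d.request("/me", http.MethodGet, func(req *resty.Request) {
		req.SetResult(&resp)
	})
	if err != nil {
		return nil, err
	}
	return &resp.Data, nil
}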
@ -1,76 +0,0 @@
package drivers

import (
	_ "github.com/alist-org/alist/v3/drivers/115"
	_ "github.com/alist-org/alist/v3/drivers/115_open"
	_ "github.com/alist-org/alist/v3/drivers/115_share"
	_ "github.com/alist-org/alist/v3/drivers/123"
	_ "github.com/alist-org/alist/v3/drivers/123_link"
	_ "github.com/alist-org/alist/v3/drivers/123_share"
	_ "github.com/alist-org/alist/v3/drivers/139"
	_ "github.com/alist-org/alist/v3/drivers/189"
	_ "github.com/alist-org/alist/v3/drivers/189pc"
	_ "github.com/alist-org/alist/v3/drivers/alias"
	_ "github.com/alist-org/alist/v3/drivers/alist_v2"
	_ "github.com/alist-org/alist/v3/drivers/alist_v3"
	_ "github.com/alist-org/alist/v3/drivers/aliyundrive"
	_ "github.com/alist-org/alist/v3/drivers/aliyundrive_open"
	_ "github.com/alist-org/alist/v3/drivers/aliyundrive_share"
	_ "github.com/alist-org/alist/v3/drivers/baidu_netdisk"
	_ "github.com/alist-org/alist/v3/drivers/baidu_photo"
	_ "github.com/alist-org/alist/v3/drivers/baidu_share"
	_ "github.com/alist-org/alist/v3/drivers/chaoxing"
	_ "github.com/alist-org/alist/v3/drivers/cloudreve"
	_ "github.com/alist-org/alist/v3/drivers/crypt"
	_ "github.com/alist-org/alist/v3/drivers/dropbox"
	_ "github.com/alist-org/alist/v3/drivers/febbox"
	_ "github.com/alist-org/alist/v3/drivers/ftp"
	_ "github.com/alist-org/alist/v3/drivers/github"
	_ "github.com/alist-org/alist/v3/drivers/github_releases"
	_ "github.com/alist-org/alist/v3/drivers/google_drive"
	_ "github.com/alist-org/alist/v3/drivers/google_photo"
	_ "github.com/alist-org/alist/v3/drivers/halalcloud"
	_ "github.com/alist-org/alist/v3/drivers/ilanzou"
	_ "github.com/alist-org/alist/v3/drivers/ipfs_api"
	_ "github.com/alist-org/alist/v3/drivers/kodbox"
	_ "github.com/alist-org/alist/v3/drivers/lanzou"
	_ "github.com/alist-org/alist/v3/drivers/lenovonas_share"
	_ "github.com/alist-org/alist/v3/drivers/local"
	_ "github.com/alist-org/alist/v3/drivers/mediatrack"
	_ "github.com/alist-org/alist/v3/drivers/mega"
	_ "github.com/alist-org/alist/v3/drivers/misskey"
	_ "github.com/alist-org/alist/v3/drivers/mopan"
	_ "github.com/alist-org/alist/v3/drivers/netease_music"
	_ "github.com/alist-org/alist/v3/drivers/onedrive"
	_ "github.com/alist-org/alist/v3/drivers/onedrive_app"
	_ "github.com/alist-org/alist/v3/drivers/onedrive_sharelink"
	_ "github.com/alist-org/alist/v3/drivers/pikpak"
	_ "github.com/alist-org/alist/v3/drivers/pikpak_share"
	_ "github.com/alist-org/alist/v3/drivers/quark_uc"
	_ "github.com/alist-org/alist/v3/drivers/quark_uc_tv"
	_ "github.com/alist-org/alist/v3/drivers/quqi"
	_ "github.com/alist-org/alist/v3/drivers/s3"
	_ "github.com/alist-org/alist/v3/drivers/seafile"
	_ "github.com/alist-org/alist/v3/drivers/sftp"
	_ "github.com/alist-org/alist/v3/drivers/smb"
	_ "github.com/alist-org/alist/v3/drivers/teambition"
	_ "github.com/alist-org/alist/v3/drivers/terabox"
	_ "github.com/alist-org/alist/v3/drivers/thunder"
	_ "github.com/alist-org/alist/v3/drivers/thunder_browser"
	_ "github.com/alist-org/alist/v3/drivers/thunderx"
	_ "github.com/alist-org/alist/v3/drivers/trainbit"
	_ "github.com/alist-org/alist/v3/drivers/url_tree"
	_ "github.com/alist-org/alist/v3/drivers/uss"
	_ "github.com/alist-org/alist/v3/drivers/virtual"
	_ "github.com/alist-org/alist/v3/drivers/vtencent"
	_ "github.com/alist-org/alist/v3/drivers/webdav"
	_ "github.com/alist-org/alist/v3/drivers/weiyun"
	_ "github.com/alist-org/alist/v3/drivers/wopan"
	_ "github.com/alist-org/alist/v3/drivers/yandex_disk"
)

// All does nothing by itself; importing this package is what matters,
// since each blank import above runs that driver package's init and
// registers it.
func All() {

}
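The blank imports above are the whole mechanism: each driver package's init calls op.RegisterDriver, so importing this package pulls every driver into the registry. The pattern in isolation, with hypothetical names standing in for op.RegisterDriver and the driver constructors:

package main

import "fmt"

var registry []func() string

// register mimics op.RegisterDriver: init functions append a
// constructor, and the blank import alone is what runs init.
func register(f func() string) { registry = append(registry, f) }

func init() { register(func() string { return "demo driver" }) }

func main() {
	for _, f := range registry {
		fmt.Println(f())
	}
}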
@ -1,251 +0,0 @@
package baidu_share

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"path"
	"time"

	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/errs"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/go-resty/resty/v2"
)

type BaiduShare struct {
	model.Storage
	Addition
	client *resty.Client
	info   struct {
		Root    string
		Seckey  string
		Shareid string
		Uk      string
	}
}

func (d *BaiduShare) Config() driver.Config {
	return config
}

func (d *BaiduShare) GetAddition() driver.Additional {
	return &d.Addition
}

func (d *BaiduShare) Init(ctx context.Context) error {
	// TODO login / refresh token
	//op.MustSaveDriverStorage(d)
	d.client = resty.New().
		SetBaseURL("https://pan.baidu.com").
		SetHeader("User-Agent", "netdisk").
		SetCookie(&http.Cookie{Name: "BDUSS", Value: d.BDUSS}).
		SetCookie(&http.Cookie{Name: "ndut_fmt"})
	respJson := struct {
		Errno int64 `json:"errno"`
		Data  struct {
			List [1]struct {
				Path string `json:"path"`
			} `json:"list"`
			Uk      json.Number `json:"uk"`
			Shareid json.Number `json:"shareid"`
			Seckey  string      `json:"seckey"`
		} `json:"data"`
	}{}
	resp, err := d.client.R().
		SetBody(url.Values{
			"pwd":      {d.Pwd},
			"root":     {"1"},
			"shorturl": {d.Surl},
		}.Encode()).
		SetResult(&respJson).
		Post("share/wxlist?channel=weixin&version=2.2.2&clienttype=25&web=1")
	if err == nil {
		if resp.IsSuccess() && respJson.Errno == 0 {
			d.info.Root = path.Dir(respJson.Data.List[0].Path)
			d.info.Seckey = respJson.Data.Seckey
			d.info.Shareid = respJson.Data.Shareid.String()
			d.info.Uk = respJson.Data.Uk.String()
		} else {
			err = fmt.Errorf(" %s; %s; ", resp.Status(), resp.Body())
		}
	}
	return err
}

func (d *BaiduShare) Drop(ctx context.Context) error {
	return nil
}

func (d *BaiduShare) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	// TODO return the files list, required
	reqDir := dir.GetPath()
	isRoot := "0"
	if reqDir == d.RootFolderPath {
		reqDir = path.Join(d.info.Root, reqDir)
	}
	if reqDir == d.info.Root {
		isRoot = "1"
	}
	objs := []model.Obj{}
	var err error
	var page uint64 = 1
	more := true
	for more && err == nil {
		respJson := struct {
			Errno int64 `json:"errno"`
			Data  struct {
				More bool `json:"has_more"`
				List []struct {
					Fsid  json.Number `json:"fs_id"`
					Isdir json.Number `json:"isdir"`
					Path  string      `json:"path"`
					Name  string      `json:"server_filename"`
					Mtime json.Number `json:"server_mtime"`
					Size  json.Number `json:"size"`
				} `json:"list"`
			} `json:"data"`
		}{}
		resp, e := d.client.R().
			SetBody(url.Values{
				"dir":      {reqDir},
				"num":      {"1000"},
				"order":    {"time"},
				"page":     {fmt.Sprint(page)},
				"pwd":      {d.Pwd},
				"root":     {isRoot},
				"shorturl": {d.Surl},
			}.Encode()).
			SetResult(&respJson).
			Post("share/wxlist?channel=weixin&version=2.2.2&clienttype=25&web=1")
		err = e
		if err == nil {
			if resp.IsSuccess() && respJson.Errno == 0 {
				page++
				more = respJson.Data.More
				for _, v := range respJson.Data.List {
					size, _ := v.Size.Int64()
					mtime, _ := v.Mtime.Int64()
					objs = append(objs, &model.Object{
						ID:       v.Fsid.String(),
						Path:     v.Path,
						Name:     v.Name,
						Size:     size,
						Modified: time.Unix(mtime, 0),
						IsFolder: v.Isdir.String() == "1",
					})
				}
			} else {
				err = fmt.Errorf(" %s; %s; ", resp.Status(), resp.Body())
			}
		}
	}
	return objs, err
}

func (d *BaiduShare) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	// TODO return link of file, required
	link := model.Link{Header: d.client.Header}
	sign := ""
	stamp := ""
	signJson := struct {
		Errno int64 `json:"errno"`
		Data  struct {
			Stamp json.Number `json:"timestamp"`
			Sign  string      `json:"sign"`
		} `json:"data"`
	}{}
	resp, err := d.client.R().
		SetQueryParam("surl", d.Surl).
		SetResult(&signJson).
		Get("share/tplconfig?fields=sign,timestamp&channel=chunlei&web=1&app_id=250528&clienttype=0")
	if err == nil {
		if resp.IsSuccess() && signJson.Errno == 0 {
			stamp = signJson.Data.Stamp.String()
			sign = signJson.Data.Sign
		} else {
			err = fmt.Errorf(" %s; %s; ", resp.Status(), resp.Body())
		}
	}
	if err == nil {
		respJson := struct {
			Errno int64 `json:"errno"`
			List  [1]struct {
				Dlink string `json:"dlink"`
			} `json:"list"`
		}{}
		resp, err = d.client.R().
			SetQueryParam("sign", sign).
			SetQueryParam("timestamp", stamp).
			SetBody(url.Values{
				"encrypt":   {"0"},
				"extra":     {fmt.Sprintf(`{"sekey":"%s"}`, d.info.Seckey)},
				"fid_list":  {fmt.Sprintf("[%s]", file.GetID())},
				"primaryid": {d.info.Shareid},
				"product":   {"share"},
				"type":      {"nolimit"},
				"uk":        {d.info.Uk},
			}.Encode()).
			SetResult(&respJson).
			Post("api/sharedownload?app_id=250528&channel=chunlei&clienttype=12&web=1")
		if err == nil {
			if resp.IsSuccess() && respJson.Errno == 0 && respJson.List[0].Dlink != "" {
				link.URL = respJson.List[0].Dlink
			} else {
				err = fmt.Errorf(" %s; %s; ", resp.Status(), resp.Body())
			}
		}
		if err == nil {
			resp, err = d.client.R().
				SetDoNotParseResponse(true).
				Get(link.URL)
			if err == nil {
				defer resp.RawBody().Close()
				if resp.IsError() {
					byt, _ := io.ReadAll(resp.RawBody())
					err = fmt.Errorf(" %s; %s; ", resp.Status(), byt)
				}
			}
		}
	}
	return &link, err
}

func (d *BaiduShare) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
	// TODO create folder, optional
	return errs.NotSupport
}

func (d *BaiduShare) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
	// TODO move obj, optional
	return errs.NotSupport
}

func (d *BaiduShare) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
	// TODO rename obj, optional
	return errs.NotSupport
}

func (d *BaiduShare) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
	// TODO copy obj, optional
	return errs.NotSupport
}

func (d *BaiduShare) Remove(ctx context.Context, obj model.Obj) error {
	// TODO remove obj, optional
	return errs.NotSupport
}

func (d *BaiduShare) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
	// TODO upload file, optional
	return errs.NotSupport
}

//func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
//	return nil, errs.NotSupport
//}

var _ driver.Driver = (*BaiduShare)(nil)
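List above pages through the share with a has_more flag rather than a cursor: request page 1, 2, ... until the server reports no more entries. Stripped to its skeleton (fetchPage is a hypothetical stand-in for the wxlist POST):

page := uint64(1)
more := true
for more {
	res, err := fetchPage(page) // POST share/wxlist with "page" and "num" form fields
	if err != nil {
		break
	}
	more = res.HasMore // the server's has_more flag
	page++
}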
@ -1,37 +0,0 @@
package baidu_share

import (
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/op"
)

type Addition struct {
	// Usually one of two
	driver.RootPath
	// driver.RootID
	// define other
	// Field string `json:"field" type:"select" required:"true" options:"a,b,c" default:"a"`
	Surl  string `json:"surl"`
	Pwd   string `json:"pwd"`
	BDUSS string `json:"BDUSS"`
}

var config = driver.Config{
	Name:              "BaiduShare",
	LocalSort:         true,
	OnlyLocal:         false,
	OnlyProxy:         false,
	NoCache:           false,
	NoUpload:          true,
	NeedMs:            false,
	DefaultRoot:       "/",
	CheckStatus:       false,
	Alert:             "",
	NoOverwriteUpload: false,
}

func init() {
	op.RegisterDriver(func() driver.Driver {
		return &BaiduShare{}
	})
}
@ -1 +0,0 @@
package baidu_share
@ -1,3 +0,0 @@
package baidu_share

// do other things not defined in the Driver interface
@ -1,276 +0,0 @@
package cloudreve

import (
	"bytes"
	"context"
	"encoding/base64"
	"errors"
	"fmt"
	"io"
	"net/http"
	"strconv"
	"strings"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/conf"
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/internal/setting"
	"github.com/alist-org/alist/v3/pkg/cookie"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/go-resty/resty/v2"
	jsoniter "github.com/json-iterator/go"
)

// do other things not defined in the Driver interface

const loginPath = "/user/session"

func (d *Cloudreve) request(method string, path string, callback base.ReqCallback, out interface{}) error {
	u := d.Address + "/api/v3" + path
	ua := d.CustomUA
	if ua == "" {
		ua = base.UserAgent
	}
	req := base.RestyClient.R()
	req.SetHeaders(map[string]string{
		"Cookie":     "cloudreve-session=" + d.Cookie,
		"Accept":     "application/json, text/plain, */*",
		"User-Agent": ua,
	})

	var r Resp
	req.SetResult(&r)

	if callback != nil {
		callback(req)
	}

	resp, err := req.Execute(method, u)
	if err != nil {
		return err
	}
	if !resp.IsSuccess() {
		return errors.New(resp.String())
	}

	if r.Code != 0 {
		// session expired: refresh the cookie by logging in again, then replay the request
		if r.Code == http.StatusUnauthorized && path != loginPath {
			if d.Username != "" && d.Password != "" {
				err = d.login()
				if err != nil {
					return err
				}
				return d.request(method, path, callback, out)
			}
		}

		return errors.New(r.Msg)
	}
	sess := cookie.GetCookie(resp.Cookies(), "cloudreve-session")
	if sess != nil {
		d.Cookie = sess.Value
	}
	if out != nil && r.Data != nil {
		var marshal []byte
		marshal, err = jsoniter.Marshal(r.Data)
		if err != nil {
			return err
		}
		err = jsoniter.Unmarshal(marshal, out)
		if err != nil {
			return err
		}
	}

	return nil
}

func (d *Cloudreve) login() error {
	var siteConfig Config
	err := d.request(http.MethodGet, "/site/config", nil, &siteConfig)
	if err != nil {
		return err
	}
	for i := 0; i < 5; i++ {
		err = d.doLogin(siteConfig.LoginCaptcha)
		if err == nil {
			break
		}
		if err.Error() != "CAPTCHA not match." {
			break
		}
	}
	return err
}

func (d *Cloudreve) doLogin(needCaptcha bool) error {
	var captchaCode string
	var err error
	if needCaptcha {
		var captcha string
		err = d.request(http.MethodGet, "/site/captcha", nil, &captcha)
		if err != nil {
			return err
		}
		if len(captcha) == 0 {
			return errors.New("can not get captcha")
		}
		i := strings.Index(captcha, ",")
		dec := base64.NewDecoder(base64.StdEncoding, strings.NewReader(captcha[i+1:]))
		vRes, err := base.RestyClient.R().SetMultipartField(
			"image", "validateCode.png", "image/png", dec).
			Post(setting.GetStr(conf.OcrApi))
		if err != nil {
			return err
		}
		if jsoniter.Get(vRes.Body(), "status").ToInt() != 200 {
			return errors.New("ocr error:" + jsoniter.Get(vRes.Body(), "msg").ToString())
		}
		captchaCode = jsoniter.Get(vRes.Body(), "result").ToString()
	}
	var resp Resp
	err = d.request(http.MethodPost, loginPath, func(req *resty.Request) {
		req.SetBody(base.Json{
			"username":    d.Addition.Username,
			"Password":    d.Addition.Password,
			"captchaCode": captchaCode,
		})
	}, &resp)
	return err
}

func convertSrc(obj model.Obj) map[string]interface{} {
	m := make(map[string]interface{})
	var dirs []string
	var items []string
	if obj.IsDir() {
		dirs = append(dirs, obj.GetID())
	} else {
		items = append(items, obj.GetID())
	}
	m["dirs"] = dirs
	m["items"] = items
	return m
}

func (d *Cloudreve) GetThumb(file Object) (model.Thumbnail, error) {
	if !d.Addition.EnableThumbAndFolderSize {
		return model.Thumbnail{}, nil
	}
	ua := d.CustomUA
	if ua == "" {
		ua = base.UserAgent
	}
	req := base.NoRedirectClient.R()
	req.SetHeaders(map[string]string{
		"Cookie":     "cloudreve-session=" + d.Cookie,
		"Accept":     "image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8",
		"User-Agent": ua,
	})
	resp, err := req.Execute(http.MethodGet, d.Address+"/api/v3/file/thumb/"+file.Id)
	if err != nil {
		return model.Thumbnail{}, err
	}
	return model.Thumbnail{
		Thumbnail: resp.Header().Get("Location"),
	}, nil
}

func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
	uploadUrl := u.UploadURLs[0]
	credential := u.Credential
	var finish int64 = 0
	var chunk int = 0
	DEFAULT := int64(u.ChunkSize)
	for finish < stream.GetSize() {
		if utils.IsCanceled(ctx) {
			return ctx.Err()
		}
		utils.Log.Debugf("[Cloudreve-Remote] upload: %d", finish)
		var byteSize = DEFAULT
		left := stream.GetSize() - finish
		if left < DEFAULT {
			byteSize = left
		}
		byteData := make([]byte, byteSize)
		n, err := io.ReadFull(stream, byteData)
		utils.Log.Debug(err, n)
		if err != nil {
			return err
		}
		req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk),
			driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
		if err != nil {
			return err
		}
		req = req.WithContext(ctx)
		req.ContentLength = byteSize
		// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
		req.Header.Set("Authorization", fmt.Sprint(credential))
		finish += byteSize
		res, err := base.HttpClient.Do(req)
		if err != nil {
			return err
		}
		_ = res.Body.Close()
		up(float64(finish) * 100 / float64(stream.GetSize()))
		chunk++
	}
	return nil
}

func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
	uploadUrl := u.UploadURLs[0]
	var finish int64 = 0
	DEFAULT := int64(u.ChunkSize)
	for finish < stream.GetSize() {
		if utils.IsCanceled(ctx) {
			return ctx.Err()
		}
		utils.Log.Debugf("[Cloudreve-OneDrive] upload: %d", finish)
		var byteSize = DEFAULT
		left := stream.GetSize() - finish
		if left < DEFAULT {
			byteSize = left
		}
		byteData := make([]byte, byteSize)
		n, err := io.ReadFull(stream, byteData)
		utils.Log.Debug(err, n)
		if err != nil {
			return err
		}
		req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
		if err != nil {
			return err
		}
		req = req.WithContext(ctx)
		req.ContentLength = byteSize
		// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
		req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()))
		finish += byteSize
		res, err := base.HttpClient.Do(req)
		if err != nil {
			return err
		}
		// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
		if res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200 {
			data, _ := io.ReadAll(res.Body)
			_ = res.Body.Close()
			return errors.New(string(data))
		}
		_ = res.Body.Close()
		up(float64(finish) * 100 / float64(stream.GetSize()))
	}
	// on success, send the finish callback so Cloudreve records the upload
	err := d.request(http.MethodPost, "/callback/onedrive/finish/"+u.SessionID, func(req *resty.Request) {
		req.SetBody("{}")
	}, nil)
	if err != nil {
		return err
	}
	return nil
}
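upOneDrive's Content-Range header describes each chunk as an inclusive byte range over the total size, with the last chunk shortened to whatever remains. A standalone check of that arithmetic with made-up numbers:

package main

import "fmt"

func main() {
	size, chunk := int64(10), int64(4)
	var finish int64
	for finish < size {
		n := chunk
		if size-finish < chunk {
			n = size - finish
		}
		// same format string as upOneDrive above
		fmt.Printf("bytes %d-%d/%d\n", finish, finish+n-1, size)
		finish += n
	}
	// Output:
	// bytes 0-3/10
	// bytes 4-7/10
	// bytes 8-9/10
}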
@ -1,43 +0,0 @@
package dropbox

import (
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/op"
)

const (
	DefaultClientID = "76lrwrklhdn1icb"
)

type Addition struct {
	RefreshToken string `json:"refresh_token" required:"true"`
	driver.RootPath

	OauthTokenURL string `json:"oauth_token_url" default:"https://api.xhofe.top/alist/dropbox/token"`
	ClientID      string `json:"client_id" required:"false" help:"Keep it empty if you don't have one"`
	ClientSecret  string `json:"client_secret" required:"false" help:"Keep it empty if you don't have one"`

	AccessToken     string
	RootNamespaceId string
}

var config = driver.Config{
	Name:              "Dropbox",
	LocalSort:         false,
	OnlyLocal:         false,
	OnlyProxy:         false,
	NoCache:           false,
	NoUpload:          false,
	NeedMs:            false,
	DefaultRoot:       "",
	NoOverwriteUpload: true,
}

func init() {
	op.RegisterDriver(func() driver.Driver {
		return &Dropbox{
			base:        "https://api.dropboxapi.com",
			contentBase: "https://content.dropboxapi.com",
		}
	})
}
@ -1,116 +0,0 @@
package ftp

import (
	"io"
	"os"
	"sync"
	"sync/atomic"
	"time"

	"github.com/jlaffaye/ftp"
)

// do other things not defined in the Driver interface

func (d *FTP) login() error {
	if d.conn != nil {
		_, err := d.conn.CurrentDir()
		if err == nil {
			return nil
		}
	}
	conn, err := ftp.Dial(d.Address, ftp.DialWithShutTimeout(10*time.Second))
	if err != nil {
		return err
	}
	err = conn.Login(d.Username, d.Password)
	if err != nil {
		return err
	}
	d.conn = conn
	return nil
}

// FileReader is an FTP file reader that implements io.MFile to support seeking.
type FileReader struct {
	conn         *ftp.ServerConn
	resp         *ftp.Response
	offset       atomic.Int64
	readAtOffset int64
	mu           sync.Mutex
	path         string
	size         int64
}

func NewFileReader(conn *ftp.ServerConn, path string, size int64) *FileReader {
	return &FileReader{
		conn: conn,
		path: path,
		size: size,
	}
}

func (r *FileReader) Read(buf []byte) (n int, err error) {
	n, err = r.ReadAt(buf, r.offset.Load())
	r.offset.Add(int64(n))
	return
}

func (r *FileReader) ReadAt(buf []byte, off int64) (n int, err error) {
	if off < 0 {
		return -1, os.ErrInvalid
	}
	r.mu.Lock()
	defer r.mu.Unlock()

	if r.resp != nil && off != r.readAtOffset {
		// the server-side stream is at the wrong position; restart the
		// transfer to correct the offset
		_ = r.resp.Close()
		r.resp = nil
	}

	if r.resp == nil {
		r.resp, err = r.conn.RetrFrom(r.path, uint64(off))
		r.readAtOffset = off
		if err != nil {
			return 0, err
		}
	}

	n, err = r.resp.Read(buf)
	r.readAtOffset += int64(n)
	return
}

func (r *FileReader) Seek(offset int64, whence int) (int64, error) {
	oldOffset := r.offset.Load()
	var newOffset int64
	switch whence {
	case io.SeekStart:
		newOffset = offset
	case io.SeekCurrent:
		newOffset = oldOffset + offset
	case io.SeekEnd:
		newOffset = r.size + offset
	default:
		return -1, os.ErrInvalid
	}

	if newOffset < 0 {
		// offset out of range
		return oldOffset, os.ErrInvalid
	}
	if newOffset == oldOffset {
		// offset not changed, so return directly
		return oldOffset, nil
	}
	r.offset.Store(newOffset)
	return newOffset, nil
}

func (r *FileReader) Close() error {
	if r.resp != nil {
		return r.resp.Close()
	}
	return nil
}
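Because FileReader implements ReadAt, it can back an io.SectionReader for range reads without re-dialing per range; the transfer only restarts when the requested offset differs from where the server-side stream currently sits. A hypothetical use (host, credentials, path, and size are placeholders):

conn, err := ftp.Dial("ftp.example.com:21", ftp.DialWithShutTimeout(10*time.Second))
if err != nil {
	panic(err)
}
if err := conn.Login("user", "pass"); err != nil {
	panic(err)
}
r := NewFileReader(conn, "/pub/file.bin", 1<<20) // size known from a prior LIST
defer r.Close()
section := io.NewSectionReader(r, 1024, 4096) // bytes 1024..5119
_, _ = io.Copy(io.Discard, section)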
@ -1,131 +0,0 @@
package ipfs

import (
	"context"
	"net/url"
	stdpath "path"
	"path/filepath"
	"strings"

	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/model"
	shell "github.com/ipfs/go-ipfs-api"
)

type IPFS struct {
	model.Storage
	Addition
	sh      *shell.Shell
	gateURL *url.URL
}

func (d *IPFS) Config() driver.Config {
	return config
}

func (d *IPFS) GetAddition() driver.Additional {
	return &d.Addition
}

func (d *IPFS) Init(ctx context.Context) error {
	d.sh = shell.NewShell(d.Endpoint)
	gateURL, err := url.Parse(d.Gateway)
	if err != nil {
		return err
	}
	d.gateURL = gateURL
	return nil
}

func (d *IPFS) Drop(ctx context.Context) error {
	return nil
}

func (d *IPFS) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	path := dir.GetPath()
	// make sure the MFS path ends with a slash
	if !strings.HasSuffix(path, "/") {
		path += "/"
	}

	path_cid, err := d.sh.FilesStat(ctx, path)
	if err != nil {
		return nil, err
	}

	dirs, err := d.sh.List(path_cid.Hash)
	if err != nil {
		return nil, err
	}

	objlist := []model.Obj{}
	for _, file := range dirs {
		gateurl := *d.gateURL
		gateurl.Path = "ipfs/" + file.Hash
		gateurl.RawQuery = "filename=" + url.PathEscape(file.Name)
		objlist = append(objlist, &model.ObjectURL{
			Object: model.Object{ID: file.Hash, Name: file.Name, Size: int64(file.Size), IsFolder: file.Type == 1},
			Url:    model.Url{Url: gateurl.String()},
		})
	}

	return objlist, nil
}

func (d *IPFS) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	link := d.Gateway + "/ipfs/" + file.GetID() + "/?filename=" + url.PathEscape(file.GetName())
	return &model.Link{URL: link}, nil
}

func (d *IPFS) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
	path := parentDir.GetPath()
	// make sure the MFS path ends with a slash
	if !strings.HasSuffix(path, "/") {
		path += "/"
	}
	return d.sh.FilesMkdir(ctx, path+dirName)
}

func (d *IPFS) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
	return d.sh.FilesMv(ctx, srcObj.GetPath(), dstDir.GetPath())
}

func (d *IPFS) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
	newFileName := filepath.Dir(srcObj.GetPath()) + "/" + newName
	return d.sh.FilesMv(ctx, srcObj.GetPath(), strings.ReplaceAll(newFileName, "\\", "/"))
}

func (d *IPFS) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
	newFileName := dstDir.GetPath() + "/" + filepath.Base(srcObj.GetPath())
	return d.sh.FilesCp(ctx, srcObj.GetPath(), strings.ReplaceAll(newFileName, "\\", "/"))
}

func (d *IPFS) Remove(ctx context.Context, obj model.Obj) error {
	return d.sh.FilesRm(ctx, obj.GetPath(), true)
}

func (d *IPFS) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
	_, err := d.sh.Add(driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
		Reader:         s,
		UpdateProgress: up,
	}), ToFiles(stdpath.Join(dstDir.GetPath(), s.GetName())))
	return err
}

// ToFiles returns an AddOpts that writes the added file into MFS at dstDir.
func ToFiles(dstDir string) shell.AddOpts {
	return func(rb *shell.RequestBuilder) error {
		rb.Option("to-files", dstDir)
		return nil
	}
}

//func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
//	return nil, errs.NotSupport
//}

var _ driver.Driver = (*IPFS)(nil)
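The links built in List and Link are just <gateway>/ipfs/<CID> with the display name passed as a filename query parameter. A standalone check with a made-up CID:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	gate, _ := url.Parse("https://ipfs.io")
	u := *gate
	u.Path = "ipfs/QmExampleCID" // placeholder CID
	u.RawQuery = "filename=" + url.PathEscape("my file.txt")
	fmt.Println(u.String()) // https://ipfs.io/ipfs/QmExampleCID?filename=my%20file.txt
}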
@ -1,8 +0,0 @@
//go:build (linux || darwin || windows) && (amd64 || arm64)
// +build linux darwin windows
// +build amd64 arm64

package drivers

import (
	_ "github.com/alist-org/alist/v3/drivers/lark"
)
@ -1,403 +0,0 @@
package lark

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"strconv"
	"strings"
	"time"

	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/errs"
	"github.com/alist-org/alist/v3/internal/model"
	lark "github.com/larksuite/oapi-sdk-go/v3"
	larkcore "github.com/larksuite/oapi-sdk-go/v3/core"
	larkdrive "github.com/larksuite/oapi-sdk-go/v3/service/drive/v1"
	"golang.org/x/time/rate"
)

type Lark struct {
	model.Storage
	Addition

	client          *lark.Client
	rootFolderToken string
}

func (c *Lark) Config() driver.Config {
	return config
}

func (c *Lark) GetAddition() driver.Additional {
	return &c.Addition
}

func (c *Lark) Init(ctx context.Context) error {
	c.client = lark.NewClient(c.AppId, c.AppSecret, lark.WithTokenCache(newTokenCache()))

	paths := strings.Split(c.RootFolderPath, "/")
	token := ""

	var ok bool
	var file *larkdrive.File
	for _, p := range paths {
		if p == "" {
			token = ""
			continue
		}

		resp, err := c.client.Drive.File.ListByIterator(ctx, larkdrive.NewListFileReqBuilder().FolderToken(token).Build())
		if err != nil {
			return err
		}

		for {
			ok, file, err = resp.Next()
			if !ok {
				return errs.ObjectNotFound
			}

			if err != nil {
				return err
			}

			if *file.Type == "folder" && *file.Name == p {
				token = *file.Token
				break
			}
		}
	}

	c.rootFolderToken = token

	return nil
}

func (c *Lark) Drop(ctx context.Context) error {
	return nil
}

func (c *Lark) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	token, ok := c.getObjToken(ctx, dir.GetPath())
	if !ok {
		return nil, errs.ObjectNotFound
	}

	if token == emptyFolderToken {
		return nil, nil
	}

	resp, err := c.client.Drive.File.ListByIterator(ctx, larkdrive.NewListFileReqBuilder().FolderToken(token).Build())
	if err != nil {
		return nil, err
	}

	ok = false
	var file *larkdrive.File
	var res []model.Obj

	for {
		ok, file, err = resp.Next()
		if !ok {
			break
		}

		if err != nil {
			return nil, err
		}

		modifiedUnix, _ := strconv.ParseInt(*file.ModifiedTime, 10, 64)
		createdUnix, _ := strconv.ParseInt(*file.CreatedTime, 10, 64)

		f := model.Object{
			ID:       *file.Token,
			Path:     strings.Join([]string{c.RootFolderPath, dir.GetPath(), *file.Name}, "/"),
			Name:     *file.Name,
			Size:     0,
			Modified: time.Unix(modifiedUnix, 0),
			Ctime:    time.Unix(createdUnix, 0),
			IsFolder: *file.Type == "folder",
		}
		res = append(res, &f)
	}

	return res, nil
}

func (c *Lark) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	token, ok := c.getObjToken(ctx, file.GetPath())
	if !ok {
		return nil, errs.ObjectNotFound
	}

	resp, err := c.client.GetTenantAccessTokenBySelfBuiltApp(ctx, &larkcore.SelfBuiltTenantAccessTokenReq{
		AppID:     c.AppId,
		AppSecret: c.AppSecret,
	})

	if err != nil {
		return nil, err
	}

	if !c.ExternalMode {
		accessToken := resp.TenantAccessToken

		url := fmt.Sprintf("https://open.feishu.cn/open-apis/drive/v1/files/%s/download", token)

		req, err := http.NewRequest(http.MethodGet, url, nil)
		if err != nil {
			return nil, err
		}

		req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", accessToken))
		req.Header.Set("Range", "bytes=0-1")

		ar, err := http.DefaultClient.Do(req)
		if err != nil {
			return nil, err
		}

		if ar.StatusCode != http.StatusPartialContent {
			return nil, errors.New("failed to get download link")
		}

		return &model.Link{
			URL: url,
			Header: http.Header{
				"Authorization": []string{fmt.Sprintf("Bearer %s", accessToken)},
			},
		}, nil
	} else {
		url := strings.Join([]string{c.TenantUrlPrefix, "file", token}, "/")

		return &model.Link{
			URL: url,
		}, nil
	}
}

func (c *Lark) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
	token, ok := c.getObjToken(ctx, parentDir.GetPath())
	if !ok {
		return nil, errs.ObjectNotFound
	}

	body, err := larkdrive.NewCreateFolderFilePathReqBodyBuilder().FolderToken(token).Name(dirName).Build()
	if err != nil {
		return nil, err
	}

	resp, err := c.client.Drive.File.CreateFolder(ctx,
		larkdrive.NewCreateFolderFileReqBuilder().Body(body).Build())
	if err != nil {
		return nil, err
	}

	if !resp.Success() {
		return nil, errors.New(resp.Error())
	}

	return &model.Object{
		ID:       *resp.Data.Token,
		Path:     strings.Join([]string{c.RootFolderPath, parentDir.GetPath(), dirName}, "/"),
		Name:     dirName,
		Size:     0,
		IsFolder: true,
	}, nil
}

func (c *Lark) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	srcToken, ok := c.getObjToken(ctx, srcObj.GetPath())
	if !ok {
		return nil, errs.ObjectNotFound
	}

	dstDirToken, ok := c.getObjToken(ctx, dstDir.GetPath())
	if !ok {
		return nil, errs.ObjectNotFound
	}

	req := larkdrive.NewMoveFileReqBuilder().
		Body(larkdrive.NewMoveFileReqBodyBuilder().
			Type("file").
			FolderToken(dstDirToken).
			Build()).FileToken(srcToken).
		Build()

	// send the request
	resp, err := c.client.Drive.File.Move(ctx, req)
	if err != nil {
		return nil, err
	}

	if !resp.Success() {
		return nil, errors.New(resp.Error())
	}

	return nil, nil
}

func (c *Lark) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
	// TODO rename obj, optional
	return nil, errs.NotImplement
}

func (c *Lark) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	srcToken, ok := c.getObjToken(ctx, srcObj.GetPath())
	if !ok {
		return nil, errs.ObjectNotFound
	}

	dstDirToken, ok := c.getObjToken(ctx, dstDir.GetPath())
	if !ok {
		return nil, errs.ObjectNotFound
	}

	req := larkdrive.NewCopyFileReqBuilder().
		Body(larkdrive.NewCopyFileReqBodyBuilder().
			Name(srcObj.GetName()).
			Type("file").
			FolderToken(dstDirToken).
			Build()).FileToken(srcToken).
		Build()

	// send the request
	resp, err := c.client.Drive.File.Copy(ctx, req)
	if err != nil {
		return nil, err
	}

	if !resp.Success() {
		return nil, errors.New(resp.Error())
	}

	return nil, nil
}

func (c *Lark) Remove(ctx context.Context, obj model.Obj) error {
	token, ok := c.getObjToken(ctx, obj.GetPath())
	if !ok {
		return errs.ObjectNotFound
	}

	req := larkdrive.NewDeleteFileReqBuilder().
		FileToken(token).
		Type("file").
		Build()

	// send the request
	resp, err := c.client.Drive.File.Delete(ctx, req)
	if err != nil {
		return err
	}

	if !resp.Success() {
		return errors.New(resp.Error())
	}

	return nil
}

var uploadLimit = rate.NewLimiter(rate.Every(time.Second), 5)

func (c *Lark) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	token, ok := c.getObjToken(ctx, dstDir.GetPath())
	if !ok {
		return nil, errs.ObjectNotFound
	}

	// prepare the upload session
	req := larkdrive.NewUploadPrepareFileReqBuilder().
		FileUploadInfo(larkdrive.NewFileUploadInfoBuilder().
			FileName(stream.GetName()).
			ParentType(`explorer`).
			ParentNode(token).
			Size(int(stream.GetSize())).
			Build()).
		Build()

	// send the request
	err := uploadLimit.Wait(ctx)
	if err != nil {
		return nil, err
	}
	resp, err := c.client.Drive.File.UploadPrepare(ctx, req)
	if err != nil {
		return nil, err
	}

	if !resp.Success() {
		return nil, errors.New(resp.Error())
	}

	uploadId := *resp.Data.UploadId
	blockSize := *resp.Data.BlockSize
	blockCount := *resp.Data.BlockNum

	// upload the blocks
	for i := 0; i < blockCount; i++ {
		length := int64(blockSize)
		if i == blockCount-1 {
			length = stream.GetSize() - int64(i*blockSize)
		}

		reader := driver.NewLimitedUploadStream(ctx, io.LimitReader(stream, length))

		req := larkdrive.NewUploadPartFileReqBuilder().
			Body(larkdrive.NewUploadPartFileReqBodyBuilder().
				UploadId(uploadId).
				Seq(i).
				Size(int(length)).
				File(reader).
				Build()).
			Build()

		// send the request
		err = uploadLimit.Wait(ctx)
		if err != nil {
			return nil, err
		}
		resp, err := c.client.Drive.File.UploadPart(ctx, req)

		if err != nil {
			return nil, err
		}

		if !resp.Success() {
			return nil, errors.New(resp.Error())
		}

		// UpdateProgress expects a percentage in the 0-100 range
		up(float64(i+1) * 100 / float64(blockCount))
	}

	// finish the upload session
	closeReq := larkdrive.NewUploadFinishFileReqBuilder().
		Body(larkdrive.NewUploadFinishFileReqBodyBuilder().
			UploadId(uploadId).
			BlockNum(blockCount).
			Build()).
		Build()

	// send the request
	closeResp, err := c.client.Drive.File.UploadFinish(ctx, closeReq)
	if err != nil {
		return nil, err
	}

	if !closeResp.Success() {
		return nil, errors.New(closeResp.Error())
	}

	return &model.Object{
		ID: *closeResp.Data.FileToken,
	}, nil
}

//func (d *Lark) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
//	return nil, errs.NotSupport
//}

var _ driver.Driver = (*Lark)(nil)
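Put above slices the stream into BlockNum blocks of BlockSize bytes, shortening only the final block. A standalone check of the length formula with made-up numbers:

package main

import "fmt"

func main() {
	size, blockSize := int64(10_500_000), 4*1024*1024 // values are made up
	blockCount := int((size + int64(blockSize) - 1) / int64(blockSize))
	for i := 0; i < blockCount; i++ {
		length := int64(blockSize)
		if i == blockCount-1 {
			length = size - int64(i*blockSize) // same formula as Put above
		}
		fmt.Printf("block %d: %d bytes\n", i, length)
	}
	// block 0: 4194304 bytes
	// block 1: 4194304 bytes
	// block 2: 2111392 bytes
}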
@ -1,36 +0,0 @@
package lark

import (
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/op"
)

type Addition struct {
	// Usually one of two
	driver.RootPath
	// define other
	AppId           string `json:"app_id" type:"text" help:"app id"`
	AppSecret       string `json:"app_secret" type:"text" help:"app secret"`
	ExternalMode    bool   `json:"external_mode" type:"bool" help:"external mode"`
	TenantUrlPrefix string `json:"tenant_url_prefix" type:"text" help:"tenant url prefix"`
}

var config = driver.Config{
	Name:              "Lark",
	LocalSort:         false,
	OnlyLocal:         false,
	OnlyProxy:         false,
	NoCache:           false,
	NoUpload:          false,
	NeedMs:            false,
	DefaultRoot:       "/",
	CheckStatus:       false,
	Alert:             "",
	NoOverwriteUpload: true,
}

func init() {
	op.RegisterDriver(func() driver.Driver {
		return &Lark{}
	})
}
@ -1,32 +0,0 @@
package lark

import (
	"context"
	"time"

	"github.com/Xhofe/go-cache"
)

type TokenCache struct {
	cache.ICache[string]
}

func (t *TokenCache) Set(_ context.Context, key string, value string, expireTime time.Duration) error {
	t.ICache.Set(key, value, cache.WithEx[string](expireTime))

	return nil
}

func (t *TokenCache) Get(_ context.Context, key string) (string, error) {
	v, ok := t.ICache.Get(key)
	if ok {
		return v, nil
	}

	return "", nil
}

func newTokenCache() *TokenCache {
	c := cache.NewMemCache[string]()

	return &TokenCache{c}
}
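TokenCache adapts the process-local cache to the Set/Get shape the Lark SDK's WithTokenCache option expects, so tenant tokens survive between calls instead of hitting the auth endpoint each time. A hypothetical direct use:

tc := newTokenCache()
_ = tc.Set(context.Background(), "tenant_access_token", "t-xxxx", 2*time.Hour) // placeholder token
v, _ := tc.Get(context.Background(), "tenant_access_token")
// v == "t-xxxx" until the entry expires, after which Get returns "" with a nil error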
@ -1,66 +0,0 @@
package lark

import (
	"context"
	"path"
	"time"

	"github.com/Xhofe/go-cache"
	larkdrive "github.com/larksuite/oapi-sdk-go/v3/service/drive/v1"
	log "github.com/sirupsen/logrus"
)

const objTokenCacheDuration = 5 * time.Minute
const emptyFolderToken = "empty"

var objTokenCache = cache.NewMemCache[string]()
var exOpts = cache.WithEx[string](objTokenCacheDuration)

func (c *Lark) getObjToken(ctx context.Context, folderPath string) (string, bool) {
	if token, ok := objTokenCache.Get(folderPath); ok {
		return token, true
	}

	dir, name := path.Split(folderPath)
	// strip the last slash of dir if it exists
	if len(dir) > 0 && dir[len(dir)-1] == '/' {
		dir = dir[:len(dir)-1]
	}
	if name == "" {
		return c.rootFolderToken, true
	}

	var parentToken string
	var found bool
	parentToken, found = c.getObjToken(ctx, dir)
	if !found {
		return emptyFolderToken, false
	}

	req := larkdrive.NewListFileReqBuilder().FolderToken(parentToken).Build()
	resp, err := c.client.Drive.File.ListByIterator(ctx, req)

	if err != nil {
		log.WithError(err).Error("failed to list files")
		return emptyFolderToken, false
	}

	var file *larkdrive.File
	for {
		found, file, err = resp.Next()
		if !found {
			break
		}

		if err != nil {
			log.WithError(err).Error("failed to get next file")
			break
		}

		if *file.Name == name {
			objTokenCache.Set(folderPath, *file.Token, exOpts)
			return *file.Token, true
		}
	}

	return emptyFolderToken, false
}
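getObjToken resolves a path one component at a time: it recurses to the parent directory, lists that folder, and memoizes each resolved token for five minutes. A hypothetical trace for /a/b/c:

// getObjToken("/a/b/c")
//   -> getObjToken("/a/b")   (parent, cache miss)
//     -> getObjToken("/a")   (parent, cache miss)
//       -> getObjToken("")   (root: returns c.rootFolderToken)
//     list the root folder, find "a", cache its token
//   list "a", find "b", cache its token
// list "b", find "c", cache and return its token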
@ -1,33 +0,0 @@
package LenovoNasShare

import (
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/op"
)

type Addition struct {
	driver.RootPath
	ShareId  string `json:"share_id" required:"true" help:"The part after the last / in the shared link"`
	SharePwd string `json:"share_pwd" required:"true" help:"The password of the shared link"`
	Host     string `json:"host" required:"true" default:"https://siot-share.lenovo.com.cn" help:"You can change it to your local area network"`
}

var config = driver.Config{
	Name:              "LenovoNasShare",
	LocalSort:         true,
	OnlyLocal:         false,
	OnlyProxy:         false,
	NoCache:           false,
	NoUpload:          true,
	NeedMs:            false,
	DefaultRoot:       "",
	CheckStatus:       false,
	Alert:             "",
	NoOverwriteUpload: false,
}

func init() {
	op.RegisterDriver(func() driver.Driver {
		return &LenovoNasShare{}
	})
}
@ -1,161 +0,0 @@
package local

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
	"sort"
	"strconv"
	"strings"

	"github.com/alist-org/alist/v3/internal/conf"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/disintegration/imaging"
	ffmpeg "github.com/u2takey/ffmpeg-go"
)

func isSymlinkDir(f fs.FileInfo, path string) bool {
	if f.Mode()&os.ModeSymlink == os.ModeSymlink {
		dst, err := os.Readlink(filepath.Join(path, f.Name()))
		if err != nil {
			return false
		}
		if !filepath.IsAbs(dst) {
			dst = filepath.Join(path, dst)
		}
		stat, err := os.Stat(dst)
		if err != nil {
			return false
		}
		return stat.IsDir()
	}
	return false
}

// GetSnapshot extracts a single frame from the video at d.VideoThumbPos to use as its thumbnail.
func (d *Local) GetSnapshot(videoPath string) (imgData *bytes.Buffer, err error) {
	// Run ffprobe to get the video duration
	jsonOutput, err := ffmpeg.Probe(videoPath)
	if err != nil {
		return nil, err
	}
	// get format.duration from the json string
	type probeFormat struct {
		Duration string `json:"duration"`
	}
	type probeData struct {
		Format probeFormat `json:"format"`
	}
	var probe probeData
	err = json.Unmarshal([]byte(jsonOutput), &probe)
	if err != nil {
		return nil, err
	}
	totalDuration, err := strconv.ParseFloat(probe.Format.Duration, 64)
	if err != nil {
		return nil, err
	}

	var ss string
	if strings.HasSuffix(d.VideoThumbPos, "%") {
		percentage, err := strconv.ParseFloat(strings.TrimSuffix(d.VideoThumbPos, "%"), 64)
		if err != nil {
			return nil, err
		}
		ss = fmt.Sprintf("%f", totalDuration*percentage/100)
	} else {
		val, err := strconv.ParseFloat(d.VideoThumbPos, 64)
		if err != nil {
			return nil, err
		}
		// If the value is greater than the total duration, use the total duration
		if val > totalDuration {
			ss = fmt.Sprintf("%f", totalDuration)
		} else {
			ss = d.VideoThumbPos
		}
	}

	// Run ffmpeg to get the snapshot
	srcBuf := bytes.NewBuffer(nil)
	// If the remaining time from the seek point to the end of the video is less
	// than the duration of a single frame, ffmpeg cannot extract any frames
	// within the specified range and will exit with an error.
	// The "noaccurate_seek" option prevents this error and also speeds up
	// the seek process.
	stream := ffmpeg.Input(videoPath, ffmpeg.KwArgs{"ss": ss, "noaccurate_seek": ""}).
		Output("pipe:", ffmpeg.KwArgs{"vframes": 1, "format": "image2", "vcodec": "mjpeg"}).
		GlobalArgs("-loglevel", "error").Silent(true).
		WithOutput(srcBuf, os.Stdout)
	if err = stream.Run(); err != nil {
		return nil, err
	}
	return srcBuf, nil
}

func readDir(dirname string) ([]fs.FileInfo, error) {
	f, err := os.Open(dirname)
	if err != nil {
		return nil, err
	}
	list, err := f.Readdir(-1)
	f.Close()
	if err != nil {
		return nil, err
	}
	sort.Slice(list, func(i, j int) bool { return list[i].Name() < list[j].Name() })
	return list, nil
}

func (d *Local) getThumb(file model.Obj) (*bytes.Buffer, *string, error) {
	fullPath := file.GetPath()
	thumbPrefix := "alist_thumb_"
	thumbName := thumbPrefix + utils.GetMD5EncodeStr(fullPath) + ".png"
	if d.ThumbCacheFolder != "" {
		// skip if the file is itself a cached thumbnail
		if strings.HasPrefix(file.GetName(), thumbPrefix) {
			return nil, &fullPath, nil
		}
		thumbPath := filepath.Join(d.ThumbCacheFolder, thumbName)
		if utils.Exists(thumbPath) {
			return nil, &thumbPath, nil
		}
	}
	var srcBuf *bytes.Buffer
	if utils.GetFileType(file.GetName()) == conf.VIDEO {
		videoBuf, err := d.GetSnapshot(fullPath)
		if err != nil {
			return nil, nil, err
		}
		srcBuf = videoBuf
	} else {
		imgData, err := os.ReadFile(fullPath)
		if err != nil {
			return nil, nil, err
		}
		imgBuf := bytes.NewBuffer(imgData)
		srcBuf = imgBuf
	}

	image, err := imaging.Decode(srcBuf, imaging.AutoOrientation(true))
	if err != nil {
		return nil, nil, err
	}
	thumbImg := imaging.Resize(image, 144, 0, imaging.Lanczos)
	var buf bytes.Buffer
	err = imaging.Encode(&buf, thumbImg, imaging.PNG)
	if err != nil {
		return nil, nil, err
	}
	if d.ThumbCacheFolder != "" {
		err = os.WriteFile(filepath.Join(d.ThumbCacheFolder, thumbName), buf.Bytes(), 0666)
		if err != nil {
			return nil, nil, err
		}
	}
	return &buf, nil, nil
}
@@ -1,452 +0,0 @@
package quqi

import (
	"bytes"
	"context"
	"errors"
	"io"
	"strconv"
	"strings"
	"time"

	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/errs"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/alist-org/alist/v3/pkg/utils/random"
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
	"github.com/go-resty/resty/v2"
	log "github.com/sirupsen/logrus"
)

type Quqi struct {
	model.Storage
	Addition
	Cookie   string // Cookie
	GroupID  string // private cloud group ID
	ClientID string // randomly generated client ID; testing shows some API calls fail without it
}

func (d *Quqi) Config() driver.Config {
	return config
}

func (d *Quqi) GetAddition() driver.Additional {
	return &d.Addition
}

func (d *Quqi) Init(ctx context.Context) error {
	// log in
	if err := d.login(); err != nil {
		return err
	}

	// generate a random client ID (same logic as the web client)
	d.ClientID = "quqipc_" + random.String(10)

	// fetch the private cloud ID (only the private cloud is used for now)
	groupResp := &GroupRes{}
	if _, err := d.request("group.quqi.com", "/v1/group/list", resty.MethodGet, nil, groupResp); err != nil {
		return err
	}
	for _, groupInfo := range groupResp.Data {
		if groupInfo == nil {
			continue
		}
		if groupInfo.Type == 2 {
			d.GroupID = strconv.Itoa(groupInfo.ID)
			break
		}
	}
	if d.GroupID == "" {
		return errs.StorageNotFound
	}

	return nil
}

func (d *Quqi) Drop(ctx context.Context) error {
	return nil
}

func (d *Quqi) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	var (
		listResp = &ListRes{}
		files    []model.Obj
	)

	if _, err := d.request("", "/api/dir/ls", resty.MethodPost, func(req *resty.Request) {
		req.SetFormData(map[string]string{
			"quqi_id":   d.GroupID,
			"tree_id":   "1",
			"node_id":   dir.GetID(),
			"client_id": d.ClientID,
		})
	}, listResp); err != nil {
		return nil, err
	}

	if listResp.Data == nil {
		return nil, nil
	}

	// dirs
	for _, dirInfo := range listResp.Data.Dir {
		if dirInfo == nil {
			continue
		}
		files = append(files, &model.Object{
			ID:       strconv.FormatInt(dirInfo.NodeID, 10),
			Name:     dirInfo.Name,
			Modified: time.Unix(dirInfo.UpdateTime, 0),
			Ctime:    time.Unix(dirInfo.AddTime, 0),
			IsFolder: true,
		})
	}

	// files
	for _, fileInfo := range listResp.Data.File {
		if fileInfo == nil {
			continue
		}
		if fileInfo.EXT != "" {
			fileInfo.Name = strings.Join([]string{fileInfo.Name, fileInfo.EXT}, ".")
		}

		files = append(files, &model.Object{
			ID:       strconv.FormatInt(fileInfo.NodeID, 10),
			Name:     fileInfo.Name,
			Size:     fileInfo.Size,
			Modified: time.Unix(fileInfo.UpdateTime, 0),
			Ctime:    time.Unix(fileInfo.AddTime, 0),
		})
	}

	return files, nil
}

func (d *Quqi) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
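	// Try link sources in order of preference: CDN (when enabled), then the
	// preview endpoint, then the plain download endpoint; each failure only
	// logs a warning and falls through to the next source.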
	if d.CDN {
		link, err := d.linkFromCDN(file.GetID())
		if err != nil {
			log.Warn(err)
		} else {
			return link, nil
		}
	}

	link, err := d.linkFromPreview(file.GetID())
	if err != nil {
		log.Warn(err)
	} else {
		return link, nil
	}

	link, err = d.linkFromDownload(file.GetID())
	if err != nil {
		return nil, err
	}
	return link, nil
}

func (d *Quqi) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
	var (
		makeDirRes = &MakeDirRes{}
		timeNow    = time.Now()
	)

	if _, err := d.request("", "/api/dir/mkDir", resty.MethodPost, func(req *resty.Request) {
		req.SetFormData(map[string]string{
			"quqi_id":   d.GroupID,
			"tree_id":   "1",
			"parent_id": parentDir.GetID(),
			"name":      dirName,
			"client_id": d.ClientID,
		})
	}, makeDirRes); err != nil {
		return nil, err
	}

	return &model.Object{
		ID:       strconv.FormatInt(makeDirRes.Data.NodeID, 10),
		Name:     dirName,
		Modified: timeNow,
		Ctime:    timeNow,
		IsFolder: true,
	}, nil
}

func (d *Quqi) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	var moveRes = &MoveRes{}

	if _, err := d.request("", "/api/dir/mvDir", resty.MethodPost, func(req *resty.Request) {
		req.SetFormData(map[string]string{
			"quqi_id":        d.GroupID,
			"tree_id":        "1",
			"node_id":        dstDir.GetID(),
			"source_quqi_id": d.GroupID,
			"source_tree_id": "1",
			"source_node_id": srcObj.GetID(),
			"client_id":      d.ClientID,
		})
	}, moveRes); err != nil {
		return nil, err
	}

	return &model.Object{
		ID:       strconv.FormatInt(moveRes.Data.NodeID, 10),
		Name:     moveRes.Data.NodeName,
		Size:     srcObj.GetSize(),
		Modified: time.Now(),
		Ctime:    srcObj.CreateTime(),
		IsFolder: srcObj.IsDir(),
	}, nil
}

func (d *Quqi) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
	var realName = newName

	if !srcObj.IsDir() {
		srcExt, newExt := utils.Ext(srcObj.GetName()), utils.Ext(newName)

		// A Quqi file name consists of a base name plus an extension. When an
		// extension exists, renaming may only change the base name; the
		// extension is kept on the Quqi server side.
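		// Example: renaming "report.docx" to "draft.docx" sends rename="draft",
		// since the server re-attaches the ".docx" extension itself.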
		if srcExt != "" && srcExt == newExt {
			parts := strings.Split(newName, ".")
			if len(parts) > 1 {
				realName = strings.Join(parts[:len(parts)-1], ".")
			}
		}
	}

	if _, err := d.request("", "/api/dir/renameDir", resty.MethodPost, func(req *resty.Request) {
		req.SetFormData(map[string]string{
			"quqi_id":   d.GroupID,
			"tree_id":   "1",
			"node_id":   srcObj.GetID(),
			"rename":    realName,
			"client_id": d.ClientID,
		})
	}, nil); err != nil {
		return nil, err
	}

	return &model.Object{
		ID:       srcObj.GetID(),
		Name:     newName,
		Size:     srcObj.GetSize(),
		Modified: time.Now(),
		Ctime:    srcObj.CreateTime(),
		IsFolder: srcObj.IsDir(),
	}, nil
}

func (d *Quqi) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	// the Quqi API response does not expose the copied file's metadata directly
	if _, err := d.request("", "/api/node/copy", resty.MethodPost, func(req *resty.Request) {
		req.SetFormData(map[string]string{
			"quqi_id":        d.GroupID,
			"tree_id":        "1",
			"node_id":        dstDir.GetID(),
			"source_quqi_id": d.GroupID,
			"source_tree_id": "1",
			"source_node_id": srcObj.GetID(),
			"client_id":      d.ClientID,
		})
	}, nil); err != nil {
		return nil, err
	}

	return nil, nil
}

func (d *Quqi) Remove(ctx context.Context, obj model.Obj) error {
	// Permanent deletion is not implemented for now; everything goes to the
	// recycle bin by default. To delete permanently: first call the delete
	// endpoint to move the file into the recycle bin, then remove it through
	// the recycle bin endpoint.
	if _, err := d.request("", "/api/node/del", resty.MethodPost, func(req *resty.Request) {
		req.SetFormData(map[string]string{
			"quqi_id":   d.GroupID,
			"tree_id":   "1",
			"node_id":   obj.GetID(),
			"client_id": d.ClientID,
		})
	}, nil); err != nil {
		return err
	}

	return nil
}

func (d *Quqi) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	// base info
	sizeStr := strconv.FormatInt(stream.GetSize(), 10)
	f, err := stream.CacheFullInTempFile()
	if err != nil {
		return nil, err
	}
	md5, err := utils.HashFile(utils.MD5, f)
	if err != nil {
		return nil, err
	}
	sha, err := utils.HashFile(utils.SHA256, f)
	if err != nil {
		return nil, err
	}
	// init upload
	var uploadInitResp UploadInitResp
	_, err = d.request("", "/api/upload/v1/file/init", resty.MethodPost, func(req *resty.Request) {
		req.SetFormData(map[string]string{
			"quqi_id":   d.GroupID,
			"tree_id":   "1",
			"parent_id": dstDir.GetID(),
			"size":      sizeStr,
			"file_name": stream.GetName(),
			"md5":       md5,
			"sha":       sha,
			"is_slice":  "true",
			"client_id": d.ClientID,
		})
	}, &uploadInitResp)
	if err != nil {
		return nil, err
	}
	// check exist
	// if the file already exists on the Quqi server, there is no need to actually upload it
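	// This is hash-based instant upload: the md5/sha sent at init let the
	// server match an existing copy and skip the transfer entirely.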
	if uploadInitResp.Data.Exist {
		// the file name returned by Quqi does not include the extension name
		nodeName, nodeExt := uploadInitResp.Data.NodeName, rawExt(stream.GetName())
		if nodeExt != "" {
			nodeName = nodeName + "." + nodeExt
		}
		return &model.Object{
			ID:       strconv.FormatInt(uploadInitResp.Data.NodeID, 10),
			Name:     nodeName,
			Size:     stream.GetSize(),
			Modified: stream.ModTime(),
			Ctime:    stream.CreateTime(),
		}, nil
	}
	// listParts
	_, err = d.request("upload.quqi.com:20807", "/upload/v1/listParts", resty.MethodPost, func(req *resty.Request) {
		req.SetFormData(map[string]string{
			"token":     uploadInitResp.Data.Token,
			"task_id":   uploadInitResp.Data.TaskID,
			"client_id": d.ClientID,
		})
	}, nil)
	if err != nil {
		return nil, err
	}
	// get temp key
	var tempKeyResp TempKeyResp
	_, err = d.request("upload.quqi.com:20807", "/upload/v1/tempKey", resty.MethodGet, func(req *resty.Request) {
		req.SetQueryParams(map[string]string{
			"token":   uploadInitResp.Data.Token,
			"task_id": uploadInitResp.Data.TaskID,
		})
	}, &tempKeyResp)
	if err != nil {
		return nil, err
	}
	// upload
	// u, err := url.Parse(fmt.Sprintf("https://%s.cos.ap-shanghai.myqcloud.com", uploadInitResp.Data.Bucket))
	// b := &cos.BaseURL{BucketURL: u}
	// client := cos.NewClient(b, &http.Client{
	// 	Transport: &cos.CredentialTransport{
	// 		Credential: cos.NewTokenCredential(tempKeyResp.Data.Credentials.TmpSecretID, tempKeyResp.Data.Credentials.TmpSecretKey, tempKeyResp.Data.Credentials.SessionToken),
	// 	},
	// })
	// partSize := int64(1024 * 1024 * 2)
	// partCount := (stream.GetSize() + partSize - 1) / partSize
	// for i := 1; i <= int(partCount); i++ {
	// 	length := partSize
	// 	if i == int(partCount) {
	// 		length = stream.GetSize() - (int64(i)-1)*partSize
	// 	}
	// 	_, err := client.Object.UploadPart(
	// 		ctx, uploadInitResp.Data.Key, uploadInitResp.Data.UploadID, i, io.LimitReader(f, partSize), &cos.ObjectUploadPartOptions{
	// 			ContentLength: length,
	// 		},
	// 	)
	// 	if err != nil {
	// 		return nil, err
	// 	}
	// }

	cfg := &aws.Config{
		Credentials: credentials.NewStaticCredentials(tempKeyResp.Data.Credentials.TmpSecretID, tempKeyResp.Data.Credentials.TmpSecretKey, tempKeyResp.Data.Credentials.SessionToken),
		Region:      aws.String("ap-shanghai"),
		Endpoint:    aws.String("cos.ap-shanghai.myqcloud.com"),
	}
	s, err := session.NewSession(cfg)
	if err != nil {
		return nil, err
	}
	uploader := s3manager.NewUploader(s)
	buf := make([]byte, 1024*1024*2)
	fup := &driver.ReaderUpdatingProgress{
		Reader: &driver.SimpleReaderWithSize{
			Reader: f,
			Size:   int64(len(buf)),
		},
		UpdateProgress: up,
	}
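	// Upload the cached file to Tencent COS through the S3-compatible API in
	// 2 MiB chunks; S3 part numbers are 1-based.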
	for partNumber := int64(1); ; partNumber++ {
		n, err := io.ReadFull(fup, buf)
		if err != nil && !errors.Is(err, io.ErrUnexpectedEOF) {
			if err == io.EOF {
				break
			}
			return nil, err
		}
		reader := bytes.NewReader(buf[:n])
		_, err = uploader.S3.UploadPartWithContext(ctx, &s3.UploadPartInput{
			UploadId:   &uploadInitResp.Data.UploadID,
			Key:        &uploadInitResp.Data.Key,
			Bucket:     &uploadInitResp.Data.Bucket,
			PartNumber: aws.Int64(partNumber),
			Body: struct {
				*driver.RateLimitReader
				io.Seeker
			}{
				RateLimitReader: driver.NewLimitedUploadStream(ctx, reader),
				Seeker:          reader,
			},
		})
		if err != nil {
			return nil, err
		}
	}
	// finish upload
	var uploadFinishResp UploadFinishResp
	_, err = d.request("", "/api/upload/v1/file/finish", resty.MethodPost, func(req *resty.Request) {
		req.SetFormData(map[string]string{
			"token":     uploadInitResp.Data.Token,
			"task_id":   uploadInitResp.Data.TaskID,
			"client_id": d.ClientID,
		})
	}, &uploadFinishResp)
	if err != nil {
		return nil, err
	}
	// the file name returned by Quqi does not include the extension name
	nodeName, nodeExt := uploadFinishResp.Data.NodeName, rawExt(stream.GetName())
	if nodeExt != "" {
		nodeName = nodeName + "." + nodeExt
	}
	return &model.Object{
		ID:       strconv.FormatInt(uploadFinishResp.Data.NodeID, 10),
		Name:     nodeName,
		Size:     stream.GetSize(),
		Modified: stream.ModTime(),
		Ctime:    stream.CreateTime(),
	}, nil
}

//func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
//	return nil, errs.NotSupport
//}

var _ driver.Driver = (*Quqi)(nil)
@@ -1,28 +0,0 @@
package quqi

import (
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/op"
)

type Addition struct {
	driver.RootID
	Phone    string `json:"phone"`
	Password string `json:"password"`
	Cookie   string `json:"cookie" help:"Cookie can be used on multiple clients at the same time"`
	CDN      bool   `json:"cdn" help:"If you enable this option, the download speed can be increased, but there will be some performance loss"`
}

var config = driver.Config{
	Name:      "Quqi",
	OnlyLocal: true,
	LocalSort: true,
	//NoUpload:  true,
	DefaultRoot: "0",
}

func init() {
	op.RegisterDriver(func() driver.Driver {
		return &Quqi{}
	})
}
@@ -1,197 +0,0 @@
package quqi

type BaseReqQuery struct {
	ID string `json:"quqiid"`
}

type BaseReq struct {
	GroupID string `json:"quqi_id"`
}

type BaseRes struct {
	//Data    interface{} `json:"data"`
	Code    int    `json:"err"`
	Message string `json:"msg"`
}

type GroupRes struct {
	BaseRes
	Data []*Group `json:"data"`
}

type ListRes struct {
	BaseRes
	Data *List `json:"data"`
}

type GetDocRes struct {
	BaseRes
	Data struct {
		OriginPath string `json:"origin_path"`
	} `json:"data"`
}

type GetDownloadResp struct {
	BaseRes
	Data struct {
		Url string `json:"url"`
	} `json:"data"`
}

type MakeDirRes struct {
	BaseRes
	Data struct {
		IsRoot   bool  `json:"is_root"`
		NodeID   int64 `json:"node_id"`
		ParentID int64 `json:"parent_id"`
	} `json:"data"`
}

type MoveRes struct {
	BaseRes
	Data struct {
		NodeChildNum int64  `json:"node_child_num"`
		NodeID       int64  `json:"node_id"`
		NodeName     string `json:"node_name"`
		ParentID     int64  `json:"parent_id"`
		GroupID      int64  `json:"quqi_id"`
		TreeID       int64  `json:"tree_id"`
	} `json:"data"`
}

type RenameRes struct {
	BaseRes
	Data struct {
		NodeID     int64  `json:"node_id"`
		GroupID    int64  `json:"quqi_id"`
		Rename     string `json:"rename"`
		TreeID     int64  `json:"tree_id"`
		UpdateTime int64  `json:"updatetime"`
	} `json:"data"`
}

type CopyRes struct {
	BaseRes
}

type RemoveRes struct {
	BaseRes
}

type Group struct {
	ID              int    `json:"quqi_id"`
	Type            int    `json:"type"`
	Name            string `json:"name"`
	IsAdministrator int    `json:"is_administrator"`
	Role            int    `json:"role"`
	Avatar          string `json:"avatar_url"`
	IsStick         int    `json:"is_stick"`
	Nickname        string `json:"nickname"`
	Status          int    `json:"status"`
}

type List struct {
	ListDir
	Dir  []*ListDir  `json:"dir"`
	File []*ListFile `json:"file"`
}

type ListItem struct {
	AddTime        int64  `json:"add_time"`
	IsDir          int    `json:"is_dir"`
	IsExpand       int    `json:"is_expand"`
	IsFinalize     int    `json:"is_finalize"`
	LastEditorName string `json:"last_editor_name"`
	Name           string `json:"name"`
	NodeID         int64  `json:"nid"`
	ParentID       int64  `json:"parent_id"`
	Permission     int    `json:"permission"`
	TreeID         int64  `json:"tid"`
	UpdateCNT      int64  `json:"update_cnt"`
	UpdateTime     int64  `json:"update_time"`
}

type ListDir struct {
	ListItem
	ChildDocNum int64  `json:"child_doc_num"`
	DirDetail   string `json:"dir_detail"`
	DirType     int    `json:"dir_type"`
}

type ListFile struct {
	ListItem
	BroadDocType       string `json:"broad_doc_type"`
	CanDisplay         bool   `json:"can_display"`
	Detail             string `json:"detail"`
	EXT                string `json:"ext"`
	Filetype           string `json:"filetype"`
	HasMobileThumbnail bool   `json:"has_mobile_thumbnail"`
	HasThumbnail       bool   `json:"has_thumbnail"`
	Size               int64  `json:"size"`
	Version            int    `json:"version"`
}

type UploadInitResp struct {
	Data struct {
		Bucket   string `json:"bucket"`
		Exist    bool   `json:"exist"`
		Key      string `json:"key"`
		TaskID   string `json:"task_id"`
		Token    string `json:"token"`
		UploadID string `json:"upload_id"`
		URL      string `json:"url"`
		NodeID   int64  `json:"node_id"`
		NodeName string `json:"node_name"`
		ParentID int64  `json:"parent_id"`
	} `json:"data"`
	Err int    `json:"err"`
	Msg string `json:"msg"`
}

type TempKeyResp struct {
	Err  int    `json:"err"`
	Msg  string `json:"msg"`
	Data struct {
		ExpiredTime int    `json:"expiredTime"`
		Expiration  string `json:"expiration"`
		Credentials struct {
			SessionToken string `json:"sessionToken"`
			TmpSecretID  string `json:"tmpSecretId"`
			TmpSecretKey string `json:"tmpSecretKey"`
		} `json:"credentials"`
		RequestID string `json:"requestId"`
		StartTime int    `json:"startTime"`
	} `json:"data"`
}

type UploadFinishResp struct {
	Data struct {
		NodeID   int64  `json:"node_id"`
		NodeName string `json:"node_name"`
		ParentID int64  `json:"parent_id"`
		QuqiID   int64  `json:"quqi_id"`
		TreeID   int64  `json:"tree_id"`
	} `json:"data"`
	Err int    `json:"err"`
	Msg string `json:"msg"`
}

type UrlExchangeResp struct {
	BaseRes
	Data struct {
		Name               string `json:"name"`
		Mime               string `json:"mime"`
		Size               int64  `json:"size"`
		DownloadType       int    `json:"download_type"`
		ChannelType        int    `json:"channel_type"`
		ChannelID          int    `json:"channel_id"`
		Url                string `json:"url"`
		ExpiredTime        int64  `json:"expired_time"`
		IsEncrypted        bool   `json:"is_encrypted"`
		EncryptedSize      int64  `json:"encrypted_size"`
		EncryptedAlg       string `json:"encrypted_alg"`
		EncryptedKey       string `json:"encrypted_key"`
		PassportID         int64  `json:"passport_id"`
		RequestExpiredTime int64  `json:"request_expired_time"`
	} `json:"data"`
}
@@ -1,310 +0,0 @@
package quqi

import (
	"bufio"
	"context"
	"encoding/base64"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	stdpath "path"
	"regexp"
	"strings"
	"time"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/errs"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/internal/stream"
	"github.com/alist-org/alist/v3/pkg/http_range"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/go-resty/resty/v2"
	"github.com/minio/sio"
)

// do others that are not defined in the Driver interface
func (d *Quqi) request(host string, path string, method string, callback base.ReqCallback, resp interface{}) (*resty.Response, error) {
	var (
		reqUrl = url.URL{
			Scheme: "https",
			Host:   "quqi.com",
			Path:   path,
		}
		req    = base.RestyClient.R()
		result BaseRes
	)

	if host != "" {
		reqUrl.Host = host
	}
	req.SetHeaders(map[string]string{
		"Origin": "https://quqi.com",
		"Cookie": d.Cookie,
	})

	if d.GroupID != "" {
		req.SetQueryParam("quqiid", d.GroupID)
	}

	if callback != nil {
		callback(req)
	}

	res, err := req.Execute(method, reqUrl.String())
	if err != nil {
		return nil, err
	}
	// resty.Request.SetResult cannot parse result correctly sometimes
	err = utils.Json.Unmarshal(res.Body(), &result)
	if err != nil {
		return nil, err
	}
	if result.Code != 0 {
		return nil, errors.New(result.Message)
	}
	if resp != nil {
		err = utils.Json.Unmarshal(res.Body(), resp)
		if err != nil {
			return nil, err
		}
	}
	return res, nil
}

func (d *Quqi) login() error {
	if d.Addition.Cookie != "" {
		d.Cookie = d.Addition.Cookie
	}
	if d.checkLogin() {
		return nil
	}
	if d.Cookie != "" {
		return errors.New("cookie is invalid")
	}
	if d.Phone == "" {
		return errors.New("phone number is empty")
	}
	if d.Password == "" {
		return errs.EmptyPassword
	}

	resp, err := d.request("", "/auth/person/v2/login/password", resty.MethodPost, func(req *resty.Request) {
		req.SetFormData(map[string]string{
			"phone":    d.Phone,
			"password": base64.StdEncoding.EncodeToString([]byte(d.Password)),
		})
	}, nil)
	if err != nil {
		return err
	}

	var cookies []string
	for _, cookie := range resp.RawResponse.Cookies() {
		cookies = append(cookies, fmt.Sprintf("%s=%s", cookie.Name, cookie.Value))
	}
	d.Cookie = strings.Join(cookies, ";")

	return nil
}

func (d *Quqi) checkLogin() bool {
	if _, err := d.request("", "/auth/account/baseInfo", resty.MethodGet, nil, nil); err != nil {
		return false
	}
	return true
}

// rawExt returns the file extension with its original letter case preserved
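// e.g. rawExt("movie.MKV") returns "MKV"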
func rawExt(name string) string {
	ext := stdpath.Ext(name)
	if strings.HasPrefix(ext, ".") {
		ext = ext[1:]
	}

	return ext
}

// decryptKey derives the 32-byte file decryption key from the encoded key string
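// The encoding is essentially unpadded base64: each character maps to 6 bits
// ('A'-'Z' -> 0-25, 'a'-'z' -> 26-51, '0'-'9' -> 52-61, '+' -> 62, '/' -> 63),
// accumulated four characters at a time and emitted as three bytes, truncated
// to a fixed 32-byte key.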
func decryptKey(encodeKey string) []byte {
	// strip any characters outside the base64 alphabet
	u := regexp.MustCompile(`[^A-Za-z0-9+/]`).ReplaceAllString(encodeKey, "")

	// lengths of the input string and of the output byte array
	o := len(u)
	a := 32

	// output byte array
	c := make([]byte, a)

	// decoding loop
	s := uint32(0) // accumulator
	f := 0         // output array index
	for l := 0; l < o; l++ {
		r := l & 3 // position of the current character within its four-character block
		i := u[l]  // ASCII code of the current character

		// decode the current character
		switch {
		case i >= 65 && i < 91: // uppercase letter
			s |= uint32(i-65) << uint32(6*(3-r))
		case i >= 97 && i < 123: // lowercase letter
			s |= uint32(i-71) << uint32(6*(3-r))
		case i >= 48 && i < 58: // digit
			s |= uint32(i+4) << uint32(6*(3-r))
		case i == 43: // plus sign
			s |= uint32(62) << uint32(6*(3-r))
		case i == 47: // slash
			s |= uint32(63) << uint32(6*(3-r))
		}

		// once the accumulator holds four characters, or at the last character,
		// flush it into the output array
		if r == 3 || l == o-1 {
			for e := 0; e < 3 && f < a; e, f = e+1, f+1 {
				c[f] = byte(s >> (16 >> e & 24) & 255)
			}
			s = 0
		}
	}

	return c
}

func (d *Quqi) linkFromPreview(id string) (*model.Link, error) {
	var getDocResp GetDocRes
	if _, err := d.request("", "/api/doc/getDoc", resty.MethodPost, func(req *resty.Request) {
		req.SetFormData(map[string]string{
			"quqi_id":   d.GroupID,
			"tree_id":   "1",
			"node_id":   id,
			"client_id": d.ClientID,
		})
	}, &getDocResp); err != nil {
		return nil, err
	}
	if getDocResp.Data.OriginPath == "" {
		return nil, errors.New("cannot get link from preview")
	}
	return &model.Link{
		URL: getDocResp.Data.OriginPath,
		Header: http.Header{
			"Origin": []string{"https://quqi.com"},
			"Cookie": []string{d.Cookie},
		},
	}, nil
}

func (d *Quqi) linkFromDownload(id string) (*model.Link, error) {
	var getDownloadResp GetDownloadResp
	if _, err := d.request("", "/api/doc/getDownload", resty.MethodGet, func(req *resty.Request) {
		req.SetQueryParams(map[string]string{
			"quqi_id":     d.GroupID,
			"tree_id":     "1",
			"node_id":     id,
			"url_type":    "undefined",
			"entry_type":  "undefined",
			"client_id":   d.ClientID,
			"no_redirect": "1",
		})
	}, &getDownloadResp); err != nil {
		return nil, err
	}
	if getDownloadResp.Data.Url == "" {
		return nil, errors.New("cannot get link from download")
	}

	return &model.Link{
		URL: getDownloadResp.Data.Url,
		Header: http.Header{
			"Origin": []string{"https://quqi.com"},
			"Cookie": []string{d.Cookie},
		},
	}, nil
}

func (d *Quqi) linkFromCDN(id string) (*model.Link, error) {
	downloadLink, err := d.linkFromDownload(id)
	if err != nil {
		return nil, err
	}

	var urlExchangeResp UrlExchangeResp
	if _, err = d.request("api.quqi.com", "/preview/downloadInfo/url/exchange", resty.MethodGet, func(req *resty.Request) {
		req.SetQueryParam("url", downloadLink.URL)
	}, &urlExchangeResp); err != nil {
		return nil, err
	}
	if urlExchangeResp.Data.Url == "" {
		return nil, errors.New("cannot get link from cdn")
	}

	// handle the case where the file is not encrypted
	if !urlExchangeResp.Data.IsEncrypted {
		return &model.Link{
			URL: urlExchangeResp.Data.Url,
			Header: http.Header{
				"Origin": []string{"https://quqi.com"},
				"Cookie": []string{d.Cookie},
			},
		}, nil
	}

	// From the sio spec (https://github.com/minio/sio/blob/master/DARE.md) and testing:
	// 1. encrypted_size - size = packet header + authentication tag = (16+16) * N, where N is the number of packets
	// 2. N = (size + 64*1024 - 1) / (64*1024), i.e. each packet carries a 64 KiB payload
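	// Worked example with 64 KiB payloads: a request starting at byte 200000
	// falls in packet 3 (200000/65536), so encryptedOffset = 3*(65536+32) = 196704
	// and decryptedOffset = 200000%65536 = 3392 bytes to discard after decryption.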
	remoteClosers := utils.EmptyClosers()
	payloadSize := int64(1 << 16)
	expiration := time.Until(time.Unix(urlExchangeResp.Data.ExpiredTime, 0))
	resultRangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
		encryptedOffset := httpRange.Start / payloadSize * (payloadSize + 32)
		decryptedOffset := httpRange.Start % payloadSize
		encryptedLength := (httpRange.Length+httpRange.Start+payloadSize-1)/payloadSize*(payloadSize+32) - encryptedOffset
		if httpRange.Length < 0 {
			encryptedLength = httpRange.Length
		} else {
			if httpRange.Length+httpRange.Start >= urlExchangeResp.Data.Size || encryptedLength+encryptedOffset >= urlExchangeResp.Data.EncryptedSize {
				encryptedLength = -1
			}
		}
		//log.Debugf("size: %d\tencrypted_size: %d", urlExchangeResp.Data.Size, urlExchangeResp.Data.EncryptedSize)
		//log.Debugf("http range offset: %d, length: %d", httpRange.Start, httpRange.Length)
		//log.Debugf("encrypted offset: %d, length: %d, decrypted offset: %d", encryptedOffset, encryptedLength, decryptedOffset)

		rrc, err := stream.GetRangeReadCloserFromLink(urlExchangeResp.Data.EncryptedSize, &model.Link{
			URL: urlExchangeResp.Data.Url,
			Header: http.Header{
				"Origin": []string{"https://quqi.com"},
				"Cookie": []string{d.Cookie},
			},
		})
		if err != nil {
			return nil, err
		}

		rc, err := rrc.RangeRead(ctx, http_range.Range{Start: encryptedOffset, Length: encryptedLength})
		remoteClosers.AddClosers(rrc.GetClosers())
		if err != nil {
			return nil, err
		}

		decryptReader, err := sio.DecryptReader(rc, sio.Config{
			MinVersion:     sio.Version10,
			MaxVersion:     sio.Version20,
			CipherSuites:   []byte{sio.CHACHA20_POLY1305, sio.AES_256_GCM},
			Key:            decryptKey(urlExchangeResp.Data.EncryptedKey),
			SequenceNumber: uint32(httpRange.Start / payloadSize),
		})
		if err != nil {
			return nil, err
		}
		bufferReader := bufio.NewReader(decryptReader)
		bufferReader.Discard(int(decryptedOffset))

		return io.NopCloser(bufferReader), nil
	}

	return &model.Link{
		RangeReadCloser: &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: remoteClosers},
		Expiration:      &expiration,
	}, nil
}
@@ -1,137 +0,0 @@
package trainbit

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strings"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/errs"
	"github.com/alist-org/alist/v3/internal/model"
)

type Trainbit struct {
	model.Storage
	Addition
}

var apiExpiredate, guid string

func (d *Trainbit) Config() driver.Config {
	return config
}

func (d *Trainbit) GetAddition() driver.Additional {
	return &d.Addition
}

func (d *Trainbit) Init(ctx context.Context) error {
	base.HttpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error {
		return http.ErrUseLastResponse
	}
	var err error
	apiExpiredate, guid, err = getToken(d.ApiKey, d.AUSHELLPORTAL)
	if err != nil {
		return err
	}
	return nil
}

func (d *Trainbit) Drop(ctx context.Context) error {
	return nil
}

func (d *Trainbit) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	form := make(url.Values)
	form.Set("parentid", strings.Split(dir.GetID(), "_")[0])
	res, err := postForm("https://trainbit.com/lib/api/v1/listoffiles", form, apiExpiredate, d.ApiKey, d.AUSHELLPORTAL)
	if err != nil {
		return nil, err
	}
	data, err := io.ReadAll(res.Body)
	if err != nil {
		return nil, err
	}
	var jsonData any
	err = json.Unmarshal(data, &jsonData)
	if err != nil {
		return nil, err
	}
	object, err := parseRawFileObject(jsonData.(map[string]any)["items"].([]any))
	if err != nil {
		return nil, err
	}
	return object, nil
}

func (d *Trainbit) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	res, err := get(fmt.Sprintf("https://trainbit.com/files/%s/", strings.Split(file.GetID(), "_")[0]), d.ApiKey, d.AUSHELLPORTAL)
	if err != nil {
		return nil, err
	}
	return &model.Link{
		URL: res.Header.Get("Location"),
	}, nil
}

func (d *Trainbit) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
	form := make(url.Values)
	form.Set("name", local2provider(dirName, true))
	form.Set("parentid", strings.Split(parentDir.GetID(), "_")[0])
	_, err := postForm("https://trainbit.com/lib/api/v1/createfolder", form, apiExpiredate, d.ApiKey, d.AUSHELLPORTAL)
	return err
}

func (d *Trainbit) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
	form := make(url.Values)
	form.Set("sourceid", strings.Split(srcObj.GetID(), "_")[0])
	form.Set("destinationid", strings.Split(dstDir.GetID(), "_")[0])
	_, err := postForm("https://trainbit.com/lib/api/v1/move", form, apiExpiredate, d.ApiKey, d.AUSHELLPORTAL)
	return err
}

func (d *Trainbit) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
	form := make(url.Values)
	form.Set("id", strings.Split(srcObj.GetID(), "_")[0])
	form.Set("name", local2provider(newName, srcObj.IsDir()))
	_, err := postForm("https://trainbit.com/lib/api/v1/edit", form, apiExpiredate, d.ApiKey, d.AUSHELLPORTAL)
	return err
}

func (d *Trainbit) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
	return errs.NotImplement
}

func (d *Trainbit) Remove(ctx context.Context, obj model.Obj) error {
	form := make(url.Values)
	form.Set("id", strings.Split(obj.GetID(), "_")[0])
	_, err := postForm("https://trainbit.com/lib/api/v1/delete", form, apiExpiredate, d.ApiKey, d.AUSHELLPORTAL)
	return err
}

func (d *Trainbit) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
	endpoint, _ := url.Parse("https://tb28.trainbit.com/api/upload/send_raw/")
	query := &url.Values{}
	query.Add("q", strings.Split(dstDir.GetID(), "_")[1])
	query.Add("guid", guid)
	query.Add("name", url.QueryEscape(local2provider(s.GetName(), false)+"."))
	endpoint.RawQuery = query.Encode()
	progressReader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
		Reader:         s,
		UpdateProgress: up,
	})
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint.String(), progressReader)
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "text/json; charset=UTF-8")
	_, err = base.HttpClient.Do(req)
	return err
}

var _ driver.Driver = (*Trainbit)(nil)
@@ -1,29 +0,0 @@
package trainbit

import (
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/op"
)

type Addition struct {
	driver.RootID
	AUSHELLPORTAL string `json:"AUSHELLPORTAL" required:"true"`
	ApiKey        string `json:"apikey" required:"true"`
}

var config = driver.Config{
	Name:        "Trainbit",
	LocalSort:   false,
	OnlyLocal:   false,
	OnlyProxy:   false,
	NoCache:     false,
	NoUpload:    false,
	NeedMs:      false,
	DefaultRoot: "0_000",
}

func init() {
	op.RegisterDriver(func() driver.Driver {
		return &Trainbit{}
	})
}
@@ -1 +0,0 @@
package trainbit
@@ -1,124 +0,0 @@
package trainbit

import (
	"html"
	"io"
	"net/http"
	"net/url"
	"regexp"
	"strings"
	"time"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/model"
)

func get(url string, apiKey string, AUSHELLPORTAL string) (*http.Response, error) {
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	req.AddCookie(&http.Cookie{
		Name:   ".AUSHELLPORTAL",
		Value:  AUSHELLPORTAL,
		MaxAge: 2 * 60,
	})
	req.AddCookie(&http.Cookie{
		Name:   "retkeyapi",
		Value:  apiKey,
		MaxAge: 2 * 60,
	})
	res, err := base.HttpClient.Do(req)
	return res, err
}

func postForm(endpoint string, data url.Values, apiExpiredate string, apiKey string, AUSHELLPORTAL string) (*http.Response, error) {
	extData := make(url.Values)
	for key, value := range data {
		extData[key] = make([]string, len(value))
		copy(extData[key], value)
	}
	extData.Set("apikey", apiKey)
	extData.Set("expiredate", apiExpiredate)
	req, err := http.NewRequest(http.MethodPost, endpoint, strings.NewReader(extData.Encode()))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	req.AddCookie(&http.Cookie{
		Name:   ".AUSHELLPORTAL",
		Value:  AUSHELLPORTAL,
		MaxAge: 2 * 60,
	})
	req.AddCookie(&http.Cookie{
		Name:   "retkeyapi",
		Value:  apiKey,
		MaxAge: 2 * 60,
	})
	res, err := base.HttpClient.Do(req)
	return res, err
}

func getToken(apiKey string, AUSHELLPORTAL string) (string, string, error) {
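	// The API expiry date and upload guid are scraped from inline JavaScript
	// on the files page, matching lines like "core.api.expiredate = '...';".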
	res, err := get("https://trainbit.com/files/", apiKey, AUSHELLPORTAL)
	if err != nil {
		return "", "", err
	}
	data, err := io.ReadAll(res.Body)
	if err != nil {
		return "", "", err
	}
	text := string(data)
	apiExpiredateReg := regexp.MustCompile(`core.api.expiredate = '([^']*)';`)
	result := apiExpiredateReg.FindAllStringSubmatch(text, -1)
	apiExpiredate := result[0][1]
	guidReg := regexp.MustCompile(`app.vars.upload.guid = '([^']*)';`)
	result = guidReg.FindAllStringSubmatch(text, -1)
	guid := result[0][1]
	return apiExpiredate, guid, nil
}

func local2provider(filename string, isFolder bool) string {
	if isFolder {
		return filename
	}
	return filename + ".delete_suffix"
}

func provider2local(filename string) string {
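	// Strip the ".delete_suffix" marker appended by local2provider,
	// e.g. "a.txt.delete_suffix" becomes "a.txt" again.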
	filename = html.UnescapeString(filename)
	index := strings.LastIndex(filename, ".delete_suffix")
	if index != -1 {
		filename = filename[:index]
	}
	return filename
}

func parseRawFileObject(rawObject []any) ([]model.Obj, error) {
	objectList := make([]model.Obj, 0)
	for _, each := range rawObject {
		object := each.(map[string]any)
		if object["id"].(string) == "0" {
			continue
		}
		isFolder := int64(object["ty"].(float64)) == 1
		var name string
		if object["ext"].(string) != "" {
			name = strings.Join([]string{object["name"].(string), object["ext"].(string)}, ".")
		} else {
			name = object["name"].(string)
		}
		modified, err := time.Parse("2006/01/02 15:04:05", object["modified"].(string))
		if err != nil {
			return nil, err
		}
		objectList = append(objectList, model.Obj(&model.Object{
			ID:   strings.Join([]string{object["id"].(string), strings.Split(object["uploadurl"].(string), "=")[1]}, "_"),
			Name: provider2local(name),
			Size: int64(object["byte"].(float64)),
			// timestamps appear to come back in UTC+3:30; shift them to UTC
			Modified: modified.Add(-210 * time.Minute),
			IsFolder: isFolder,
		}))
	}
	return objectList, nil
}
@@ -1,36 +0,0 @@
package url_tree

import (
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/op"
)

type Addition struct {
	// Usually one of two
	// driver.RootPath
	// driver.RootID
	// define other
	UrlStructure string `json:"url_structure" type:"text" required:"true" default:"https://jsd.nn.ci/gh/alist-org/alist/README.md\nhttps://jsd.nn.ci/gh/alist-org/alist/README_cn.md\nfolder:\n CONTRIBUTING.md:1635:https://jsd.nn.ci/gh/alist-org/alist/CONTRIBUTING.md\n CODE_OF_CONDUCT.md:2093:https://jsd.nn.ci/gh/alist-org/alist/CODE_OF_CONDUCT.md" help:"structure:FolderName:\n [FileName:][FileSize:][Modified:]Url"`
	HeadSize     bool   `json:"head_size" type:"bool" default:"false" help:"Use head method to get file size, but it may be failed."`
	Writable     bool   `json:"writable" type:"bool" default:"false"`
}

var config = driver.Config{
	Name:              "UrlTree",
	LocalSort:         true,
	OnlyLocal:         false,
	OnlyProxy:         false,
	NoCache:           true,
	NoUpload:          false,
	NeedMs:            false,
	DefaultRoot:       "",
	CheckStatus:       true,
	Alert:             "",
	NoOverwriteUpload: false,
}

func init() {
	op.RegisterDriver(func() driver.Driver {
		return &Urls{}
	})
}
@@ -1,210 +0,0 @@
package vtencent

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"strconv"
	"time"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/errs"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/internal/op"
	"github.com/alist-org/alist/v3/pkg/cron"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/go-resty/resty/v2"
)

type Vtencent struct {
	model.Storage
	Addition
	cron   *cron.Cron
	config driver.Config
	conf   Conf
}

func (d *Vtencent) Config() driver.Config {
	return d.config
}

func (d *Vtencent) GetAddition() driver.Additional {
	return &d.Addition
}

func (d *Vtencent) Init(ctx context.Context) error {
	tfUid, err := d.LoadUser()
	if err != nil {
		d.Status = err.Error()
		op.MustSaveDriverStorage(d)
		return nil
	}
	d.Addition.TfUid = tfUid
	op.MustSaveDriverStorage(d)
	d.cron = cron.NewCron(time.Hour * 12)
	d.cron.Do(func() {
		_, err := d.LoadUser()
		if err != nil {
			d.Status = err.Error()
			op.MustSaveDriverStorage(d)
		}
	})
	return nil
}

func (d *Vtencent) Drop(ctx context.Context) error {
	if d.cron != nil {
		d.cron.Stop()
	}
	return nil
}

func (d *Vtencent) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	files, err := d.GetFiles(dir.GetID())
	if err != nil {
		return nil, err
	}
	return utils.SliceConvert(files, func(src File) (model.Obj, error) {
		return fileToObj(src), nil
	})
}

func (d *Vtencent) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
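	// Resolve a temporary download URL for the material, then serve it with
	// two concurrent 10 MB range parts (disabled below for zero-size files).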
	form := fmt.Sprintf(`{"MaterialIds":["%s"]}`, file.GetID())
	var dat map[string]interface{}
	if err := json.Unmarshal([]byte(form), &dat); err != nil {
		return nil, err
	}
	var resps RspDown
	api := "https://api.vs.tencent.com/SaaS/Material/DescribeMaterialDownloadUrl"
	rsp, err := d.request(api, http.MethodPost, func(req *resty.Request) {
		req.SetBody(dat)
	}, &resps)
	if err != nil {
		return nil, err
	}
	if err := json.Unmarshal(rsp, &resps); err != nil {
		return nil, err
	}
	if len(resps.Data.DownloadURLInfoSet) == 0 {
		return nil, fmt.Errorf("no download url returned for material %s", file.GetID())
	}
	u := resps.Data.DownloadURLInfoSet[0].DownloadURL
	link := &model.Link{
		URL: u,
		Header: http.Header{
			"Referer":    []string{d.conf.referer},
			"User-Agent": []string{d.conf.ua},
		},
		Concurrency: 2,
		PartSize:    10 * utils.MB,
	}
	if file.GetSize() == 0 {
		link.Concurrency = 0
		link.PartSize = 0
	}
	return link, nil
}

func (d *Vtencent) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
	classId, err := strconv.Atoi(parentDir.GetID())
	if err != nil {
		return err
	}
	_, err = d.request("https://api.vs.tencent.com/PaaS/Material/CreateClass", http.MethodPost, func(req *resty.Request) {
		req.SetBody(base.Json{
			"Owner": base.Json{
				"Type": "PERSON",
				"Id":   d.TfUid,
			},
			"ParentClassId": classId,
			"Name":          dirName,
			"VerifySign":    ""})
	}, nil)
	return err
}

func (d *Vtencent) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
	srcType := "MATERIAL"
	if srcObj.IsDir() {
		srcType = "CLASS"
	}
	form := fmt.Sprintf(`{"SourceInfos":[
		{"Owner":{"Id":"%s","Type":"PERSON"},
		"Resource":{"Type":"%s","Id":"%s"}}
		],
		"Destination":{"Owner":{"Id":"%s","Type":"PERSON"},
		"Resource":{"Type":"CLASS","Id":"%s"}}
		}`, d.TfUid, srcType, srcObj.GetID(), d.TfUid, dstDir.GetID())
	var dat map[string]interface{}
	if err := json.Unmarshal([]byte(form), &dat); err != nil {
		return err
	}
	_, err := d.request("https://api.vs.tencent.com/PaaS/Material/MoveResource", http.MethodPost, func(req *resty.Request) {
		req.SetBody(dat)
	}, nil)
	return err
}

func (d *Vtencent) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
	api := "https://api.vs.tencent.com/PaaS/Material/ModifyMaterial"
	form := fmt.Sprintf(`{
		"Owner":{"Type":"PERSON","Id":"%s"},
		"MaterialId":"%s","Name":"%s"}`, d.TfUid, srcObj.GetID(), newName)
	if srcObj.IsDir() {
		classId, err := strconv.Atoi(srcObj.GetID())
		if err != nil {
			return err
		}
		api = "https://api.vs.tencent.com/PaaS/Material/ModifyClass"
		form = fmt.Sprintf(`{"Owner":{"Type":"PERSON","Id":"%s"},
		"ClassId":%d,"Name":"%s"}`, d.TfUid, classId, newName)
	}
	var dat map[string]interface{}
	if err := json.Unmarshal([]byte(form), &dat); err != nil {
		return err
	}
	_, err := d.request(api, http.MethodPost, func(req *resty.Request) {
		req.SetBody(dat)
	}, nil)
	return err
}

func (d *Vtencent) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
	// TODO copy obj, optional
	return errs.NotImplement
}

func (d *Vtencent) Remove(ctx context.Context, obj model.Obj) error {
	srcType := "MATERIAL"
	if obj.IsDir() {
		srcType = "CLASS"
	}
	form := fmt.Sprintf(`{
		"SourceInfos":[
		{"Owner":{"Type":"PERSON","Id":"%s"},
		"Resource":{"Type":"%s","Id":"%s"}}
		]
		}`, d.TfUid, srcType, obj.GetID())
	var dat map[string]interface{}
	if err := json.Unmarshal([]byte(form), &dat); err != nil {
		return err
	}
	_, err := d.request("https://api.vs.tencent.com/PaaS/Material/DeleteResource", http.MethodPost, func(req *resty.Request) {
		req.SetBody(dat)
	}, nil)
	return err
}

func (d *Vtencent) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
	err := d.FileUpload(ctx, dstDir, stream, up)
	return err
}

//func (d *Vtencent) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
//	return nil, errs.NotSupport
//}

var _ driver.Driver = (*Vtencent)(nil)
@@ -1,39 +0,0 @@
package vtencent

import (
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/op"
)

type Addition struct {
	driver.RootID
	Cookie         string `json:"cookie" required:"true"`
	TfUid          string `json:"tf_uid"`
	OrderBy        string `json:"order_by" type:"select" options:"Name,Size,UpdateTime,CreatTime"`
	OrderDirection string `json:"order_direction" type:"select" options:"Asc,Desc"`
}

type Conf struct {
	ua      string
	referer string
	origin  string
}

func init() {
	op.RegisterDriver(func() driver.Driver {
		return &Vtencent{
			config: driver.Config{
				Name:              "VTencent",
				OnlyProxy:         true,
				OnlyLocal:         false,
				DefaultRoot:       "9",
				NoOverwriteUpload: true,
			},
			conf: Conf{
				ua:      "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) quark-cloud-drive/2.5.20 Chrome/100.0.4896.160 Electron/18.3.5.4-b478491100 Safari/537.36 Channel/pckk_other_ch",
				referer: "https://app.v.tencent.com/",
				origin:  "https://app.v.tencent.com",
			},
		}
	})
}
@@ -1,33 +0,0 @@
package vtencent

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/hex"
)

func QSignatureKey(timeKey string, signPath string, key string) string {
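	// This mirrors the Tencent Cloud COS request-signing scheme (an inference
	// from the shape of the algorithm): SignKey = HMAC-SHA1(key, timeKey);
	// StringToSign = "sha1\n" + timeKey + "\n" + SHA1(signPath) + "\n";
	// the result is HMAC-SHA1(SignKey, StringToSign), hex-encoded.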
	signKey := hmac.New(sha1.New, []byte(key))
	signKey.Write([]byte(timeKey))
	signKeyBytes := signKey.Sum(nil)
	signKeyHex := hex.EncodeToString(signKeyBytes)
	sha := sha1.New()
	sha.Write([]byte(signPath))
	shaBytes := sha.Sum(nil)
	shaHex := hex.EncodeToString(shaBytes)

	O := "sha1\n" + timeKey + "\n" + shaHex + "\n"
	dataSignKey := hmac.New(sha1.New, []byte(signKeyHex))
	dataSignKey.Write([]byte(O))
	dataSignKeyBytes := dataSignKey.Sum(nil)
	dataSignKeyHex := hex.EncodeToString(dataSignKeyBytes)
	return dataSignKeyHex
}

func QTwoSignatureKey(timeKey string, key string) string {
	signKey := hmac.New(sha1.New, []byte(key))
	signKey.Write([]byte(timeKey))
	signKeyBytes := signKey.Sum(nil)
	signKeyHex := hex.EncodeToString(signKeyBytes)
	return signKeyHex
}
@ -1,252 +0,0 @@
|
||||
package vtencent
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
)
|
||||
|
||||
type RespErr struct {
|
||||
Code string `json:"Code"`
|
||||
Message string `json:"Message"`
|
||||
}
|
||||
|
||||
type Reqfiles struct {
|
||||
ScrollToken string `json:"ScrollToken"`
|
||||
Text string `json:"Text"`
|
||||
Offset int `json:"Offset"`
|
||||
Limit int `json:"Limit"`
|
||||
Sort struct {
|
||||
Field string `json:"Field"`
|
||||
Order string `json:"Order"`
|
||||
} `json:"Sort"`
|
||||
CreateTimeRanges []any `json:"CreateTimeRanges"`
|
||||
MaterialTypes []any `json:"MaterialTypes"`
|
||||
ReviewStatuses []any `json:"ReviewStatuses"`
|
||||
Tags []any `json:"Tags"`
|
||||
SearchScopes []struct {
|
||||
Owner struct {
|
||||
Type string `json:"Type"`
|
||||
ID string `json:"Id"`
|
||||
} `json:"Owner"`
|
||||
ClassID int `json:"ClassId"`
|
||||
SearchOneDepth bool `json:"SearchOneDepth"`
|
||||
} `json:"SearchScopes"`
|
||||
}
|
||||
|
||||
type File struct {
|
||||
Type string `json:"Type"`
|
||||
ClassInfo struct {
|
||||
ClassID int `json:"ClassId"`
|
||||
Name string `json:"Name"`
|
||||
UpdateTime time.Time `json:"UpdateTime"`
|
||||
CreateTime time.Time `json:"CreateTime"`
|
||||
FileInboxID string `json:"FileInboxId"`
|
||||
Owner struct {
|
||||
Type string `json:"Type"`
|
||||
ID string `json:"Id"`
|
||||
} `json:"Owner"`
|
||||
ClassPath string `json:"ClassPath"`
|
||||
ParentClassID int `json:"ParentClassId"`
|
||||
AttachmentInfo struct {
|
||||
SubClassCount int `json:"SubClassCount"`
|
||||
MaterialCount int `json:"MaterialCount"`
|
||||
Size int64 `json:"Size"`
|
||||
} `json:"AttachmentInfo"`
|
||||
ClassPreviewURLSet []string `json:"ClassPreviewUrlSet"`
|
||||
} `json:"ClassInfo"`
|
||||
MaterialInfo struct {
|
||||
BasicInfo struct {
|
||||
MaterialID string `json:"MaterialId"`
|
||||
MaterialType string `json:"MaterialType"`
|
||||
Name string `json:"Name"`
|
||||
CreateTime time.Time `json:"CreateTime"`
|
||||
UpdateTime time.Time `json:"UpdateTime"`
|
||||
			ClassPath              string `json:"ClassPath"`
			ClassID                int    `json:"ClassId"`
			TagInfoSet             []any  `json:"TagInfoSet"`
			TagSet                 []any  `json:"TagSet"`
			PreviewURL             string `json:"PreviewUrl"`
			MediaURL               string `json:"MediaUrl"`
			UnifiedMediaPreviewURL string `json:"UnifiedMediaPreviewUrl"`
			Owner                  struct {
				Type string `json:"Type"`
				ID   string `json:"Id"`
			} `json:"Owner"`
			PermissionSet        any    `json:"PermissionSet"`
			PermissionInfoSet    []any  `json:"PermissionInfoSet"`
			TfUID                string `json:"TfUid"`
			GroupID              string `json:"GroupId"`
			VersionMaterialIDSet []any  `json:"VersionMaterialIdSet"`
			FileType             string `json:"FileType"`
			CmeMaterialPlayList  []any  `json:"CmeMaterialPlayList"`
			Status               string `json:"Status"`
			DownloadSwitch       string `json:"DownloadSwitch"`
		} `json:"BasicInfo"`
		MediaInfo struct {
			Width          int     `json:"Width"`
			Height         int     `json:"Height"`
			Size           int     `json:"Size"`
			Duration       float64 `json:"Duration"`
			Fps            int     `json:"Fps"`
			BitRate        int     `json:"BitRate"`
			Codec          string  `json:"Codec"`
			MediaType      string  `json:"MediaType"`
			FavoriteStatus string  `json:"FavoriteStatus"`
		} `json:"MediaInfo"`
		MaterialStatus struct {
			ContentReviewStatus          string `json:"ContentReviewStatus"`
			EditorUsableStatus           string `json:"EditorUsableStatus"`
			UnifiedPreviewStatus         string `json:"UnifiedPreviewStatus"`
			EditPreviewImageSpiritStatus string `json:"EditPreviewImageSpiritStatus"`
			TranscodeStatus              string `json:"TranscodeStatus"`
			AdaptiveStreamingStatus      string `json:"AdaptiveStreamingStatus"`
			StreamConnectable            string `json:"StreamConnectable"`
			AiAnalysisStatus             string `json:"AiAnalysisStatus"`
			AiRecognitionStatus          string `json:"AiRecognitionStatus"`
		} `json:"MaterialStatus"`
		ImageMaterial struct {
			Height      int    `json:"Height"`
			Width       int    `json:"Width"`
			Size        int    `json:"Size"`
			MaterialURL string `json:"MaterialUrl"`
			Resolution  string `json:"Resolution"`
			VodFileID   string `json:"VodFileId"`
			OriginalURL string `json:"OriginalUrl"`
		} `json:"ImageMaterial"`
		VideoMaterial struct {
			MetaData struct {
				Size               int     `json:"Size"`
				Container          string  `json:"Container"`
				Bitrate            int     `json:"Bitrate"`
				Height             int     `json:"Height"`
				Width              int     `json:"Width"`
				Duration           float64 `json:"Duration"`
				Rotate             int     `json:"Rotate"`
				VideoStreamInfoSet []struct {
					Bitrate int    `json:"Bitrate"`
					Height  int    `json:"Height"`
					Width   int    `json:"Width"`
					Codec   string `json:"Codec"`
					Fps     int    `json:"Fps"`
				} `json:"VideoStreamInfoSet"`
				AudioStreamInfoSet []struct {
					Bitrate      int    `json:"Bitrate"`
					SamplingRate int    `json:"SamplingRate"`
					Codec        string `json:"Codec"`
				} `json:"AudioStreamInfoSet"`
			} `json:"MetaData"`
			ImageSpriteInfo    any    `json:"ImageSpriteInfo"`
			MaterialURL        string `json:"MaterialUrl"`
			CoverURL           string `json:"CoverUrl"`
			Resolution         string `json:"Resolution"`
			VodFileID          string `json:"VodFileId"`
			OriginalURL        string `json:"OriginalUrl"`
			AudioWaveformURL   string `json:"AudioWaveformUrl"`
			SubtitleURL        string `json:"SubtitleUrl"`
			TranscodeInfoSet   []any  `json:"TranscodeInfoSet"`
			ImageSpriteInfoSet []any  `json:"ImageSpriteInfoSet"`
		} `json:"VideoMaterial"`
	} `json:"MaterialInfo"`
}

type RspFiles struct {
	Code           string `json:"Code"`
	Message        string `json:"Message"`
	EnglishMessage string `json:"EnglishMessage"`
	Data           struct {
		TotalCount      int    `json:"TotalCount"`
		ResourceInfoSet []File `json:"ResourceInfoSet"`
		ScrollToken     string `json:"ScrollToken"`
	} `json:"Data"`
}

type RspDown struct {
	Code           string `json:"Code"`
	Message        string `json:"Message"`
	EnglishMessage string `json:"EnglishMessage"`
	Data           struct {
		DownloadURLInfoSet []struct {
			MaterialID  string `json:"MaterialId"`
			DownloadURL string `json:"DownloadUrl"`
		} `json:"DownloadUrlInfoSet"`
	} `json:"Data"`
}

type RspCreatrMaterial struct {
	Code           string `json:"Code"`
	Message        string `json:"Message"`
	EnglishMessage string `json:"EnglishMessage"`
	Data           struct {
		UploadContext string `json:"UploadContext"`
		VodUploadSign string `json:"VodUploadSign"`
		QuickUpload   bool   `json:"QuickUpload"`
	} `json:"Data"`
}

type RspApplyUploadUGC struct {
	Code    int    `json:"code"`
	Message string `json:"message"`
	Data    struct {
		Video struct {
			StorageSignature string `json:"storageSignature"`
			StoragePath      string `json:"storagePath"`
		} `json:"video"`
		StorageAppID    int    `json:"storageAppId"`
		StorageBucket   string `json:"storageBucket"`
		StorageRegion   string `json:"storageRegion"`
		StorageRegionV5 string `json:"storageRegionV5"`
		Domain          string `json:"domain"`
		VodSessionKey   string `json:"vodSessionKey"`
		TempCertificate struct {
			SecretID    string `json:"secretId"`
			SecretKey   string `json:"secretKey"`
			Token       string `json:"token"`
			ExpiredTime int    `json:"expiredTime"`
		} `json:"tempCertificate"`
		AppID                     int    `json:"appId"`
		Timestamp                 int    `json:"timestamp"`
		StorageRegionV50          string `json:"StorageRegionV5"`
		MiniProgramAccelerateHost string `json:"MiniProgramAccelerateHost"`
	} `json:"data"`
}

type RspCommitUploadUGC struct {
	Code    int    `json:"code"`
	Message string `json:"message"`
	Data    struct {
		Video struct {
			URL           string `json:"url"`
			VerifyContent string `json:"verify_content"`
		} `json:"video"`
		FileID string `json:"fileId"`
	} `json:"data"`
}

type RspFinishUpload struct {
	Code           string `json:"Code"`
	Message        string `json:"Message"`
	EnglishMessage string `json:"EnglishMessage"`
	Data           struct {
		MaterialID string `json:"MaterialId"`
	} `json:"Data"`
}

func fileToObj(f File) *model.Object {
	obj := &model.Object{}
	if f.Type == "CLASS" {
		obj.Name = f.ClassInfo.Name
		obj.ID = strconv.Itoa(f.ClassInfo.ClassID)
		obj.IsFolder = true
		obj.Modified = f.ClassInfo.CreateTime
		obj.Size = 0
	} else if f.Type == "MATERIAL" {
		obj.Name = f.MaterialInfo.BasicInfo.Name
		obj.ID = f.MaterialInfo.BasicInfo.MaterialID
		obj.IsFolder = false
		obj.Modified = f.MaterialInfo.BasicInfo.CreateTime
		obj.Size = int64(f.MaterialInfo.MediaInfo.Size)
	}
	return obj
}
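For orientation, a minimal sketch of how fileToObj is typically consumed once a directory listing has been fetched. listObjs is a hypothetical helper; GetFiles and fileToObj are the functions defined in this driver:

// Hypothetical illustration, not part of the driver itself.
func listObjs(d *Vtencent, dirId string) ([]*model.Object, error) {
	files, err := d.GetFiles(dirId)
	if err != nil {
		return nil, err
	}
	objs := make([]*model.Object, 0, len(files))
	for _, f := range files {
		// CLASS entries become folders, MATERIAL entries become files.
		objs = append(objs, fileToObj(f))
	}
	return objs, nil
}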
@ -1,301 +0,0 @@
package vtencent

import (
	"context"
	"crypto/sha1"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"net/http"
	"path"
	"strconv"
	"strings"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/pkg/http_range"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
	"github.com/go-resty/resty/v2"
)

func (d *Vtencent) request(url, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
	req := base.RestyClient.R()
	req.SetHeaders(map[string]string{
		"cookie":       d.Cookie,
		"content-type": "application/json",
		"origin":       d.conf.origin,
		"referer":      d.conf.referer,
	})
	if callback != nil {
		callback(req)
	} else {
		req.SetBody("{}")
	}
	if resp != nil {
		req.SetResult(resp)
	}
	res, err := req.Execute(method, url)
	if err != nil {
		return nil, err
	}
	code := utils.Json.Get(res.Body(), "Code").ToString()
	if code != "Success" {
		switch code {
		case "AuthFailure.SessionInvalid":
			// session expired: fall through and retry the request below
		default:
			return nil, errors.New(code)
		}
		return d.request(url, method, callback, resp)
	}
	return res.Body(), nil
}

func (d *Vtencent) ugcRequest(url, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
	req := base.RestyClient.R()
	req.SetHeaders(map[string]string{
		"cookie":       d.Cookie,
		"content-type": "application/json",
		"origin":       d.conf.origin,
		"referer":      d.conf.referer,
	})
	if callback != nil {
		callback(req)
	} else {
		req.SetBody("{}")
	}
	if resp != nil {
		req.SetResult(resp)
	}
	res, err := req.Execute(method, url)
	if err != nil {
		return nil, err
	}
	code := utils.Json.Get(res.Body(), "Code").ToInt()
	if code != 0 {
		message := utils.Json.Get(res.Body(), "message").ToString()
		if len(message) == 0 {
			message = utils.Json.Get(res.Body(), "msg").ToString()
		}
		return nil, errors.New(message)
	}
	return res.Body(), nil
}

func (d *Vtencent) LoadUser() (string, error) {
	api := "https://api.vs.tencent.com/SaaS/Account/DescribeAccount"
	res, err := d.request(api, http.MethodPost, func(req *resty.Request) {}, nil)
	if err != nil {
		return "", err
	}
	return utils.Json.Get(res, "Data", "TfUid").ToString(), nil
}

func (d *Vtencent) GetFiles(dirId string) ([]File, error) {
	var res []File
	for {
		api := "https://api.vs.tencent.com/PaaS/Material/SearchResource"
		form := fmt.Sprintf(`{
	"Text":"",
	"Offset":%d,
	"Limit":50,
	"Sort":{"Field":"%s","Order":"%s"},
	"CreateTimeRanges":[],
	"MaterialTypes":[],
	"ReviewStatuses":[],
	"Tags":[],
	"SearchScopes":[{"Owner":{"Type":"PERSON","Id":"%s"},"ClassId":%s,"SearchOneDepth":true}]
}`, len(res), d.Addition.OrderBy, d.Addition.OrderDirection, d.TfUid, dirId)
		var resp RspFiles
		_, err := d.request(api, http.MethodPost, func(req *resty.Request) {
			req.SetBody(form).ForceContentType("application/json")
		}, &resp)
		if err != nil {
			return nil, err
		}
		res = append(res, resp.Data.ResourceInfoSet...)
		if len(resp.Data.ResourceInfoSet) <= 0 || len(res) >= resp.Data.TotalCount {
			break
		}
	}
	return res, nil
}

func (d *Vtencent) CreateUploadMaterial(classId int, fileName string, UploadSummaryKey string) (RspCreatrMaterial, error) {
	api := "https://api.vs.tencent.com/PaaS/Material/CreateUploadMaterial"
	form := base.Json{
		"Owner":            base.Json{"Type": "PERSON", "Id": d.TfUid},
		"MaterialType":     "VIDEO",
		"Name":             fileName,
		"ClassId":          classId,
		"UploadSummaryKey": UploadSummaryKey,
	}
	var resps RspCreatrMaterial
	_, err := d.request(api, http.MethodPost, func(req *resty.Request) {
		req.SetBody(form).ForceContentType("application/json")
	}, &resps)
	if err != nil {
		return RspCreatrMaterial{}, err
	}
	return resps, nil
}

func (d *Vtencent) ApplyUploadUGC(signature string, stream model.FileStreamer) (RspApplyUploadUGC, error) {
	api := "https://vod2.qcloud.com/v3/index.php?Action=ApplyUploadUGC"
	form := base.Json{
		"signature": signature,
		"videoName": stream.GetName(),
		"videoType": strings.ReplaceAll(path.Ext(stream.GetName()), ".", ""),
		"videoSize": stream.GetSize(),
	}
	var resps RspApplyUploadUGC
	_, err := d.ugcRequest(api, http.MethodPost, func(req *resty.Request) {
		req.SetBody(form).ForceContentType("application/json")
	}, &resps)
	if err != nil {
		return RspApplyUploadUGC{}, err
	}
	return resps, nil
}

func (d *Vtencent) CommitUploadUGC(signature string, vodSessionKey string) (RspCommitUploadUGC, error) {
	api := "https://vod2.qcloud.com/v3/index.php?Action=CommitUploadUGC"
	form := base.Json{
		"signature":     signature,
		"vodSessionKey": vodSessionKey,
	}
	var resps RspCommitUploadUGC
	rsp, err := d.ugcRequest(api, http.MethodPost, func(req *resty.Request) {
		req.SetBody(form).ForceContentType("application/json")
	}, &resps)
	if err != nil {
		return RspCommitUploadUGC{}, err
	}
	if len(resps.Data.Video.URL) == 0 {
		return RspCommitUploadUGC{}, errors.New(string(rsp))
	}
	return resps, nil
}

func (d *Vtencent) FinishUploadMaterial(SummaryKey string, VodVerifyKey string, UploadContext, VodFileId string) (RspFinishUpload, error) {
	api := "https://api.vs.tencent.com/PaaS/Material/FinishUploadMaterial"
	form := base.Json{
		"UploadContext": UploadContext,
		"VodVerifyKey":  VodVerifyKey,
		"VodFileId":     VodFileId,
		"UploadFullKey": SummaryKey,
	}
	var resps RspFinishUpload
	rsp, err := d.request(api, http.MethodPost, func(req *resty.Request) {
		req.SetBody(form).ForceContentType("application/json")
	}, &resps)
	if err != nil {
		return RspFinishUpload{}, err
	}
	if len(resps.Data.MaterialID) == 0 {
		return RspFinishUpload{}, errors.New(string(rsp))
	}
	return resps, nil
}

func (d *Vtencent) FinishHashUploadMaterial(SummaryKey string, UploadContext string) (RspFinishUpload, error) {
	api := "https://api.vs.tencent.com/PaaS/Material/FinishUploadMaterial"
	var resps RspFinishUpload
	form := base.Json{
		"UploadContext": UploadContext,
		"UploadFullKey": SummaryKey,
	}
	rsp, err := d.request(api, http.MethodPost, func(req *resty.Request) {
		req.SetBody(form).ForceContentType("application/json")
	}, &resps)
	if err != nil {
		return RspFinishUpload{}, err
	}
	if len(resps.Data.MaterialID) == 0 {
		return RspFinishUpload{}, errors.New(string(rsp))
	}
	return resps, nil
}

func (d *Vtencent) FileUpload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
	classId, err := strconv.Atoi(dstDir.GetID())
	if err != nil {
		return err
	}
	const chunkLength int64 = 1024 * 1024 * 10
	reader, err := stream.RangeRead(http_range.Range{Start: 0, Length: chunkLength})
	if err != nil {
		return err
	}
	chunkHash, err := utils.HashReader(utils.SHA1, reader)
	if err != nil {
		return err
	}
	rspCreatrMaterial, err := d.CreateUploadMaterial(classId, stream.GetName(), chunkHash)
	if err != nil {
		return err
	}
	if rspCreatrMaterial.Data.QuickUpload {
		SummaryKey := stream.GetHash().GetHash(utils.SHA1)
		if len(SummaryKey) < utils.SHA1.Width {
			if SummaryKey, err = utils.HashReader(utils.SHA1, stream); err != nil {
				return err
			}
		}
		UploadContext := rspCreatrMaterial.Data.UploadContext
		_, err = d.FinishHashUploadMaterial(SummaryKey, UploadContext)
		if err != nil {
			return err
		}
		return nil
	}
	hash := sha1.New()
	rspUGC, err := d.ApplyUploadUGC(rspCreatrMaterial.Data.VodUploadSign, stream)
	if err != nil {
		return err
	}
	params := rspUGC.Data
	certificate := params.TempCertificate
	cfg := &aws.Config{
		HTTPClient: base.HttpClient,
		// S3ForcePathStyle: aws.Bool(true),
		Credentials: credentials.NewStaticCredentials(certificate.SecretID, certificate.SecretKey, certificate.Token),
		Region:      aws.String(params.StorageRegionV5),
		Endpoint:    aws.String(fmt.Sprintf("cos.%s.myqcloud.com", params.StorageRegionV5)),
	}
	ss, err := session.NewSession(cfg)
	if err != nil {
		return err
	}
	uploader := s3manager.NewUploader(ss)
	if stream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
		uploader.PartSize = stream.GetSize() / (s3manager.MaxUploadParts - 1)
	}
	input := &s3manager.UploadInput{
		Bucket: aws.String(fmt.Sprintf("%s-%d", params.StorageBucket, params.StorageAppID)),
		Key:    &params.Video.StoragePath,
		Body: driver.NewLimitedUploadStream(ctx,
			io.TeeReader(stream, io.MultiWriter(hash, driver.NewProgress(stream.GetSize(), up)))),
	}
	_, err = uploader.UploadWithContext(ctx, input)
	if err != nil {
		return err
	}
	rspCommitUGC, err := d.CommitUploadUGC(rspCreatrMaterial.Data.VodUploadSign, rspUGC.Data.VodSessionKey)
	if err != nil {
		return err
	}
	VodVerifyKey := rspCommitUGC.Data.Video.VerifyContent
	VodFileId := rspCommitUGC.Data.FileID
	UploadContext := rspCreatrMaterial.Data.UploadContext
	SummaryKey := hex.EncodeToString(hash.Sum(nil))
	_, err = d.FinishUploadMaterial(SummaryKey, VodVerifyKey, UploadContext, VodFileId)
	if err != nil {
		return err
	}
	return nil
}
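The quick-upload branch in FileUpload above hinges on one detail: the server is first shown a SHA-1 of only the first 10 MiB (chunkLength) as UploadSummaryKey, and the full-file SHA-1 is supplied only if the server answers QuickUpload. A self-contained sketch of that first hash, under the same 10 MiB assumption (summaryKey is an illustrative name, built only from the imports already present in this file):

// Illustrative only: SHA-1 of at most the first 10 MiB of r,
// mirroring the chunkLength constant used by FileUpload.
func summaryKey(r io.Reader) (string, error) {
	h := sha1.New()
	if _, err := io.Copy(h, io.LimitReader(r, 10*1024*1024)); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}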
@ -1,19 +0,0 @@
#!/bin/bash

umask ${UMASK}

if [ "$1" = "version" ]; then
	./alist version
else
	if [ "$RUN_ARIA2" = "true" ]; then
		chown -R ${PUID}:${PGID} /opt/aria2/
		exec su-exec ${PUID}:${PGID} nohup aria2c \
			--enable-rpc \
			--rpc-allow-origin-all \
			--conf-path=/opt/aria2/.aria2/aria2.conf \
			>/dev/null 2>&1 &
	fi

	chown -R ${PUID}:${PGID} /opt/alist/
	exec su-exec ${PUID}:${PGID} ./alist server --no-prefix
fi
go.mod (277 changed lines)
@ -1,265 +1,50 @@
module github.com/alist-org/alist/v3
module github.com/OpenListTeam/OpenList/v5

go 1.23.4
go 1.24

require (
	github.com/KirCute/ftpserverlib-pasvportmap v1.25.0
	github.com/KirCute/sftpd-alist v0.0.12
	github.com/ProtonMail/go-crypto v1.0.0
	github.com/SheltonZhu/115driver v1.0.34
	github.com/Xhofe/go-cache v0.0.0-20240804043513-b1a71927bc21
	github.com/Xhofe/rateg v0.0.0-20230728072201-251a4e1adad4
	github.com/alist-org/gofakes3 v0.0.7
	github.com/alist-org/times v0.0.0-20240721124654-efa0c7d3ad92
	github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible
	github.com/avast/retry-go v3.0.0+incompatible
	github.com/aws/aws-sdk-go v1.55.5
	github.com/blevesearch/bleve/v2 v2.4.2
	github.com/caarlos0/env/v9 v9.0.0
	github.com/charmbracelet/bubbles v0.20.0
	github.com/charmbracelet/bubbletea v1.1.0
	github.com/charmbracelet/lipgloss v0.13.0
	github.com/city404/v6-public-rpc-proto/go v0.0.0-20240817070657-90f8e24b653e
	github.com/coreos/go-oidc v2.2.1+incompatible
	github.com/deckarep/golang-set/v2 v2.6.0
	github.com/dhowden/tag v0.0.0-20240417053706-3d75831295e8
	github.com/disintegration/imaging v1.6.2
	github.com/dlclark/regexp2 v1.11.4
	github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564
	github.com/foxxorcat/mopan-sdk-go v0.1.6
	github.com/foxxorcat/weiyun-sdk-go v0.1.3
	github.com/gin-contrib/cors v1.7.2
	github.com/gin-gonic/gin v1.10.0
	github.com/go-resty/resty/v2 v2.14.0
	github.com/go-webauthn/webauthn v0.11.1
	github.com/golang-jwt/jwt/v4 v4.5.0
	github.com/google/uuid v1.6.0
	github.com/gorilla/websocket v1.5.3
	github.com/hekmon/transmissionrpc/v3 v3.0.0
	github.com/hirochachacha/go-smb2 v1.1.0
	github.com/ipfs/go-ipfs-api v0.7.0
	github.com/jlaffaye/ftp v0.2.0
	github.com/gin-contrib/cors v1.7.6
	github.com/gin-gonic/gin v1.10.1
	github.com/hashicorp/go-plugin v1.7.0
	github.com/json-iterator/go v1.1.12
	github.com/kdomanski/iso9660 v0.4.0
	github.com/larksuite/oapi-sdk-go/v3 v3.3.1
	github.com/maruel/natural v1.1.1
	github.com/meilisearch/meilisearch-go v0.27.2
	github.com/mholt/archives v0.1.0
	github.com/minio/sio v0.4.0
	github.com/natefinch/lumberjack v2.0.0+incompatible
	github.com/ncw/swift/v2 v2.0.3
	github.com/pkg/errors v0.9.1
	github.com/pkg/sftp v1.13.6
	github.com/pquerna/otp v1.4.0
	github.com/rclone/rclone v1.67.0
	github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d
	github.com/sirupsen/logrus v1.9.3
	github.com/spf13/afero v1.11.0
	github.com/spf13/cobra v1.8.1
	github.com/stretchr/testify v1.10.0
	github.com/t3rm1n4l/go-mega v0.0.0-20240219080617-d494b6a8ace7
	github.com/u2takey/ffmpeg-go v0.5.0
	github.com/upyun/go-sdk/v3 v3.0.4
	github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5
	github.com/xhofe/tache v0.1.5
	github.com/xhofe/wopan-sdk-go v0.1.3
	github.com/yeka/zip v0.0.0-20231116150916-03d6312748a9
	github.com/zzzhr1990/go-common-entity v0.0.0-20221216044934-fd1c571e3a22
	golang.org/x/crypto v0.36.0
	golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e
	golang.org/x/image v0.19.0
	golang.org/x/net v0.37.0
	golang.org/x/oauth2 v0.22.0
	golang.org/x/time v0.8.0
	google.golang.org/appengine v1.6.8
	gopkg.in/ldap.v3 v3.1.0
	gorm.io/driver/mysql v1.5.7
	gorm.io/driver/postgres v1.5.9
	gorm.io/driver/sqlite v1.5.6
	gorm.io/gorm v1.25.11
	github.com/spf13/cobra v1.9.1
	golang.org/x/net v0.43.0
	google.golang.org/grpc v1.74.2
	google.golang.org/protobuf v1.36.7
)

require (
	github.com/STARRY-S/zip v0.2.1 // indirect
	github.com/aymerick/douceur v0.2.0 // indirect
	github.com/blevesearch/go-faiss v1.0.20 // indirect
	github.com/blevesearch/zapx/v16 v16.1.5 // indirect
	github.com/bodgit/plumbing v1.3.0 // indirect
	github.com/bodgit/sevenzip v1.6.0 // indirect
	github.com/bodgit/windows v1.0.1 // indirect
	github.com/bytedance/sonic/loader v0.1.1 // indirect
	github.com/charmbracelet/x/ansi v0.2.3 // indirect
	github.com/charmbracelet/x/term v0.2.0 // indirect
	github.com/cloudflare/circl v1.3.7 // indirect
	github.com/cloudwego/base64x v0.1.4 // indirect
	github.com/cloudwego/iasm v0.2.0 // indirect
	github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect
	github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
	github.com/fclairamb/go-log v0.5.0 // indirect
	github.com/gorilla/css v1.0.1 // indirect
	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
	github.com/hekmon/cunits/v2 v2.1.0 // indirect
	github.com/ipfs/boxo v0.12.0 // indirect
	github.com/jackc/puddle/v2 v2.2.1 // indirect
	github.com/klauspost/pgzip v1.2.6 // indirect
	github.com/kr/text v0.2.0 // indirect
	github.com/matoous/go-nanoid/v2 v2.1.0 // indirect
	github.com/microcosm-cc/bluemonday v1.0.27
	github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78 // indirect
	github.com/sorairolake/lzip-go v0.3.5 // indirect
	github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543 // indirect
	github.com/therootcompany/xz v1.0.1 // indirect
	github.com/ulikunitz/xz v0.5.12 // indirect
	github.com/xhofe/115-sdk-go v0.1.1
	github.com/yuin/goldmark v1.7.8
	go4.org v0.0.0-20230225012048-214862532bf5 // indirect
	resty.dev/v3 v3.0.0-beta.2 // indirect
)

require (
	github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd // indirect
	github.com/RoaringBitmap/roaring v1.9.3 // indirect
	github.com/abbot/go-http-auth v0.4.0 // indirect
	github.com/aead/ecdh v0.2.0 // indirect
	github.com/andreburgaud/crypt2go v1.8.0 // indirect
	github.com/andybalholm/brotli v1.1.1 // indirect
	github.com/axgle/mahonia v0.0.0-20180208002826-3358181d7394
	github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
	github.com/benbjohnson/clock v1.3.0 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/bits-and-blooms/bitset v1.12.0 // indirect
	github.com/blang/semver/v4 v4.0.0 // indirect
	github.com/blevesearch/bleve_index_api v1.1.10 // indirect
	github.com/blevesearch/geo v0.1.20 // indirect
	github.com/blevesearch/go-porterstemmer v1.0.3 // indirect
	github.com/blevesearch/gtreap v0.1.1 // indirect
	github.com/blevesearch/mmap-go v1.0.4 // indirect
	github.com/blevesearch/scorch_segment_api/v2 v2.2.15 // indirect
	github.com/blevesearch/segment v0.9.1 // indirect
	github.com/blevesearch/snowballstem v0.9.0 // indirect
	github.com/blevesearch/upsidedown_store_api v1.0.2 // indirect
	github.com/blevesearch/vellum v1.0.10 // indirect
	github.com/blevesearch/zapx/v11 v11.3.10 // indirect
	github.com/blevesearch/zapx/v12 v12.3.10 // indirect
	github.com/blevesearch/zapx/v13 v13.3.10 // indirect
	github.com/blevesearch/zapx/v14 v14.3.10 // indirect
	github.com/blevesearch/zapx/v15 v15.3.13 // indirect
	github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect
	github.com/bytedance/sonic v1.11.6 // indirect
	github.com/cespare/xxhash/v2 v2.3.0 // indirect
	github.com/coreos/go-semver v0.3.1 // indirect
	github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 // indirect
	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect
	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
	github.com/gabriel-vasile/mimetype v1.4.3 // indirect
	github.com/geoffgarside/ber v1.1.0 // indirect
	github.com/gin-contrib/sse v0.1.0 // indirect
	github.com/go-chi/chi/v5 v5.0.12 // indirect
	github.com/go-ole/go-ole v1.3.0 // indirect
	github.com/bytedance/sonic v1.14.0 // indirect
	github.com/bytedance/sonic/loader v0.3.0 // indirect
	github.com/cloudwego/base64x v0.1.6 // indirect
	github.com/fatih/color v1.18.0 // indirect
	github.com/gabriel-vasile/mimetype v1.4.9 // indirect
	github.com/gin-contrib/sse v1.1.0 // indirect
	github.com/go-playground/locales v0.14.1 // indirect
	github.com/go-playground/universal-translator v0.18.1 // indirect
	github.com/go-playground/validator/v10 v10.20.0 // indirect
	github.com/go-sql-driver/mysql v1.7.0 // indirect
	github.com/go-webauthn/x v0.1.12 // indirect
	github.com/goccy/go-json v0.10.2 // indirect
	github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
	github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 // indirect
	github.com/go-playground/validator/v10 v10.27.0 // indirect
	github.com/goccy/go-json v0.10.5 // indirect
	github.com/golang/protobuf v1.5.4 // indirect
	github.com/golang/snappy v0.0.4 // indirect
	github.com/google/go-tpm v0.9.1 // indirect
	github.com/hashicorp/errwrap v1.1.0 // indirect
	github.com/hashicorp/go-multierror v1.1.1 // indirect
	github.com/hashicorp/go-version v1.6.0 // indirect
	github.com/hashicorp/go-hclog v1.6.3 // indirect
	github.com/hashicorp/yamux v0.1.2 // indirect
	github.com/inconshreveable/mousetrap v1.1.0 // indirect
	github.com/ipfs/go-cid v0.4.1
	github.com/jackc/pgpassfile v1.0.0 // indirect
	github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
	github.com/jackc/pgx/v5 v5.5.5 // indirect
	github.com/jinzhu/inflection v1.0.0 // indirect
	github.com/jinzhu/now v1.1.5 // indirect
	github.com/jmespath/go-jmespath v0.4.0 // indirect
	github.com/josharian/intern v1.0.0 // indirect
	github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004 // indirect
	github.com/klauspost/compress v1.17.11 // indirect
	github.com/klauspost/cpuid/v2 v2.2.7 // indirect
	github.com/kr/fs v0.1.0 // indirect
	github.com/klauspost/cpuid/v2 v2.3.0 // indirect
	github.com/kr/text v0.2.0 // indirect
	github.com/leodido/go-urn v1.4.0 // indirect
	github.com/libp2p/go-buffer-pool v0.1.0 // indirect
	github.com/libp2p/go-flow-metrics v0.1.0 // indirect
	github.com/libp2p/go-libp2p v0.27.8 // indirect
	github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
	github.com/lufia/plan9stats v0.0.0-20231016141302-07b5767bb0ed // indirect
	github.com/mailru/easyjson v0.7.7 // indirect
	github.com/mattn/go-colorable v0.1.13 // indirect
	github.com/mattn/go-colorable v0.1.14 // indirect
	github.com/mattn/go-isatty v0.0.20 // indirect
	github.com/mattn/go-localereader v0.0.1 // indirect
	github.com/mattn/go-runewidth v0.0.16 // indirect
	github.com/mattn/go-sqlite3 v1.14.22 // indirect
	github.com/minio/sha256-simd v1.0.1 // indirect
	github.com/mitchellh/go-homedir v1.1.0 // indirect
	github.com/mitchellh/mapstructure v1.5.0 // indirect
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
	github.com/mr-tron/base58 v1.2.0 // indirect
	github.com/mschoch/smat v0.2.0 // indirect
	github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
	github.com/muesli/cancelreader v0.2.2 // indirect
	github.com/muesli/termenv v0.15.2 // indirect
	github.com/multiformats/go-base32 v0.1.0 // indirect
	github.com/multiformats/go-base36 v0.2.0 // indirect
	github.com/multiformats/go-multiaddr v0.9.0 // indirect
	github.com/multiformats/go-multibase v0.2.0 // indirect
	github.com/multiformats/go-multicodec v0.9.0 // indirect
	github.com/multiformats/go-multihash v0.2.3 // indirect
	github.com/multiformats/go-multistream v0.4.1 // indirect
	github.com/multiformats/go-varint v0.0.7 // indirect
	github.com/otiai10/copy v1.14.0
	github.com/pelletier/go-toml/v2 v2.2.2 // indirect
	github.com/pierrec/lz4/v4 v4.1.21 // indirect
	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
	github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect
	github.com/pquerna/cachecontrol v0.1.0 // indirect
	github.com/prometheus/client_golang v1.19.1 // indirect
	github.com/prometheus/client_model v0.5.0 // indirect
	github.com/prometheus/common v0.48.0 // indirect
	github.com/prometheus/procfs v0.12.0 // indirect
	github.com/rfjakob/eme v1.1.2 // indirect
	github.com/rivo/uniseg v0.4.7 // indirect
	github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // indirect
	github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df // indirect
	github.com/shirou/gopsutil/v3 v3.24.4 // indirect
	github.com/shoenig/go-m1cpu v0.1.6 // indirect
	github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e // indirect
	github.com/spaolacci/murmur3 v1.1.0 // indirect
	github.com/spf13/pflag v1.0.5 // indirect
	github.com/tklauser/go-sysconf v0.3.13 // indirect
	github.com/tklauser/numcpus v0.7.0 // indirect
	github.com/oklog/run v1.2.0 // indirect
	github.com/pelletier/go-toml/v2 v2.2.4 // indirect
	github.com/spf13/pflag v1.0.7 // indirect
	github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
	github.com/u2takey/go-utils v0.3.1 // indirect
	github.com/ugorji/go/codec v1.2.12 // indirect
	github.com/valyala/bytebufferpool v1.0.0 // indirect
	github.com/valyala/fasthttp v1.37.1-0.20220607072126-8a320890c08d // indirect
	github.com/x448/float16 v0.8.4 // indirect
	github.com/xhofe/gsync v0.0.0-20230917091818-2111ceb38a25 // indirect
	github.com/yusufpapurcu/wmi v1.2.4 // indirect
	go.etcd.io/bbolt v1.3.8 // indirect
	golang.org/x/arch v0.8.0 // indirect
	golang.org/x/sync v0.12.0
	golang.org/x/sys v0.31.0 // indirect
	golang.org/x/term v0.30.0 // indirect
	golang.org/x/text v0.23.0
	golang.org/x/tools v0.24.0 // indirect
	google.golang.org/api v0.169.0 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 // indirect
	google.golang.org/grpc v1.66.0
	google.golang.org/protobuf v1.34.2 // indirect
	gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect
	gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
	gopkg.in/square/go-jose.v2 v2.6.0 // indirect
	github.com/ugorji/go/codec v1.3.0 // indirect
	golang.org/x/arch v0.20.0 // indirect
	golang.org/x/crypto v0.41.0 // indirect
	golang.org/x/sys v0.35.0 // indirect
	golang.org/x/text v0.28.0 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20250811230008-5f3141c8851a // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	lukechampine.com/blake3 v1.1.7 // indirect
)

// replace github.com/xhofe/115-sdk-go => ../../xhofe/115-sdk-go
@ -1,7 +0,0 @@
package archive

import (
	_ "github.com/alist-org/alist/v3/internal/archive/archives"
	_ "github.com/alist-org/alist/v3/internal/archive/iso9660"
	_ "github.com/alist-org/alist/v3/internal/archive/zip"
)
@ -1,15 +0,0 @@
package tool

import (
	"io"

	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/internal/stream"
)

type Tool interface {
	AcceptedExtensions() []string
	GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error)
	List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error)
	Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error)
	Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error
}
@ -1,23 +0,0 @@
package tool

import (
	"github.com/alist-org/alist/v3/internal/errs"
)

var (
	Tools = make(map[string]Tool)
)

func RegisterTool(tool Tool) {
	for _, ext := range tool.AcceptedExtensions() {
		Tools[ext] = tool
	}
}

func GetArchiveTool(ext string) (Tool, error) {
	t, ok := Tools[ext]
	if !ok {
		return nil, errs.UnknownArchiveFormat
	}
	return t, nil
}
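A minimal sketch of how a caller might resolve a handler through this registry; metaFor is a hypothetical helper and the zero-valued ArchiveArgs is only for illustration:

// Hypothetical: fetch archive metadata for an already-opened stream.
func metaFor(ext string, ss *stream.SeekableStream) (model.ArchiveMeta, error) {
	t, err := tool.GetArchiveTool(ext) // e.g. ".zip"
	if err != nil {
		return nil, err // errs.UnknownArchiveFormat if nothing is registered
	}
	return t.GetMeta(ss, model.ArchiveArgs{})
}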
@ -1,248 +0,0 @@
package zip

import (
	"io"
	"os"
	stdpath "path"
	"strings"

	"github.com/alist-org/alist/v3/internal/archive/tool"
	"github.com/alist-org/alist/v3/internal/errs"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/internal/stream"
	"github.com/yeka/zip"
)

type Zip struct{}

func (*Zip) AcceptedExtensions() []string {
	return []string{".zip"}
}

func (*Zip) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
	reader, err := stream.NewReadAtSeeker(ss, 0)
	if err != nil {
		return nil, err
	}
	zipReader, err := zip.NewReader(reader, ss.GetSize())
	if err != nil {
		return nil, err
	}
	encrypted := false
	dirMap := make(map[string]*model.ObjectTree)
	dirMap["."] = &model.ObjectTree{}
	for _, file := range zipReader.File {
		if file.IsEncrypted() {
			encrypted = true
		}

		name := strings.TrimPrefix(decodeName(file.Name), "/")
		var dir string
		var dirObj *model.ObjectTree
		isNewFolder := false
		if !file.FileInfo().IsDir() {
			// first attach the file to the folder that contains it
			dir = stdpath.Dir(name)
			dirObj = dirMap[dir]
			if dirObj == nil {
				isNewFolder = true
				dirObj = &model.ObjectTree{}
				dirObj.IsFolder = true
				dirObj.Name = stdpath.Base(dir)
				dirObj.Modified = file.ModTime()
				dirMap[dir] = dirObj
			}
			dirObj.Children = append(
				dirObj.Children, &model.ObjectTree{
					Object: *toModelObj(file.FileInfo()),
				},
			)
		} else {
			dir = strings.TrimSuffix(name, "/")
			dirObj = dirMap[dir]
			if dirObj == nil {
				isNewFolder = true
				dirObj = &model.ObjectTree{}
				dirMap[dir] = dirObj
			}
			dirObj.IsFolder = true
			dirObj.Name = stdpath.Base(dir)
			dirObj.Modified = file.ModTime()
			dirObj.Children = make([]model.ObjTree, 0)
		}
		if isNewFolder {
			// attach the newly created folder to its parent folder
			dir = stdpath.Dir(dir)
			pDirObj := dirMap[dir]
			if pDirObj != nil {
				pDirObj.Children = append(pDirObj.Children, dirObj)
				continue
			}

			for {
				// handle archives that only record file paths and store no folder entries
				pDirObj = &model.ObjectTree{}
				pDirObj.IsFolder = true
				pDirObj.Name = stdpath.Base(dir)
				pDirObj.Modified = file.ModTime()
				dirMap[dir] = pDirObj
				pDirObj.Children = append(pDirObj.Children, dirObj)
				dir = stdpath.Dir(dir)
				if dirMap[dir] != nil {
					break
				}
				dirObj = pDirObj
			}
		}
	}

	return &model.ArchiveMetaInfo{
		Comment:   zipReader.Comment,
		Encrypted: encrypted,
		Tree:      dirMap["."].GetChildren(),
	}, nil
}

func (*Zip) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
	reader, err := stream.NewReadAtSeeker(ss, 0)
	if err != nil {
		return nil, err
	}
	zipReader, err := zip.NewReader(reader, ss.GetSize())
	if err != nil {
		return nil, err
	}
	if args.InnerPath == "/" {
		ret := make([]model.Obj, 0)
		passVerified := false
		var dir *model.Object
		for _, file := range zipReader.File {
			if !passVerified && file.IsEncrypted() {
				file.SetPassword(args.Password)
				rc, e := file.Open()
				if e != nil {
					return nil, filterPassword(e)
				}
				_ = rc.Close()
				passVerified = true
			}
			name := strings.TrimSuffix(decodeName(file.Name), "/")
			if strings.Contains(name, "/") {
				// some archives store no entry for the top-level folder itself
				strs := strings.Split(name, "/")
				if dir == nil && len(strs) == 2 {
					dir = &model.Object{
						Name:     strs[0],
						Modified: ss.ModTime(),
						IsFolder: true,
					}
				}
				continue
			}
			ret = append(ret, toModelObj(file.FileInfo()))
		}
		if len(ret) == 0 && dir != nil {
			ret = append(ret, dir)
		}
		return ret, nil
	} else {
		innerPath := strings.TrimPrefix(args.InnerPath, "/") + "/"
		ret := make([]model.Obj, 0)
		exist := false
		for _, file := range zipReader.File {
			name := decodeName(file.Name)
			dir := stdpath.Dir(strings.TrimSuffix(name, "/")) + "/"
			if dir != innerPath {
				continue
			}
			exist = true
			ret = append(ret, toModelObj(file.FileInfo()))
		}
		if !exist {
			return nil, errs.ObjectNotFound
		}
		return ret, nil
	}
}

func (*Zip) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
	reader, err := stream.NewReadAtSeeker(ss, 0)
	if err != nil {
		return nil, 0, err
	}
	zipReader, err := zip.NewReader(reader, ss.GetSize())
	if err != nil {
		return nil, 0, err
	}
	innerPath := strings.TrimPrefix(args.InnerPath, "/")
	for _, file := range zipReader.File {
		if decodeName(file.Name) == innerPath {
			if file.IsEncrypted() {
				file.SetPassword(args.Password)
			}
			r, e := file.Open()
			if e != nil {
				return nil, 0, e
			}
			return r, file.FileInfo().Size(), nil
		}
	}
	return nil, 0, errs.ObjectNotFound
}

func (*Zip) Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
	reader, err := stream.NewReadAtSeeker(ss, 0)
	if err != nil {
		return err
	}
	zipReader, err := zip.NewReader(reader, ss.GetSize())
	if err != nil {
		return err
	}
	if args.InnerPath == "/" {
		for i, file := range zipReader.File {
			name := decodeName(file.Name)
			err = decompress(file, name, outputPath, args.Password)
			if err != nil {
				return err
			}
			up(float64(i+1) * 100.0 / float64(len(zipReader.File)))
		}
	} else {
		innerPath := strings.TrimPrefix(args.InnerPath, "/")
		innerBase := stdpath.Base(innerPath)
		createdBaseDir := false
		for _, file := range zipReader.File {
			name := decodeName(file.Name)
			if name == innerPath {
				err = _decompress(file, outputPath, args.Password, up)
				if err != nil {
					return err
				}
				break
			} else if strings.HasPrefix(name, innerPath+"/") {
				targetPath := stdpath.Join(outputPath, innerBase)
				if !createdBaseDir {
					err = os.Mkdir(targetPath, 0700)
					if err != nil {
						return err
					}
					createdBaseDir = true
				}
				restPath := strings.TrimPrefix(name, innerPath+"/")
				err = decompress(file, restPath, targetPath, args.Password)
				if err != nil {
					return err
				}
			}
		}
	}
	return nil
}

var _ tool.Tool = (*Zip)(nil)

func init() {
	tool.RegisterTool(&Zip{})
}
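For reference, a standalone sketch of the yeka/zip calls this tool leans on (NewReader, IsEncrypted, SetPassword, Open); the helper and its parameters are illustrative, not part of the package:

// Illustrative: read the first entry of a possibly password-protected zip.
func readFirstEntry(r io.ReaderAt, size int64, password string) ([]byte, error) {
	zr, err := zip.NewReader(r, size)
	if err != nil {
		return nil, err
	}
	for _, f := range zr.File {
		if f.IsEncrypted() {
			f.SetPassword(password)
		}
		rc, err := f.Open()
		if err != nil {
			return nil, err
		}
		defer rc.Close()
		return io.ReadAll(rc)
	}
	return nil, errs.ObjectNotFound
}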
@ -6,119 +6,68 @@ import (
	"path/filepath"
	"strings"

	"github.com/alist-org/alist/v3/cmd/flags"
	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/conf"
	"github.com/alist-org/alist/v3/internal/net"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/caarlos0/env/v9"
	"github.com/OpenListTeam/OpenList/v5/cmd/flags"
	"github.com/OpenListTeam/OpenList/v5/internal/conf"
	"github.com/OpenListTeam/OpenList/v5/pkg/utils"
	log "github.com/sirupsen/logrus"
)

func InitConfig() {
	if flags.ForceBinDir {
		if !filepath.IsAbs(flags.DataDir) {
			ex, err := os.Executable()
			if err != nil {
				utils.Log.Fatal(err)
			}
			exPath := filepath.Dir(ex)
			flags.DataDir = filepath.Join(exPath, flags.DataDir)
		}
	if !filepath.IsAbs(flags.ConfigFile) {
		flags.ConfigFile = filepath.Join(flags.PWD(), flags.ConfigFile)
	}
	configPath := filepath.Join(flags.DataDir, "config.json")
	log.Infof("reading config file: %s", configPath)
	if !utils.Exists(configPath) {
		log.Infof("config file not exists, creating default config file")
		_, err := utils.CreateNestedFile(configPath)
	log.Infoln("reading config file", "@", flags.ConfigFile)

	if !utils.Exists(flags.ConfigFile) {
		log.Infoln("config file not exists, creating default config file")
		_, err := utils.CreateNestedFile(flags.ConfigFile)
		if err != nil {
			log.Fatalf("failed to create config file: %+v", err)
			log.Fatalln("create config file", ":", err)
		}
		conf.Conf = conf.DefaultConfig()
		LastLaunchedVersion = conf.Version
		conf.Conf.LastLaunchedVersion = conf.Version
		if !utils.WriteJsonToFile(configPath, conf.Conf) {
			log.Fatalf("failed to create default config file")
		err = utils.WriteJsonToFile(flags.ConfigFile, conf.Conf)
		if err != nil {
			log.Fatalln("save default config file", ":", err)
		}
	} else {
		configBytes, err := os.ReadFile(configPath)
		configBytes, err := os.ReadFile(flags.ConfigFile)
		if err != nil {
			log.Fatalf("reading config file error: %+v", err)
			log.Fatalln("reading config file", ":", err)
		}
		conf.Conf = conf.DefaultConfig()
		err = utils.Json.Unmarshal(configBytes, conf.Conf)
		if err != nil {
			log.Fatalf("load config error: %+v", err)
			log.Fatalln("unmarshal config", ":", err)
		}
		LastLaunchedVersion = conf.Conf.LastLaunchedVersion
		if strings.HasPrefix(conf.Version, "v") || LastLaunchedVersion == "" {
			conf.Conf.LastLaunchedVersion = conf.Version
		}
		// update config.json struct
		confBody, err := utils.Json.MarshalIndent(conf.Conf, "", " ")
		err = utils.WriteJsonToFile(flags.ConfigFile, conf.Conf)
		if err != nil {
			log.Fatalf("marshal config error: %+v", err)
		}
		err = os.WriteFile(configPath, confBody, 0o777)
		if err != nil {
			log.Fatalf("update config struct error: %+v", err)
			log.Fatalln("update config file", ":", err)
		}
	}
	if conf.Conf.MaxConcurrency > 0 {
		net.DefaultConcurrencyLimit = &net.ConcurrencyLimit{Limit: conf.Conf.MaxConcurrency}
	}
	if !conf.Conf.Force {
		confFromEnv()
	}

	// convert abs path
	if !filepath.IsAbs(conf.Conf.TempDir) {
		absPath, err := filepath.Abs(conf.Conf.TempDir)
		if err != nil {
			log.Fatalf("get abs path error: %+v", err)
	configDir := filepath.Dir(flags.ConfigFile)
	convertAbsPath := func(path *string) {
		if *path != "" && !filepath.IsAbs(*path) {
			*path = filepath.Join(configDir, *path)
		}
		conf.Conf.TempDir = absPath
	}
	err := os.MkdirAll(conf.Conf.TempDir, 0o777)
	if err != nil {
		log.Fatalf("create temp dir error: %+v", err)
	}
	convertAbsPath(&conf.Conf.TempDir)
	convertAbsPath(&conf.Conf.Scheme.CertFile)
	convertAbsPath(&conf.Conf.Scheme.KeyFile)
	convertAbsPath(&conf.Conf.Scheme.UnixFile)
	log.Debugf("config: %+v", conf.Conf)
	base.InitClient()
	initURL()

	initSitePath()
}

func confFromEnv() {
	prefix := "ALIST_"
	if flags.NoPrefix {
		prefix = ""
	}
	log.Infof("load config from env with prefix: %s", prefix)
	if err := env.ParseWithOptions(conf.Conf, env.Options{
		Prefix: prefix,
	}); err != nil {
		log.Fatalf("load config from env error: %+v", err)
	}
}

func initURL() {
func initSitePath() {
	if !strings.Contains(conf.Conf.SiteURL, "://") {
		conf.Conf.SiteURL = utils.FixAndCleanPath(conf.Conf.SiteURL)
	}
	u, err := url.Parse(conf.Conf.SiteURL)
	if err != nil {
		utils.Log.Fatalf("can't parse site_url: %+v", err)
	}
	conf.URL = u
}

func CleanTempDir() {
	files, err := os.ReadDir(conf.Conf.TempDir)
	if err != nil {
		log.Errorln("failed list temp file: ", err)
	}
	for _, file := range files {
		if err := os.RemoveAll(filepath.Join(conf.Conf.TempDir, file.Name())); err != nil {
			log.Errorln("failed delete temp file: ", err)
		}
		log.Fatalln("parse site_url", ":", err)
	}
	conf.SitePath = u.Path
}
internal/bootstrap/driver.go (new file, 13 lines)
@ -0,0 +1,13 @@
package bootstrap

import (
	"github.com/OpenListTeam/OpenList/v5/internal/driver"
	driverS "github.com/OpenListTeam/OpenList/v5/shared/driver"
	"github.com/hashicorp/go-plugin"
)

func InitDriverPlugins() {
	driver.PluginMap = map[string]plugin.Plugin{
		"grpc": &driverS.Plugin{},
	}
}
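A hedged sketch of how a host usually consumes such a PluginMap with hashicorp/go-plugin; the handshake values and the plugin binary path below are placeholders rather than the project's actual ones, and os/exec is assumed alongside the imports above:

// Illustrative host-side wiring only; all literals are assumptions.
func dispenseDriver() (interface{}, error) {
	client := plugin.NewClient(&plugin.ClientConfig{
		HandshakeConfig: plugin.HandshakeConfig{
			ProtocolVersion:  1,                 // assumption
			MagicCookieKey:   "OPENLIST_PLUGIN", // assumption
			MagicCookieValue: "openlist",        // assumption
		},
		Plugins:          driver.PluginMap,
		Cmd:              exec.Command("./example-driver-plugin"), // placeholder
		AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC},
	})
	rpcClient, err := client.Client()
	if err != nil {
		return nil, err
	}
	return rpcClient.Dispense("grpc") // the key registered in InitDriverPlugins
}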
@ -1,17 +0,0 @@
package bootstrap

import (
	"github.com/alist-org/alist/v3/internal/offline_download/tool"
	"github.com/alist-org/alist/v3/pkg/utils"
)

func InitOfflineDownloadTools() {
	for k, v := range tool.Tools {
		res, err := v.Init()
		if err != nil {
			utils.Log.Warnf("init tool %s failed: %s", k, err)
		} else {
			utils.Log.Infof("init tool %s success: %s", k, res)
		}
	}
}
@ -1,216 +1,40 @@
package conf

import (
	"path/filepath"

	"github.com/alist-org/alist/v3/cmd/flags"
	"github.com/alist-org/alist/v3/pkg/utils/random"
)

type Database struct {
	Type        string `json:"type" env:"TYPE"`
	Host        string `json:"host" env:"HOST"`
	Port        int    `json:"port" env:"PORT"`
	User        string `json:"user" env:"USER"`
	Password    string `json:"password" env:"PASS"`
	Name        string `json:"name" env:"NAME"`
	DBFile      string `json:"db_file" env:"FILE"`
	TablePrefix string `json:"table_prefix" env:"TABLE_PREFIX"`
	SSLMode     string `json:"ssl_mode" env:"SSL_MODE"`
	DSN         string `json:"dsn" env:"DSN"`
}

type Meilisearch struct {
	Host        string `json:"host" env:"HOST"`
	APIKey      string `json:"api_key" env:"API_KEY"`
	IndexPrefix string `json:"index_prefix" env:"INDEX_PREFIX"`
}

type Scheme struct {
	Address    string `json:"address" env:"ADDR"`
	HttpPort   int    `json:"http_port" env:"HTTP_PORT"`
	HttpsPort  int    `json:"https_port" env:"HTTPS_PORT"`
	HttpPort   uint16 `json:"http_port" env:"HTTP_PORT"`
	HttpsPort  uint16 `json:"https_port" env:"HTTPS_PORT"`
	ForceHttps bool   `json:"force_https" env:"FORCE_HTTPS"`
	CertFile   string `json:"cert_file" env:"CERT_FILE"`
	KeyFile    string `json:"key_file" env:"KEY_FILE"`
	UnixFile   string `json:"unix_file" env:"UNIX_FILE"`
	UnixFilePerm string `json:"unix_file_perm" env:"UNIX_FILE_PERM"`
	EnableH2c    bool   `json:"enable_h2c" env:"ENABLE_H2C"`
}

type LogConfig struct {
	Enable     bool   `json:"enable" env:"LOG_ENABLE"`
	Name       string `json:"name" env:"LOG_NAME"`
	MaxSize    int    `json:"max_size" env:"MAX_SIZE"`
	MaxBackups int    `json:"max_backups" env:"MAX_BACKUPS"`
	MaxAge     int    `json:"max_age" env:"MAX_AGE"`
	Compress   bool   `json:"compress" env:"COMPRESS"`
}

type TaskConfig struct {
	Workers        int  `json:"workers" env:"WORKERS"`
	MaxRetry       int  `json:"max_retry" env:"MAX_RETRY"`
	TaskPersistant bool `json:"task_persistant" env:"TASK_PERSISTANT"`
}

type TasksConfig struct {
	Download           TaskConfig `json:"download" envPrefix:"DOWNLOAD_"`
	Transfer           TaskConfig `json:"transfer" envPrefix:"TRANSFER_"`
	Upload             TaskConfig `json:"upload" envPrefix:"UPLOAD_"`
	Copy               TaskConfig `json:"copy" envPrefix:"COPY_"`
	Decompress         TaskConfig `json:"decompress" envPrefix:"DECOMPRESS_"`
	DecompressUpload   TaskConfig `json:"decompress_upload" envPrefix:"DECOMPRESS_UPLOAD_"`
	AllowRetryCanceled bool       `json:"allow_retry_canceled" env:"ALLOW_RETRY_CANCELED"`
}

type Cors struct {
	AllowOrigins []string `json:"allow_origins" env:"ALLOW_ORIGINS"`
	AllowMethods []string `json:"allow_methods" env:"ALLOW_METHODS"`
	AllowHeaders []string `json:"allow_headers" env:"ALLOW_HEADERS"`
}

type S3 struct {
	Enable bool `json:"enable" env:"ENABLE"`
	Port   int  `json:"port" env:"PORT"`
	SSL    bool `json:"ssl" env:"SSL"`
}

type FTP struct {
	Enable                  bool   `json:"enable" env:"ENABLE"`
	Listen                  string `json:"listen" env:"LISTEN"`
	FindPasvPortAttempts    int    `json:"find_pasv_port_attempts" env:"FIND_PASV_PORT_ATTEMPTS"`
	ActiveTransferPortNon20 bool   `json:"active_transfer_port_non_20" env:"ACTIVE_TRANSFER_PORT_NON_20"`
	IdleTimeout             int    `json:"idle_timeout" env:"IDLE_TIMEOUT"`
	ConnectionTimeout       int    `json:"connection_timeout" env:"CONNECTION_TIMEOUT"`
	DisableActiveMode       bool   `json:"disable_active_mode" env:"DISABLE_ACTIVE_MODE"`
	DefaultTransferBinary   bool   `json:"default_transfer_binary" env:"DEFAULT_TRANSFER_BINARY"`
	EnableActiveConnIPCheck bool   `json:"enable_active_conn_ip_check" env:"ENABLE_ACTIVE_CONN_IP_CHECK"`
	EnablePasvConnIPCheck   bool   `json:"enable_pasv_conn_ip_check" env:"ENABLE_PASV_CONN_IP_CHECK"`
}

type SFTP struct {
	Enable bool   `json:"enable" env:"ENABLE"`
	Listen string `json:"listen" env:"LISTEN"`
}

type Config struct {
	Force                 bool        `json:"force" env:"FORCE"`
	SiteURL               string      `json:"site_url" env:"SITE_URL"`
	Cdn                   string      `json:"cdn" env:"CDN"`
	JwtSecret             string      `json:"jwt_secret" env:"JWT_SECRET"`
	TokenExpiresIn        int         `json:"token_expires_in" env:"TOKEN_EXPIRES_IN"`
	Database              Database    `json:"database" envPrefix:"DB_"`
	Meilisearch           Meilisearch `json:"meilisearch" envPrefix:"MEILISEARCH_"`
	Scheme                Scheme      `json:"scheme"`
	TempDir               string      `json:"temp_dir" env:"TEMP_DIR"`
	BleveDir              string      `json:"bleve_dir" env:"BLEVE_DIR"`
	DistDir               string      `json:"dist_dir"`
	Log                   LogConfig   `json:"log"`
	DelayedStart          int         `json:"delayed_start" env:"DELAYED_START"`
	MaxConnections        int         `json:"max_connections" env:"MAX_CONNECTIONS"`
	MaxConcurrency        int         `json:"max_concurrency" env:"MAX_CONCURRENCY"`
	TlsInsecureSkipVerify bool        `json:"tls_insecure_skip_verify" env:"TLS_INSECURE_SKIP_VERIFY"`
	Tasks                 TasksConfig `json:"tasks" envPrefix:"TASKS_"`
	Cors                  Cors        `json:"cors" envPrefix:"CORS_"`
	S3                    S3          `json:"s3" envPrefix:"S3_"`
	FTP                   FTP         `json:"ftp" envPrefix:"FTP_"`
	SFTP                  SFTP        `json:"sftp" envPrefix:"SFTP_"`
	LastLaunchedVersion   string      `json:"last_launched_version"`
	TempDir               string      `json:"temp_dir" env:"TEMP_DIR"`
	SiteURL               string      `json:"site_url" env:"SITE_URL"`
	Scheme                Scheme      `json:"scheme"`
	Cors                  Cors        `json:"cors" envPrefix:"CORS_"`
}

func DefaultConfig() *Config {
	tempDir := filepath.Join(flags.DataDir, "temp")
	indexDir := filepath.Join(flags.DataDir, "bleve")
	logPath := filepath.Join(flags.DataDir, "log/log.log")
	dbPath := filepath.Join(flags.DataDir, "data.db")
	return &Config{
		TempDir: "temp",
		Scheme: Scheme{
			Address:    "0.0.0.0",
			UnixFile:   "",
			HttpPort:   5244,
			HttpsPort:  -1,
			ForceHttps: false,
			CertFile:   "",
			KeyFile:    "",
		},
		JwtSecret:      random.String(16),
		TokenExpiresIn: 48,
		TempDir:        tempDir,
		Database: Database{
			Type:        "sqlite3",
			Port:        0,
			TablePrefix: "x_",
			DBFile:      dbPath,
		},
		Meilisearch: Meilisearch{
			Host: "http://localhost:7700",
		},
		BleveDir: indexDir,
		Log: LogConfig{
			Enable:     true,
			Name:       logPath,
			MaxSize:    50,
			MaxBackups: 30,
			MaxAge:     28,
		},
		MaxConnections:        0,
		MaxConcurrency:        64,
		TlsInsecureSkipVerify: true,
		Tasks: TasksConfig{
			Download: TaskConfig{
				Workers:  5,
				MaxRetry: 1,
				// TaskPersistant: true,
			},
			Transfer: TaskConfig{
				Workers:  5,
				MaxRetry: 2,
				// TaskPersistant: true,
			},
			Upload: TaskConfig{
				Workers: 5,
			},
			Copy: TaskConfig{
				Workers:  5,
				MaxRetry: 2,
				// TaskPersistant: true,
			},
			Decompress: TaskConfig{
				Workers:  5,
				MaxRetry: 2,
				// TaskPersistant: true,
			},
			DecompressUpload: TaskConfig{
				Workers:  5,
				MaxRetry: 2,
			},
			AllowRetryCanceled: false,
			Address:            "0.0.0.0",
			HttpPort:           5244,
		},
		Cors: Cors{
			AllowOrigins: []string{"*"},
			AllowMethods: []string{"*"},
			AllowHeaders: []string{"*"},
		},
		S3: S3{
			Enable: false,
			Port:   5246,
			SSL:    false,
		},
		FTP: FTP{
			Enable:                  false,
			Listen:                  ":5221",
			FindPasvPortAttempts:    50,
			ActiveTransferPortNon20: false,
			IdleTimeout:             900,
			ConnectionTimeout:       30,
			DisableActiveMode:       false,
			DefaultTransferBinary:   false,
			EnableActiveConnIPCheck: true,
			EnablePasvConnIPCheck:   true,
		},
		SFTP: SFTP{
			Enable: false,
			Listen: ":5222",
		},
		LastLaunchedVersion: "",
	}
}
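For orientation, roughly what the slimmed-down Config above serializes to with its defaults. This JSON is reconstructed from the struct tags and DefaultConfig and is illustrative, not a file shipped by the project:

{
  "last_launched_version": "",
  "temp_dir": "temp",
  "site_url": "",
  "scheme": {
    "address": "0.0.0.0",
    "http_port": 5244,
    "https_port": 0,
    "force_https": false,
    "cert_file": "",
    "key_file": "",
    "unix_file": "",
    "unix_file_perm": "",
    "enable_h2c": false
  },
  "cors": {
    "allow_origins": ["*"],
    "allow_methods": ["*"],
    "allow_headers": ["*"]
  }
}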
@ -1,33 +1,10 @@
package conf

import (
	"net/url"
	"regexp"
)
import "regexp"

var (
	BuiltAt    string
	GitAuthor  string
	GitCommit  string
	Version    string = "dev"
	WebVersion string
	Conf       *Config
	SitePath   string
)

var (
	Conf *Config
	URL  *url.URL
)

var SlicesMap = make(map[string][]string)
var FilenameCharMap = make(map[string]string)
var PrivacyReg []*regexp.Regexp

var (
	// StoragesLoaded indicates whether all storages have been loaded
	StoragesLoaded = false
)
var (
	RawIndexHtml string
	ManageHtml   string
	IndexHtml    string
)
@ -1,20 +0,0 @@
package driver

type Config struct {
	Name              string `json:"name"`
	LocalSort         bool   `json:"local_sort"`
	OnlyLocal         bool   `json:"only_local"`
	OnlyProxy         bool   `json:"only_proxy"`
	NoCache           bool   `json:"no_cache"`
	NoUpload          bool   `json:"no_upload"`
	NeedMs            bool   `json:"need_ms"` // whether extra input from the user is needed, such as a validation code
	DefaultRoot       string `json:"default_root"`
	CheckStatus       bool   `json:"-"`
	Alert             string `json:"alert"` // one of: info, success, warning, danger
	NoOverwriteUpload bool   `json:"-"`     // whether overwrite upload is supported
	ProxyRangeOption  bool   `json:"-"`
}

func (c Config) MustProxy() bool {
	return c.OnlyProxy || c.OnlyLocal
}
internal/driver/var.go (new file, 9 lines)
@ -0,0 +1,9 @@
package driver

import (
	"github.com/hashicorp/go-plugin"
)

var (
	PluginMap map[string]plugin.Plugin
)
@ -1,179 +0,0 @@
package fs

import (
	"context"
	"fmt"
	"net/http"
	stdpath "path"
	"time"

	"github.com/alist-org/alist/v3/internal/conf"
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/errs"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/internal/op"
	"github.com/alist-org/alist/v3/internal/stream"
	"github.com/alist-org/alist/v3/internal/task"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/pkg/errors"
	"github.com/xhofe/tache"
)

type CopyTask struct {
	task.TaskExtension
	Status       string        `json:"-"` // don't save status, to save space
	SrcObjPath   string        `json:"src_path"`
	DstDirPath   string        `json:"dst_path"`
	srcStorage   driver.Driver `json:"-"`
	dstStorage   driver.Driver `json:"-"`
	SrcStorageMp string        `json:"src_storage_mp"`
	DstStorageMp string        `json:"dst_storage_mp"`
}

func (t *CopyTask) GetName() string {
	return fmt.Sprintf("copy [%s](%s) to [%s](%s)", t.SrcStorageMp, t.SrcObjPath, t.DstStorageMp, t.DstDirPath)
}

func (t *CopyTask) GetStatus() string {
	return t.Status
}

func (t *CopyTask) Run() error {
	t.ReinitCtx()
	t.ClearEndTime()
	t.SetStartTime(time.Now())
	defer func() { t.SetEndTime(time.Now()) }()
	var err error
	if t.srcStorage == nil {
		t.srcStorage, err = op.GetStorageByMountPath(t.SrcStorageMp)
	}
	if t.dstStorage == nil {
		t.dstStorage, err = op.GetStorageByMountPath(t.DstStorageMp)
	}
	if err != nil {
		return errors.WithMessage(err, "failed get storage")
	}
	return copyBetween2Storages(t, t.srcStorage, t.dstStorage, t.SrcObjPath, t.DstDirPath)
}

var CopyTaskManager *tache.Manager[*CopyTask]

// _copy: if src and dst are in the same storage, try the driver's own Copy;
// otherwise enqueue an asynchronous copy task.
func _copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool) (task.TaskExtensionInfo, error) {
	srcStorage, srcObjActualPath, err := op.GetStorageAndActualPath(srcObjPath)
	if err != nil {
		return nil, errors.WithMessage(err, "failed get src storage")
	}
	dstStorage, dstDirActualPath, err := op.GetStorageAndActualPath(dstDirPath)
	if err != nil {
		return nil, errors.WithMessage(err, "failed get dst storage")
	}
	// if in the same storage, just call driver.Copy
	if srcStorage.GetStorage() == dstStorage.GetStorage() {
		err = op.Copy(ctx, srcStorage, srcObjActualPath, dstDirActualPath, lazyCache...)
		if !errors.Is(err, errs.NotImplement) && !errors.Is(err, errs.NotSupport) {
			return nil, err
		}
	}
	if ctx.Value(conf.NoTaskKey) != nil {
		srcObj, err := op.Get(ctx, srcStorage, srcObjActualPath)
		if err != nil {
			return nil, errors.WithMessagef(err, "failed get src [%s] file", srcObjPath)
		}
		if !srcObj.IsDir() {
			// copy file directly
			link, _, err := op.Link(ctx, srcStorage, srcObjActualPath, model.LinkArgs{
				Header: http.Header{},
			})
			if err != nil {
				return nil, errors.WithMessagef(err, "failed get [%s] link", srcObjPath)
			}
			fs := stream.FileStream{
				Obj: srcObj,
				Ctx: ctx,
			}
			// any link provided is seekable
			ss, err := stream.NewSeekableStream(fs, link)
			if err != nil {
				return nil, errors.WithMessagef(err, "failed get [%s] stream", srcObjPath)
			}
			return nil, op.Put(ctx, dstStorage, dstDirActualPath, ss, nil, false)
		}
	}
	// not in the same storage
	taskCreator, _ := ctx.Value("user").(*model.User)
	t := &CopyTask{
		TaskExtension: task.TaskExtension{
			Creator: taskCreator,
		},
		srcStorage:   srcStorage,
		dstStorage:   dstStorage,
		SrcObjPath:   srcObjActualPath,
		DstDirPath:   dstDirActualPath,
		SrcStorageMp: srcStorage.GetStorage().MountPath,
		DstStorageMp: dstStorage.GetStorage().MountPath,
	}
	CopyTaskManager.Add(t)
	return t, nil
}

func copyBetween2Storages(t *CopyTask, srcStorage, dstStorage driver.Driver, srcObjPath, dstDirPath string) error {
	t.Status = "getting src object"
	srcObj, err := op.Get(t.Ctx(), srcStorage, srcObjPath)
	if err != nil {
		return errors.WithMessagef(err, "failed get src [%s] file", srcObjPath)
	}
	if srcObj.IsDir() {
		t.Status = "src object is dir, listing objs"
		objs, err := op.List(t.Ctx(), srcStorage, srcObjPath, model.ListArgs{})
		if err != nil {
			return errors.WithMessagef(err, "failed list src [%s] objs", srcObjPath)
|
||||
}
|
||||
for _, obj := range objs {
|
||||
if utils.IsCanceled(t.Ctx()) {
|
||||
return nil
|
||||
}
|
||||
srcObjPath := stdpath.Join(srcObjPath, obj.GetName())
|
||||
dstObjPath := stdpath.Join(dstDirPath, srcObj.GetName())
|
||||
CopyTaskManager.Add(&CopyTask{
|
||||
TaskExtension: task.TaskExtension{
|
||||
Creator: t.GetCreator(),
|
||||
},
|
||||
srcStorage: srcStorage,
|
||||
dstStorage: dstStorage,
|
||||
SrcObjPath: srcObjPath,
|
||||
DstDirPath: dstObjPath,
|
||||
SrcStorageMp: srcStorage.GetStorage().MountPath,
|
||||
DstStorageMp: dstStorage.GetStorage().MountPath,
|
||||
})
|
||||
}
|
||||
t.Status = "src object is dir, added all copy tasks of objs"
|
||||
return nil
|
||||
}
|
||||
return copyFileBetween2Storages(t, srcStorage, dstStorage, srcObjPath, dstDirPath)
|
||||
}
|
||||
|
||||
func copyFileBetween2Storages(tsk *CopyTask, srcStorage, dstStorage driver.Driver, srcFilePath, dstDirPath string) error {
|
||||
srcFile, err := op.Get(tsk.Ctx(), srcStorage, srcFilePath)
|
||||
if err != nil {
|
||||
return errors.WithMessagef(err, "failed get src [%s] file", srcFilePath)
|
||||
}
|
||||
tsk.SetTotalBytes(srcFile.GetSize())
|
||||
link, _, err := op.Link(tsk.Ctx(), srcStorage, srcFilePath, model.LinkArgs{
|
||||
Header: http.Header{},
|
||||
})
|
||||
if err != nil {
|
||||
return errors.WithMessagef(err, "failed get [%s] link", srcFilePath)
|
||||
}
|
||||
fs := stream.FileStream{
|
||||
Obj: srcFile,
|
||||
Ctx: tsk.Ctx(),
|
||||
}
|
||||
// any link provided is seekable
|
||||
ss, err := stream.NewSeekableStream(fs, link)
|
||||
if err != nil {
|
||||
return errors.WithMessagef(err, "failed get [%s] stream", srcFilePath)
|
||||
}
|
||||
return op.Put(tsk.Ctx(), dstStorage, dstDirPath, ss, tsk.SetProgress, true)
|
||||
}
|
@ -1,25 +0,0 @@
|
||||
package model
|
||||
|
||||
import "io"
|
||||
|
||||
// File is basic file level accessing interface
|
||||
type File interface {
|
||||
io.Reader
|
||||
io.ReaderAt
|
||||
io.Seeker
|
||||
io.Closer
|
||||
}
|
||||
|
||||
type NopMFileIF interface {
|
||||
io.Reader
|
||||
io.ReaderAt
|
||||
io.Seeker
|
||||
}
|
||||
type NopMFile struct {
|
||||
NopMFileIF
|
||||
}
|
||||
|
||||
func (NopMFile) Close() error { return nil }
|
||||
func NewNopMFile(r NopMFileIF) File {
|
||||
return NopMFile{r}
|
||||
}
|
@ -1,39 +0,0 @@
|
||||
package model
|
||||
|
||||
const (
|
||||
SINGLE = iota
|
||||
SITE
|
||||
STYLE
|
||||
PREVIEW
|
||||
GLOBAL
|
||||
OFFLINE_DOWNLOAD
|
||||
INDEX
|
||||
SSO
|
||||
LDAP
|
||||
S3
|
||||
FTP
|
||||
TRAFFIC
|
||||
)
|
||||
|
||||
const (
|
||||
PUBLIC = iota
|
||||
PRIVATE
|
||||
READONLY
|
||||
DEPRECATED
|
||||
)
|
||||
|
||||
type SettingItem struct {
|
||||
Key string `json:"key" gorm:"primaryKey" binding:"required"` // unique key
|
||||
Value string `json:"value"` // value
|
||||
PreDefault string `json:"-" gorm:"-:all"` // deprecated value
|
||||
Help string `json:"help"` // help message
|
||||
Type string `json:"type"` // string, number, bool, select
|
||||
Options string `json:"options"` // values for select
|
||||
Group int `json:"group"` // use to group setting in frontend
|
||||
Flag int `json:"flag"` // 0 = public, 1 = private, 2 = readonly, 3 = deprecated, etc.
|
||||
Index uint `json:"index"`
|
||||
}
|
||||
|
||||
func (s SettingItem) IsDeprecated() bool {
|
||||
return s.Flag == DEPRECATED
|
||||
}
|
@ -1,11 +0,0 @@
|
||||
package offline_download
|
||||
|
||||
import (
|
||||
_ "github.com/alist-org/alist/v3/internal/offline_download/115"
|
||||
_ "github.com/alist-org/alist/v3/internal/offline_download/aria2"
|
||||
_ "github.com/alist-org/alist/v3/internal/offline_download/http"
|
||||
_ "github.com/alist-org/alist/v3/internal/offline_download/pikpak"
|
||||
_ "github.com/alist-org/alist/v3/internal/offline_download/qbit"
|
||||
_ "github.com/alist-org/alist/v3/internal/offline_download/thunder"
|
||||
_ "github.com/alist-org/alist/v3/internal/offline_download/transmission"
|
||||
)
|
@ -1,275 +0,0 @@
|
||||
package tool
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
"github.com/alist-org/alist/v3/internal/stream"
|
||||
"github.com/alist-org/alist/v3/internal/task"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/xhofe/tache"
|
||||
"net/http"
|
||||
"os"
|
||||
stdpath "path"
|
||||
"path/filepath"
|
||||
"time"
|
||||
)
|
||||
|
||||
type TransferTask struct {
|
||||
task.TaskExtension
|
||||
Status string `json:"-"` //don't save status to save space
|
||||
SrcObjPath string `json:"src_obj_path"`
|
||||
DstDirPath string `json:"dst_dir_path"`
|
||||
SrcStorage driver.Driver `json:"-"`
|
||||
DstStorage driver.Driver `json:"-"`
|
||||
SrcStorageMp string `json:"src_storage_mp"`
|
||||
DstStorageMp string `json:"dst_storage_mp"`
|
||||
DeletePolicy DeletePolicy `json:"delete_policy"`
|
||||
}
|
||||
|
||||
func (t *TransferTask) Run() error {
|
||||
t.ReinitCtx()
|
||||
t.ClearEndTime()
|
||||
t.SetStartTime(time.Now())
|
||||
defer func() { t.SetEndTime(time.Now()) }()
|
||||
if t.SrcStorage == nil {
|
||||
return transferStdPath(t)
|
||||
} else {
|
||||
return transferObjPath(t)
|
||||
}
|
||||
}
|
||||
|
||||
func (t *TransferTask) GetName() string {
|
||||
return fmt.Sprintf("transfer [%s](%s) to [%s](%s)", t.SrcStorageMp, t.SrcObjPath, t.DstStorageMp, t.DstDirPath)
|
||||
}
|
||||
|
||||
func (t *TransferTask) GetStatus() string {
|
||||
return t.Status
|
||||
}
|
||||
|
||||
func (t *TransferTask) OnSucceeded() {
|
||||
if t.DeletePolicy == DeleteOnUploadSucceed || t.DeletePolicy == DeleteAlways {
|
||||
if t.SrcStorage == nil {
|
||||
removeStdTemp(t)
|
||||
} else {
|
||||
removeObjTemp(t)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *TransferTask) OnFailed() {
|
||||
if t.DeletePolicy == DeleteOnUploadFailed || t.DeletePolicy == DeleteAlways {
|
||||
if t.SrcStorage == nil {
|
||||
removeStdTemp(t)
|
||||
} else {
|
||||
removeObjTemp(t)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
TransferTaskManager *tache.Manager[*TransferTask]
|
||||
)
|
||||
|
||||
func transferStd(ctx context.Context, tempDir, dstDirPath string, deletePolicy DeletePolicy) error {
|
||||
dstStorage, dstDirActualPath, err := op.GetStorageAndActualPath(dstDirPath)
|
||||
if err != nil {
|
||||
return errors.WithMessage(err, "failed get dst storage")
|
||||
}
|
||||
entries, err := os.ReadDir(tempDir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
taskCreator, _ := ctx.Value("user").(*model.User)
|
||||
for _, entry := range entries {
|
||||
t := &TransferTask{
|
||||
TaskExtension: task.TaskExtension{
|
||||
Creator: taskCreator,
|
||||
},
|
||||
SrcObjPath: stdpath.Join(tempDir, entry.Name()),
|
||||
DstDirPath: dstDirActualPath,
|
||||
DstStorage: dstStorage,
|
||||
DstStorageMp: dstStorage.GetStorage().MountPath,
|
||||
DeletePolicy: deletePolicy,
|
||||
}
|
||||
TransferTaskManager.Add(t)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func transferStdPath(t *TransferTask) error {
|
||||
t.Status = "getting src object"
|
||||
info, err := os.Stat(t.SrcObjPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if info.IsDir() {
|
||||
t.Status = "src object is dir, listing objs"
|
||||
entries, err := os.ReadDir(t.SrcObjPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, entry := range entries {
|
||||
srcRawPath := stdpath.Join(t.SrcObjPath, entry.Name())
|
||||
dstObjPath := stdpath.Join(t.DstDirPath, info.Name())
|
||||
t := &TransferTask{
|
||||
TaskExtension: task.TaskExtension{
|
||||
Creator: t.Creator,
|
||||
},
|
||||
SrcObjPath: srcRawPath,
|
||||
DstDirPath: dstObjPath,
|
||||
DstStorage: t.DstStorage,
|
||||
SrcStorageMp: t.SrcStorageMp,
|
||||
DstStorageMp: t.DstStorageMp,
|
||||
DeletePolicy: t.DeletePolicy,
|
||||
}
|
||||
TransferTaskManager.Add(t)
|
||||
}
|
||||
t.Status = "src object is dir, added all transfer tasks of files"
|
||||
return nil
|
||||
}
|
||||
return transferStdFile(t)
|
||||
}
|
||||
|
||||
func transferStdFile(t *TransferTask) error {
|
||||
rc, err := os.Open(t.SrcObjPath)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to open file %s", t.SrcObjPath)
|
||||
}
|
||||
info, err := rc.Stat()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to get file %s", t.SrcObjPath)
|
||||
}
|
||||
mimetype := utils.GetMimeType(t.SrcObjPath)
|
||||
s := &stream.FileStream{
|
||||
Ctx: nil,
|
||||
Obj: &model.Object{
|
||||
Name: filepath.Base(t.SrcObjPath),
|
||||
Size: info.Size(),
|
||||
Modified: info.ModTime(),
|
||||
IsFolder: false,
|
||||
},
|
||||
Reader: rc,
|
||||
Mimetype: mimetype,
|
||||
Closers: utils.NewClosers(rc),
|
||||
}
|
||||
t.SetTotalBytes(info.Size())
|
||||
return op.Put(t.Ctx(), t.DstStorage, t.DstDirPath, s, t.SetProgress)
|
||||
}
|
||||
|
||||
func removeStdTemp(t *TransferTask) {
|
||||
info, err := os.Stat(t.SrcObjPath)
|
||||
if err != nil || info.IsDir() {
|
||||
return
|
||||
}
|
||||
if err := os.Remove(t.SrcObjPath); err != nil {
|
||||
log.Errorf("failed to delete temp file %s, error: %s", t.SrcObjPath, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func transferObj(ctx context.Context, tempDir, dstDirPath string, deletePolicy DeletePolicy) error {
|
||||
srcStorage, srcObjActualPath, err := op.GetStorageAndActualPath(tempDir)
|
||||
if err != nil {
|
||||
return errors.WithMessage(err, "failed get src storage")
|
||||
}
|
||||
dstStorage, dstDirActualPath, err := op.GetStorageAndActualPath(dstDirPath)
|
||||
if err != nil {
|
||||
return errors.WithMessage(err, "failed get dst storage")
|
||||
}
|
||||
objs, err := op.List(ctx, srcStorage, srcObjActualPath, model.ListArgs{})
|
||||
if err != nil {
|
||||
return errors.WithMessagef(err, "failed list src [%s] objs", tempDir)
|
||||
}
|
||||
taskCreator, _ := ctx.Value("user").(*model.User) // taskCreator is nil when convert failed
|
||||
for _, obj := range objs {
|
||||
t := &TransferTask{
|
||||
TaskExtension: task.TaskExtension{
|
||||
Creator: taskCreator,
|
||||
},
|
||||
SrcObjPath: stdpath.Join(srcObjActualPath, obj.GetName()),
|
||||
DstDirPath: dstDirActualPath,
|
||||
SrcStorage: srcStorage,
|
||||
DstStorage: dstStorage,
|
||||
SrcStorageMp: srcStorage.GetStorage().MountPath,
|
||||
DstStorageMp: dstStorage.GetStorage().MountPath,
|
||||
DeletePolicy: deletePolicy,
|
||||
}
|
||||
TransferTaskManager.Add(t)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func transferObjPath(t *TransferTask) error {
|
||||
t.Status = "getting src object"
|
||||
srcObj, err := op.Get(t.Ctx(), t.SrcStorage, t.SrcObjPath)
|
||||
if err != nil {
|
||||
return errors.WithMessagef(err, "failed get src [%s] file", t.SrcObjPath)
|
||||
}
|
||||
if srcObj.IsDir() {
|
||||
t.Status = "src object is dir, listing objs"
|
||||
objs, err := op.List(t.Ctx(), t.SrcStorage, t.SrcObjPath, model.ListArgs{})
|
||||
if err != nil {
|
||||
return errors.WithMessagef(err, "failed list src [%s] objs", t.SrcObjPath)
|
||||
}
|
||||
for _, obj := range objs {
|
||||
if utils.IsCanceled(t.Ctx()) {
|
||||
return nil
|
||||
}
|
||||
srcObjPath := stdpath.Join(t.SrcObjPath, obj.GetName())
|
||||
dstObjPath := stdpath.Join(t.DstDirPath, srcObj.GetName())
|
||||
TransferTaskManager.Add(&TransferTask{
|
||||
TaskExtension: task.TaskExtension{
|
||||
Creator: t.Creator,
|
||||
},
|
||||
SrcObjPath: srcObjPath,
|
||||
DstDirPath: dstObjPath,
|
||||
SrcStorage: t.SrcStorage,
|
||||
DstStorage: t.DstStorage,
|
||||
SrcStorageMp: t.SrcStorageMp,
|
||||
DstStorageMp: t.DstStorageMp,
|
||||
DeletePolicy: t.DeletePolicy,
|
||||
})
|
||||
}
|
||||
t.Status = "src object is dir, added all transfer tasks of objs"
|
||||
return nil
|
||||
}
|
||||
return transferObjFile(t)
|
||||
}
|
||||
|
||||
func transferObjFile(t *TransferTask) error {
|
||||
srcFile, err := op.Get(t.Ctx(), t.SrcStorage, t.SrcObjPath)
|
||||
if err != nil {
|
||||
return errors.WithMessagef(err, "failed get src [%s] file", t.SrcObjPath)
|
||||
}
|
||||
link, _, err := op.Link(t.Ctx(), t.SrcStorage, t.SrcObjPath, model.LinkArgs{
|
||||
Header: http.Header{},
|
||||
})
|
||||
if err != nil {
|
||||
return errors.WithMessagef(err, "failed get [%s] link", t.SrcObjPath)
|
||||
}
|
||||
fs := stream.FileStream{
|
||||
Obj: srcFile,
|
||||
Ctx: t.Ctx(),
|
||||
}
|
||||
// any link provided is seekable
|
||||
ss, err := stream.NewSeekableStream(fs, link)
|
||||
if err != nil {
|
||||
return errors.WithMessagef(err, "failed get [%s] stream", t.SrcObjPath)
|
||||
}
|
||||
t.SetTotalBytes(srcFile.GetSize())
|
||||
return op.Put(t.Ctx(), t.DstStorage, t.DstDirPath, ss, t.SetProgress)
|
||||
}
|
||||
|
||||
func removeObjTemp(t *TransferTask) {
|
||||
srcObj, err := op.Get(t.Ctx(), t.SrcStorage, t.SrcObjPath)
|
||||
if err != nil || srcObj.IsDir() {
|
||||
return
|
||||
}
|
||||
if err := op.Remove(t.Ctx(), t.SrcStorage, t.SrcObjPath); err != nil {
|
||||
log.Errorf("failed to delete temp obj %s, error: %s", t.SrcObjPath, err.Error())
|
||||
}
|
||||
}
|
@ -1,29 +0,0 @@
|
||||
package op
|
||||
|
||||
import (
|
||||
"github.com/alist-org/alist/v3/internal/errs"
|
||||
"strings"
|
||||
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// GetStorageAndActualPath Get the corresponding storage and actual path
|
||||
// for path: remove the mount path prefix and join the actual root folder if exists
|
||||
func GetStorageAndActualPath(rawPath string) (storage driver.Driver, actualPath string, err error) {
|
||||
rawPath = utils.FixAndCleanPath(rawPath)
|
||||
storage = GetBalancedStorage(rawPath)
|
||||
if storage == nil {
|
||||
if rawPath == "/" {
|
||||
err = errs.NewErr(errs.StorageNotFound, "please add a storage first")
|
||||
return
|
||||
}
|
||||
err = errs.NewErr(errs.StorageNotFound, "rawPath: %s", rawPath)
|
||||
return
|
||||
}
|
||||
log.Debugln("use storage: ", storage.GetStorage().MountPath)
|
||||
mountPath := utils.GetActualMountPath(storage.GetStorage().MountPath)
|
||||
actualPath = utils.FixAndCleanPath(strings.TrimPrefix(rawPath, mountPath))
|
||||
return
|
||||
}
|
@ -1,8 +0,0 @@
|
||||
package search
|
||||
|
||||
import (
|
||||
_ "github.com/alist-org/alist/v3/internal/search/bleve"
|
||||
_ "github.com/alist-org/alist/v3/internal/search/db"
|
||||
_ "github.com/alist-org/alist/v3/internal/search/db_non_full_text"
|
||||
_ "github.com/alist-org/alist/v3/internal/search/meilisearch"
|
||||
)
|
@ -1,227 +0,0 @@
|
||||
package meilisearch
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/search/searcher"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/google/uuid"
|
||||
"github.com/meilisearch/meilisearch-go"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type searchDocument struct {
|
||||
ID string `json:"id"`
|
||||
model.SearchNode
|
||||
}
|
||||
|
||||
type Meilisearch struct {
|
||||
Client *meilisearch.Client
|
||||
IndexUid string
|
||||
FilterableAttributes []string
|
||||
SearchableAttributes []string
|
||||
}
|
||||
|
||||
func (m *Meilisearch) Config() searcher.Config {
|
||||
return config
|
||||
}
|
||||
|
||||
func (m *Meilisearch) Search(ctx context.Context, req model.SearchReq) ([]model.SearchNode, int64, error) {
|
||||
mReq := &meilisearch.SearchRequest{
|
||||
AttributesToSearchOn: m.SearchableAttributes,
|
||||
Page: int64(req.Page),
|
||||
HitsPerPage: int64(req.PerPage),
|
||||
}
|
||||
if req.Scope != 0 {
|
||||
mReq.Filter = fmt.Sprintf("is_dir = %v", req.Scope == 1)
|
||||
}
|
||||
search, err := m.Client.Index(m.IndexUid).Search(req.Keywords, mReq)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
nodes, err := utils.SliceConvert(search.Hits, func(src any) (model.SearchNode, error) {
|
||||
srcMap := src.(map[string]any)
|
||||
return model.SearchNode{
|
||||
Parent: srcMap["parent"].(string),
|
||||
Name: srcMap["name"].(string),
|
||||
IsDir: srcMap["is_dir"].(bool),
|
||||
Size: int64(srcMap["size"].(float64)),
|
||||
}, nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
return nodes, search.TotalHits, nil
|
||||
}
|
||||
|
||||
func (m *Meilisearch) Index(ctx context.Context, node model.SearchNode) error {
|
||||
return m.BatchIndex(ctx, []model.SearchNode{node})
|
||||
}
|
||||
|
||||
func (m *Meilisearch) BatchIndex(ctx context.Context, nodes []model.SearchNode) error {
|
||||
documents, _ := utils.SliceConvert(nodes, func(src model.SearchNode) (*searchDocument, error) {
|
||||
|
||||
return &searchDocument{
|
||||
ID: uuid.NewString(),
|
||||
SearchNode: src,
|
||||
}, nil
|
||||
})
|
||||
|
||||
_, err := m.Client.Index(m.IndexUid).AddDocuments(documents)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
//// Wait for the task to complete and check
|
||||
//forTask, err := m.Client.WaitForTask(task.TaskUID, meilisearch.WaitParams{
|
||||
// Context: ctx,
|
||||
// Interval: time.Millisecond * 50,
|
||||
//})
|
||||
//if err != nil {
|
||||
// return err
|
||||
//}
|
||||
//if forTask.Status != meilisearch.TaskStatusSucceeded {
|
||||
// return fmt.Errorf("BatchIndex failed, task status is %s", forTask.Status)
|
||||
//}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Meilisearch) getDocumentsByParent(ctx context.Context, parent string) ([]*searchDocument, error) {
|
||||
var result meilisearch.DocumentsResult
|
||||
err := m.Client.Index(m.IndexUid).GetDocuments(&meilisearch.DocumentsQuery{
|
||||
Filter: fmt.Sprintf("parent = '%s'", strings.ReplaceAll(parent, "'", "\\'")),
|
||||
Limit: int64(model.MaxInt),
|
||||
}, &result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return utils.SliceConvert(result.Results, func(src map[string]any) (*searchDocument, error) {
|
||||
return &searchDocument{
|
||||
ID: src["id"].(string),
|
||||
SearchNode: model.SearchNode{
|
||||
Parent: src["parent"].(string),
|
||||
Name: src["name"].(string),
|
||||
IsDir: src["is_dir"].(bool),
|
||||
Size: int64(src["size"].(float64)),
|
||||
},
|
||||
}, nil
|
||||
})
|
||||
}
|
||||
|
||||
func (m *Meilisearch) Get(ctx context.Context, parent string) ([]model.SearchNode, error) {
|
||||
result, err := m.getDocumentsByParent(ctx, parent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return utils.SliceConvert(result, func(src *searchDocument) (model.SearchNode, error) {
|
||||
return src.SearchNode, nil
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func (m *Meilisearch) getParentsByPrefix(ctx context.Context, parent string) ([]string, error) {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
default:
|
||||
parents := []string{parent}
|
||||
get, err := m.getDocumentsByParent(ctx, parent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, node := range get {
|
||||
if node.IsDir {
|
||||
arr, err := m.getParentsByPrefix(ctx, path.Join(node.Parent, node.Name))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
parents = append(parents, arr...)
|
||||
}
|
||||
}
|
||||
return parents, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Meilisearch) DelDirChild(ctx context.Context, prefix string) error {
|
||||
dfs, err := m.getParentsByPrefix(ctx, utils.FixAndCleanPath(prefix))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
utils.SliceReplace(dfs, func(src string) string {
|
||||
return "'" + strings.ReplaceAll(src, "'", "\\'") + "'"
|
||||
})
|
||||
s := fmt.Sprintf("parent IN [%s]", strings.Join(dfs, ","))
|
||||
task, err := m.Client.Index(m.IndexUid).DeleteDocumentsByFilter(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
taskStatus, err := m.getTaskStatus(ctx, task.TaskUID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if taskStatus != meilisearch.TaskStatusSucceeded {
|
||||
return fmt.Errorf("DelDir failed, task status is %s", taskStatus)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Meilisearch) Del(ctx context.Context, prefix string) error {
|
||||
prefix = utils.FixAndCleanPath(prefix)
|
||||
dir, name := path.Split(prefix)
|
||||
get, err := m.getDocumentsByParent(ctx, dir[:len(dir)-1])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var document *searchDocument
|
||||
for _, v := range get {
|
||||
if v.Name == name {
|
||||
document = v
|
||||
break
|
||||
}
|
||||
}
|
||||
if document == nil {
|
||||
// Defensive programming. Document may be the folder, try deleting Child
|
||||
return m.DelDirChild(ctx, prefix)
|
||||
}
|
||||
if document.IsDir {
|
||||
err = m.DelDirChild(ctx, prefix)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
task, err := m.Client.Index(m.IndexUid).DeleteDocument(document.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
taskStatus, err := m.getTaskStatus(ctx, task.TaskUID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if taskStatus != meilisearch.TaskStatusSucceeded {
|
||||
return fmt.Errorf("DelDir failed, task status is %s", taskStatus)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Meilisearch) Release(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Meilisearch) Clear(ctx context.Context) error {
|
||||
_, err := m.Client.Index(m.IndexUid).DeleteAllDocuments()
|
||||
return err
|
||||
}
|
||||
|
||||
func (m *Meilisearch) getTaskStatus(ctx context.Context, taskUID int64) (meilisearch.TaskStatus, error) {
|
||||
forTask, err := m.Client.WaitForTask(taskUID, meilisearch.WaitParams{
|
||||
Context: ctx,
|
||||
Interval: time.Second,
|
||||
})
|
||||
if err != nil {
|
||||
return meilisearch.TaskStatusUnknown, err
|
||||
}
|
||||
return forTask.Status, nil
|
||||
}
|
@ -1,590 +0,0 @@
|
||||
package stream
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"os"
|
||||
|
||||
"github.com/alist-org/alist/v3/internal/errs"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/pkg/http_range"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type FileStream struct {
|
||||
Ctx context.Context
|
||||
model.Obj
|
||||
io.Reader
|
||||
Mimetype string
|
||||
WebPutAsTask bool
|
||||
ForceStreamUpload bool
|
||||
Exist model.Obj //the file existed in the destination, we can reuse some info since we wil overwrite it
|
||||
utils.Closers
|
||||
tmpFile *os.File //if present, tmpFile has full content, it will be deleted at last
|
||||
peekBuff *bytes.Reader
|
||||
}
|
||||
|
||||
func (f *FileStream) GetSize() int64 {
|
||||
if f.tmpFile != nil {
|
||||
info, err := f.tmpFile.Stat()
|
||||
if err == nil {
|
||||
return info.Size()
|
||||
}
|
||||
}
|
||||
return f.Obj.GetSize()
|
||||
}
|
||||
|
||||
func (f *FileStream) GetMimetype() string {
|
||||
return f.Mimetype
|
||||
}
|
||||
|
||||
func (f *FileStream) NeedStore() bool {
|
||||
return f.WebPutAsTask
|
||||
}
|
||||
|
||||
func (f *FileStream) IsForceStreamUpload() bool {
|
||||
return f.ForceStreamUpload
|
||||
}
|
||||
|
||||
func (f *FileStream) Close() error {
|
||||
var err1, err2 error
|
||||
|
||||
err1 = f.Closers.Close()
|
||||
if errors.Is(err1, os.ErrClosed) {
|
||||
err1 = nil
|
||||
}
|
||||
if f.tmpFile != nil {
|
||||
err2 = os.RemoveAll(f.tmpFile.Name())
|
||||
if err2 != nil {
|
||||
err2 = errs.NewErr(err2, "failed to remove tmpFile [%s]", f.tmpFile.Name())
|
||||
} else {
|
||||
f.tmpFile = nil
|
||||
}
|
||||
}
|
||||
|
||||
return errors.Join(err1, err2)
|
||||
}
|
||||
|
||||
func (f *FileStream) GetExist() model.Obj {
|
||||
return f.Exist
|
||||
}
|
||||
func (f *FileStream) SetExist(obj model.Obj) {
|
||||
f.Exist = obj
|
||||
}
|
||||
|
||||
// CacheFullInTempFile save all data into tmpFile. Not recommended since it wears disk,
|
||||
// and can't start upload until the file is written. It's not thread-safe!
|
||||
func (f *FileStream) CacheFullInTempFile() (model.File, error) {
|
||||
if f.tmpFile != nil {
|
||||
return f.tmpFile, nil
|
||||
}
|
||||
if file, ok := f.Reader.(model.File); ok {
|
||||
return file, nil
|
||||
}
|
||||
tmpF, err := utils.CreateTempFile(f.Reader, f.GetSize())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.Add(tmpF)
|
||||
f.tmpFile = tmpF
|
||||
f.Reader = tmpF
|
||||
return f.tmpFile, nil
|
||||
}
|
||||
|
||||
func (f *FileStream) CacheFullInTempFileAndUpdateProgress(up model.UpdateProgress) (model.File, error) {
|
||||
if f.tmpFile != nil {
|
||||
return f.tmpFile, nil
|
||||
}
|
||||
if file, ok := f.Reader.(model.File); ok {
|
||||
return file, nil
|
||||
}
|
||||
tmpF, err := utils.CreateTempFile(&ReaderUpdatingProgress{
|
||||
Reader: f,
|
||||
UpdateProgress: up,
|
||||
}, f.GetSize())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.Add(tmpF)
|
||||
f.tmpFile = tmpF
|
||||
f.Reader = tmpF
|
||||
return f.tmpFile, nil
|
||||
}
|
||||
|
||||
const InMemoryBufMaxSize = 10 // Megabytes
|
||||
const InMemoryBufMaxSizeBytes = InMemoryBufMaxSize * 1024 * 1024
|
||||
|
||||
// RangeRead have to cache all data first since only Reader is provided.
|
||||
// also support a peeking RangeRead at very start, but won't buffer more than 10MB data in memory
|
||||
func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
|
||||
if httpRange.Length == -1 {
|
||||
// 参考 internal/net/request.go
|
||||
httpRange.Length = f.GetSize() - httpRange.Start
|
||||
}
|
||||
if f.peekBuff != nil && httpRange.Start < int64(f.peekBuff.Len()) && httpRange.Start+httpRange.Length-1 < int64(f.peekBuff.Len()) {
|
||||
return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil
|
||||
}
|
||||
if f.tmpFile == nil {
|
||||
if httpRange.Start == 0 && httpRange.Length <= InMemoryBufMaxSizeBytes && f.peekBuff == nil {
|
||||
bufSize := utils.Min(httpRange.Length, f.GetSize())
|
||||
newBuf := bytes.NewBuffer(make([]byte, 0, bufSize))
|
||||
n, err := utils.CopyWithBufferN(newBuf, f.Reader, bufSize)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if n != bufSize {
|
||||
return nil, fmt.Errorf("stream RangeRead did not get all data in peek, expect =%d ,actual =%d", bufSize, n)
|
||||
}
|
||||
f.peekBuff = bytes.NewReader(newBuf.Bytes())
|
||||
f.Reader = io.MultiReader(f.peekBuff, f.Reader)
|
||||
return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil
|
||||
} else {
|
||||
_, err := f.CacheFullInTempFile()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
return io.NewSectionReader(f.tmpFile, httpRange.Start, httpRange.Length), nil
|
||||
}
|
||||
|
||||
var _ model.FileStreamer = (*SeekableStream)(nil)
|
||||
var _ model.FileStreamer = (*FileStream)(nil)
|
||||
|
||||
//var _ seekableStream = (*FileStream)(nil)
|
||||
|
||||
// for most internal stream, which is either RangeReadCloser or MFile
|
||||
type SeekableStream struct {
|
||||
FileStream
|
||||
Link *model.Link
|
||||
// should have one of belows to support rangeRead
|
||||
rangeReadCloser model.RangeReadCloserIF
|
||||
mFile model.File
|
||||
}
|
||||
|
||||
func NewSeekableStream(fs FileStream, link *model.Link) (*SeekableStream, error) {
|
||||
if len(fs.Mimetype) == 0 {
|
||||
fs.Mimetype = utils.GetMimeType(fs.Obj.GetName())
|
||||
}
|
||||
ss := SeekableStream{FileStream: fs, Link: link}
|
||||
if ss.Reader != nil {
|
||||
result, ok := ss.Reader.(model.File)
|
||||
if ok {
|
||||
ss.mFile = result
|
||||
ss.Closers.Add(result)
|
||||
return &ss, nil
|
||||
}
|
||||
}
|
||||
if ss.Link != nil {
|
||||
if ss.Link.MFile != nil {
|
||||
mFile := ss.Link.MFile
|
||||
if _, ok := mFile.(*os.File); !ok {
|
||||
mFile = &RateLimitFile{
|
||||
File: mFile,
|
||||
Limiter: ServerDownloadLimit,
|
||||
Ctx: fs.Ctx,
|
||||
}
|
||||
}
|
||||
ss.mFile = mFile
|
||||
ss.Reader = mFile
|
||||
ss.Closers.Add(mFile)
|
||||
return &ss, nil
|
||||
}
|
||||
if ss.Link.RangeReadCloser != nil {
|
||||
ss.rangeReadCloser = RateLimitRangeReadCloser{
|
||||
RangeReadCloserIF: ss.Link.RangeReadCloser,
|
||||
Limiter: ServerDownloadLimit,
|
||||
}
|
||||
ss.Add(ss.rangeReadCloser)
|
||||
return &ss, nil
|
||||
}
|
||||
if len(ss.Link.URL) > 0 {
|
||||
rrc, err := GetRangeReadCloserFromLink(ss.GetSize(), link)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rrc = RateLimitRangeReadCloser{
|
||||
RangeReadCloserIF: rrc,
|
||||
Limiter: ServerDownloadLimit,
|
||||
}
|
||||
ss.rangeReadCloser = rrc
|
||||
ss.Add(rrc)
|
||||
return &ss, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("illegal seekableStream")
|
||||
}
|
||||
|
||||
//func (ss *SeekableStream) Peek(length int) {
|
||||
//
|
||||
//}
|
||||
|
||||
// RangeRead is not thread-safe, pls use it in single thread only.
|
||||
func (ss *SeekableStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
|
||||
if httpRange.Length == -1 {
|
||||
httpRange.Length = ss.GetSize() - httpRange.Start
|
||||
}
|
||||
if ss.mFile != nil {
|
||||
return io.NewSectionReader(ss.mFile, httpRange.Start, httpRange.Length), nil
|
||||
}
|
||||
if ss.tmpFile != nil {
|
||||
return io.NewSectionReader(ss.tmpFile, httpRange.Start, httpRange.Length), nil
|
||||
}
|
||||
if ss.rangeReadCloser != nil {
|
||||
rc, err := ss.rangeReadCloser.RangeRead(ss.Ctx, httpRange)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return rc, nil
|
||||
}
|
||||
return nil, fmt.Errorf("can't find mFile or rangeReadCloser")
|
||||
}
|
||||
|
||||
//func (f *FileStream) GetReader() io.Reader {
|
||||
// return f.Reader
|
||||
//}
|
||||
|
||||
// only provide Reader as full stream when it's demanded. in rapid-upload, we can skip this to save memory
|
||||
func (ss *SeekableStream) Read(p []byte) (n int, err error) {
|
||||
//f.mu.Lock()
|
||||
|
||||
//f.peekedOnce = true
|
||||
//defer f.mu.Unlock()
|
||||
if ss.Reader == nil {
|
||||
if ss.rangeReadCloser == nil {
|
||||
return 0, fmt.Errorf("illegal seekableStream")
|
||||
}
|
||||
rc, err := ss.rangeReadCloser.RangeRead(ss.Ctx, http_range.Range{Length: -1})
|
||||
if err != nil {
|
||||
return 0, nil
|
||||
}
|
||||
ss.Reader = io.NopCloser(rc)
|
||||
}
|
||||
return ss.Reader.Read(p)
|
||||
}
|
||||
|
||||
func (ss *SeekableStream) CacheFullInTempFile() (model.File, error) {
|
||||
if ss.tmpFile != nil {
|
||||
return ss.tmpFile, nil
|
||||
}
|
||||
if _, ok := ss.mFile.(*os.File); ok {
|
||||
return ss.mFile, nil
|
||||
}
|
||||
tmpF, err := utils.CreateTempFile(ss, ss.GetSize())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ss.Add(tmpF)
|
||||
ss.tmpFile = tmpF
|
||||
ss.Reader = tmpF
|
||||
return ss.tmpFile, nil
|
||||
}
|
||||
|
||||
func (ss *SeekableStream) CacheFullInTempFileAndUpdateProgress(up model.UpdateProgress) (model.File, error) {
|
||||
if ss.tmpFile != nil {
|
||||
return ss.tmpFile, nil
|
||||
}
|
||||
if _, ok := ss.mFile.(*os.File); ok {
|
||||
return ss.mFile, nil
|
||||
}
|
||||
tmpF, err := utils.CreateTempFile(&ReaderUpdatingProgress{
|
||||
Reader: ss,
|
||||
UpdateProgress: up,
|
||||
}, ss.GetSize())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ss.Add(tmpF)
|
||||
ss.tmpFile = tmpF
|
||||
ss.Reader = tmpF
|
||||
return ss.tmpFile, nil
|
||||
}
|
||||
|
||||
func (f *FileStream) SetTmpFile(r *os.File) {
|
||||
f.Add(r)
|
||||
f.tmpFile = r
|
||||
f.Reader = r
|
||||
}
|
||||
|
||||
type ReaderWithSize interface {
|
||||
io.ReadCloser
|
||||
GetSize() int64
|
||||
}
|
||||
|
||||
type SimpleReaderWithSize struct {
|
||||
io.Reader
|
||||
Size int64
|
||||
}
|
||||
|
||||
func (r *SimpleReaderWithSize) GetSize() int64 {
|
||||
return r.Size
|
||||
}
|
||||
|
||||
func (r *SimpleReaderWithSize) Close() error {
|
||||
if c, ok := r.Reader.(io.Closer); ok {
|
||||
return c.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type ReaderUpdatingProgress struct {
|
||||
Reader ReaderWithSize
|
||||
model.UpdateProgress
|
||||
offset int
|
||||
}
|
||||
|
||||
func (r *ReaderUpdatingProgress) Read(p []byte) (n int, err error) {
|
||||
n, err = r.Reader.Read(p)
|
||||
r.offset += n
|
||||
r.UpdateProgress(math.Min(100.0, float64(r.offset)/float64(r.Reader.GetSize())*100.0))
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (r *ReaderUpdatingProgress) Close() error {
|
||||
return r.Reader.Close()
|
||||
}
|
||||
|
||||
type SStreamReadAtSeeker interface {
|
||||
model.File
|
||||
GetRawStream() *SeekableStream
|
||||
}
|
||||
|
||||
type readerCur struct {
|
||||
reader io.Reader
|
||||
cur int64
|
||||
}
|
||||
|
||||
type RangeReadReadAtSeeker struct {
|
||||
ss *SeekableStream
|
||||
masterOff int64
|
||||
readers []*readerCur
|
||||
*headCache
|
||||
}
|
||||
|
||||
type headCache struct {
|
||||
*readerCur
|
||||
bufs [][]byte
|
||||
}
|
||||
|
||||
func (c *headCache) read(p []byte) (n int, err error) {
|
||||
pL := len(p)
|
||||
logrus.Debugf("headCache read_%d", pL)
|
||||
if c.cur < int64(pL) {
|
||||
bufL := int64(pL) - c.cur
|
||||
buf := make([]byte, bufL)
|
||||
lr := io.LimitReader(c.reader, bufL)
|
||||
off := 0
|
||||
for c.cur < int64(pL) {
|
||||
n, err = lr.Read(buf[off:])
|
||||
off += n
|
||||
c.cur += int64(n)
|
||||
if err == io.EOF && off == int(bufL) {
|
||||
err = nil
|
||||
}
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
c.bufs = append(c.bufs, buf)
|
||||
}
|
||||
n = 0
|
||||
if c.cur >= int64(pL) {
|
||||
for i := 0; n < pL; i++ {
|
||||
buf := c.bufs[i]
|
||||
r := len(buf)
|
||||
if n+r > pL {
|
||||
r = pL - n
|
||||
}
|
||||
n += copy(p[n:], buf[:r])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
func (r *headCache) close() error {
|
||||
for i := range r.bufs {
|
||||
r.bufs[i] = nil
|
||||
}
|
||||
r.bufs = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *RangeReadReadAtSeeker) InitHeadCache() {
|
||||
if r.ss.Link.MFile == nil && r.masterOff == 0 {
|
||||
reader := r.readers[0]
|
||||
r.readers = r.readers[1:]
|
||||
r.headCache = &headCache{readerCur: reader}
|
||||
}
|
||||
}
|
||||
|
||||
func NewReadAtSeeker(ss *SeekableStream, offset int64, forceRange ...bool) (SStreamReadAtSeeker, error) {
|
||||
if ss.mFile != nil {
|
||||
_, err := ss.mFile.Seek(offset, io.SeekStart)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &FileReadAtSeeker{ss: ss}, nil
|
||||
}
|
||||
r := &RangeReadReadAtSeeker{
|
||||
ss: ss,
|
||||
masterOff: offset,
|
||||
}
|
||||
if offset != 0 || utils.IsBool(forceRange...) {
|
||||
if offset < 0 || offset > ss.GetSize() {
|
||||
return nil, errors.New("offset out of range")
|
||||
}
|
||||
_, err := r.getReaderAtOffset(offset)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
rc := &readerCur{reader: ss, cur: offset}
|
||||
r.readers = append(r.readers, rc)
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (r *RangeReadReadAtSeeker) GetRawStream() *SeekableStream {
|
||||
return r.ss
|
||||
}
|
||||
|
||||
func (r *RangeReadReadAtSeeker) getReaderAtOffset(off int64) (*readerCur, error) {
|
||||
var rc *readerCur
|
||||
for _, reader := range r.readers {
|
||||
if reader.cur == -1 {
|
||||
continue
|
||||
}
|
||||
if reader.cur == off {
|
||||
// logrus.Debugf("getReaderAtOffset match_%d", off)
|
||||
return reader, nil
|
||||
}
|
||||
if reader.cur > 0 && off >= reader.cur && (rc == nil || reader.cur < rc.cur) {
|
||||
rc = reader
|
||||
}
|
||||
}
|
||||
if rc != nil && off-rc.cur <= utils.MB {
|
||||
n, err := utils.CopyWithBufferN(io.Discard, rc.reader, off-rc.cur)
|
||||
rc.cur += n
|
||||
if err == io.EOF && rc.cur == off {
|
||||
err = nil
|
||||
}
|
||||
if err == nil {
|
||||
logrus.Debugf("getReaderAtOffset old_%d", off)
|
||||
return rc, nil
|
||||
}
|
||||
rc.cur = -1
|
||||
}
|
||||
logrus.Debugf("getReaderAtOffset new_%d", off)
|
||||
|
||||
// Range请求不能超过文件大小,有些云盘处理不了就会返回整个文件
|
||||
reader, err := r.ss.RangeRead(http_range.Range{Start: off, Length: r.ss.GetSize() - off})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rc = &readerCur{reader: reader, cur: off}
|
||||
r.readers = append(r.readers, rc)
|
||||
return rc, nil
|
||||
}
|
||||
|
||||
func (r *RangeReadReadAtSeeker) ReadAt(p []byte, off int64) (int, error) {
|
||||
if off == 0 && r.headCache != nil {
|
||||
return r.headCache.read(p)
|
||||
}
|
||||
rc, err := r.getReaderAtOffset(off)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
n, num := 0, 0
|
||||
for num < len(p) {
|
||||
n, err = rc.reader.Read(p[num:])
|
||||
rc.cur += int64(n)
|
||||
num += n
|
||||
if err == nil {
|
||||
continue
|
||||
}
|
||||
if err == io.EOF {
|
||||
// io.EOF是reader读取完了
|
||||
rc.cur = -1
|
||||
// yeka/zip包 没有处理EOF,我们要兼容
|
||||
// https://github.com/yeka/zip/blob/03d6312748a9d6e0bc0c9a7275385c09f06d9c14/reader.go#L433
|
||||
if num == len(p) {
|
||||
err = nil
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
return num, err
|
||||
}
|
||||
|
||||
func (r *RangeReadReadAtSeeker) Seek(offset int64, whence int) (int64, error) {
|
||||
switch whence {
|
||||
case io.SeekStart:
|
||||
case io.SeekCurrent:
|
||||
if offset == 0 {
|
||||
return r.masterOff, nil
|
||||
}
|
||||
offset += r.masterOff
|
||||
case io.SeekEnd:
|
||||
offset += r.ss.GetSize()
|
||||
default:
|
||||
return 0, errs.NotSupport
|
||||
}
|
||||
if offset < 0 {
|
||||
return r.masterOff, errors.New("invalid seek: negative position")
|
||||
}
|
||||
if offset > r.ss.GetSize() {
|
||||
return r.masterOff, io.EOF
|
||||
}
|
||||
r.masterOff = offset
|
||||
return offset, nil
|
||||
}
|
||||
|
||||
func (r *RangeReadReadAtSeeker) Read(p []byte) (n int, err error) {
|
||||
if r.masterOff == 0 && r.headCache != nil {
|
||||
return r.headCache.read(p)
|
||||
}
|
||||
rc, err := r.getReaderAtOffset(r.masterOff)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
n, err = rc.reader.Read(p)
|
||||
rc.cur += int64(n)
|
||||
r.masterOff += int64(n)
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (r *RangeReadReadAtSeeker) Close() error {
|
||||
if r.headCache != nil {
|
||||
_ = r.headCache.close()
|
||||
}
|
||||
return r.ss.Close()
|
||||
}
|
||||
|
||||
type FileReadAtSeeker struct {
|
||||
ss *SeekableStream
|
||||
}
|
||||
|
||||
func (f *FileReadAtSeeker) GetRawStream() *SeekableStream {
|
||||
return f.ss
|
||||
}
|
||||
|
||||
func (f *FileReadAtSeeker) Read(p []byte) (n int, err error) {
|
||||
return f.ss.mFile.Read(p)
|
||||
}
|
||||
|
||||
func (f *FileReadAtSeeker) ReadAt(p []byte, off int64) (n int, err error) {
|
||||
return f.ss.mFile.ReadAt(p, off)
|
||||
}
|
||||
|
||||
func (f *FileReadAtSeeker) Seek(offset int64, whence int) (int64, error) {
|
||||
return f.ss.mFile.Seek(offset, whence)
|
||||
}
|
||||
|
||||
func (f *FileReadAtSeeker) Close() error {
|
||||
return f.ss.Close()
|
||||
}
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user