Compare commits


2 Commits

Author SHA1 Message Date
86393d25e9 feat(ilanzou): enable local sorting 2025-07-25 17:32:16 +08:00
4df53c965c feat(cloudreve): enable local sorting 2025-07-25 17:32:00 +08:00
159 changed files with 2260 additions and 4392 deletions

View File

@ -61,7 +61,7 @@ jobs:
strategy:
matrix:
include:
- target: "!(*musl*|*windows-arm64*|*windows7-*|*android*|*freebsd*)" # xgo and loongarch
- target: "!(*musl*|*windows-arm64*|*android*|*freebsd*)" # xgo and loongarch
hash: "md5"
- target: "linux-!(arm*)-musl*" #musl-not-arm
hash: "md5-linux-musl"
@ -69,8 +69,6 @@ jobs:
hash: "md5-linux-musl-arm"
- target: "windows-arm64" #win-arm64
hash: "md5-windows-arm64"
- target: "windows7-*" #win7
hash: "md5-windows7"
- target: "android-*" #android
hash: "md5-android"
- target: "freebsd-*" #freebsd
@ -93,7 +91,6 @@ jobs:
run: bash build.sh dev web
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
- name: Build
uses: OpenListTeam/cgo-actions@v1.2.2
@ -106,10 +103,10 @@ jobs:
musl-base-url: "https://github.com/OpenListTeam/musl-compilers/releases/latest/download/"
x-flags: |
github.com/OpenListTeam/OpenList/v4/internal/conf.BuiltAt=$built_at
github.com/OpenListTeam/OpenList/v4/internal/conf.GitAuthor=The OpenList Projects Contributors <noreply@openlist.team>
github.com/OpenListTeam/OpenList/v4/internal/conf.GitAuthor=OpenList
github.com/OpenListTeam/OpenList/v4/internal/conf.GitCommit=$git_commit
github.com/OpenListTeam/OpenList/v4/internal/conf.Version=$tag
github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=rolling
github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=dev
- name: Compress
run: |

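The x-flags above are passed to the Go linker as -X import-path.name=value pairs; each one overwrites a package-level string variable in internal/conf at link time, so switching WebVersion from "rolling" to "dev" only changes the string baked into the binary, not any build logic. A minimal sketch of the receiving side (the package path and variable names are taken from the x-flags; the default values here are assumptions):

package conf

// Build metadata, overwritten at link time via e.g.
//   go build -ldflags "-X 'github.com/OpenListTeam/OpenList/v4/internal/conf.Version=v1.2.3'"
// The defaults below apply only when no -X flag is supplied.
var (
	BuiltAt    = "unknown"
	GitAuthor  = "unknown"
	GitCommit  = "unknown"
	Version    = "dev"
	WebVersion = "rolling"
)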
View File

@ -39,7 +39,6 @@ jobs:
run: bash build.sh dev web
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
- name: Build
uses: OpenListTeam/cgo-actions@v1.2.2
@ -50,10 +49,10 @@ jobs:
out-dir: build
x-flags: |
github.com/OpenListTeam/OpenList/v4/internal/conf.BuiltAt=$built_at
github.com/OpenListTeam/OpenList/v4/internal/conf.GitAuthor=The OpenList Projects Contributors <noreply@openlist.team>
github.com/OpenListTeam/OpenList/v4/internal/conf.GitAuthor=OpenList
github.com/OpenListTeam/OpenList/v4/internal/conf.GitCommit=$git_commit
github.com/OpenListTeam/OpenList/v4/internal/conf.Version=$tag
github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=rolling
github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=dev
output: openlist$ext
- name: Upload artifact

View File

@ -66,7 +66,6 @@ jobs:
bash build.sh release ${{ matrix.build-type == 'lite' && 'lite' || '' }} ${{ matrix.target-platform }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
- name: Upload assets
uses: softprops/action-gh-release@v2

View File

@ -31,7 +31,7 @@ env:
REGISTRY: ghcr.io
ARTIFACT_NAME: 'binaries_docker_release'
ARTIFACT_NAME_LITE: 'binaries_docker_release_lite'
RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/ppc64le,linux/riscv64,linux/loong64' ### Temporarily disable Docker builds for linux/s390x architectures for unknown reasons.
RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64'
IMAGE_PUSH: ${{ github.event_name == 'push' || github.event_name == 'workflow_dispatch' }}
permissions:
@ -66,7 +66,6 @@ jobs:
run: bash build.sh release docker-multiplatform
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
- name: Upload artifacts
uses: actions/upload-artifact@v4
@ -106,7 +105,6 @@ jobs:
run: bash build.sh release lite docker-multiplatform
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
- name: Upload artifacts
uses: actions/upload-artifact@v4
@ -127,19 +125,15 @@ jobs:
image: ["latest", "ffmpeg", "aria2", "aio"]
include:
- image: "latest"
base_image_tag: "base"
build_arg: ""
tag_favor: ""
- image: "ffmpeg"
base_image_tag: "ffmpeg"
build_arg: INSTALL_FFMPEG=true
tag_favor: "suffix=-ffmpeg,onlatest=true"
- image: "aria2"
base_image_tag: "aria2"
build_arg: INSTALL_ARIA2=true
tag_favor: "suffix=-aria2,onlatest=true"
- image: "aio"
base_image_tag: "aio"
build_arg: |
INSTALL_FFMPEG=true
INSTALL_ARIA2=true
@ -195,9 +189,7 @@ jobs:
context: .
file: Dockerfile.ci
push: ${{ env.IMAGE_PUSH == 'true' }}
build-args: |
BASE_IMAGE_TAG=${{ matrix.base_image_tag }}
${{ matrix.build_arg }}
build-args: ${{ matrix.build_arg }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
platforms: ${{ env.RELEASE_PLATFORMS }}
@ -211,19 +203,15 @@ jobs:
image: ["latest", "ffmpeg", "aria2", "aio"]
include:
- image: "latest"
base_image_tag: "base"
build_arg: ""
tag_favor: "suffix=-lite,onlatest=true"
- image: "ffmpeg"
base_image_tag: "ffmpeg"
build_arg: INSTALL_FFMPEG=true
tag_favor: "suffix=-lite-ffmpeg,onlatest=true"
- image: "aria2"
base_image_tag: "aria2"
build_arg: INSTALL_ARIA2=true
tag_favor: "suffix=-lite-aria2,onlatest=true"
- image: "aio"
base_image_tag: "aio"
build_arg: |
INSTALL_FFMPEG=true
INSTALL_ARIA2=true
@ -279,9 +267,7 @@ jobs:
context: .
file: Dockerfile.ci
push: ${{ env.IMAGE_PUSH == 'true' }}
build-args: |
BASE_IMAGE_TAG=${{ matrix.base_image_tag }}
${{ matrix.build_arg }}
build-args: ${{ matrix.build_arg }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
platforms: ${{ env.RELEASE_PLATFORMS }}

View File

@ -1,38 +0,0 @@
name: Sync to Gitee
on:
push:
branches:
- main
workflow_dispatch:
jobs:
sync:
runs-on: ubuntu-latest
name: Sync GitHub to Gitee
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Setup SSH
run: |
mkdir -p ~/.ssh
echo "${{ secrets.GITEE_SSH_PRIVATE_KEY }}" > ~/.ssh/id_rsa
chmod 600 ~/.ssh/id_rsa
ssh-keyscan gitee.com >> ~/.ssh/known_hosts
- name: Create single commit and push
run: |
git config user.name "GitHub Actions"
git config user.email "actions@github.com"
# Create a new branch
git checkout --orphan new-main
git add .
git commit -m "Sync from GitHub: $(date)"
# Add Gitee remote and force push
git remote add gitee ${{ vars.GITEE_REPO_URL }}
git push --force gitee new-main:main

View File

@ -20,7 +20,7 @@ env:
IMAGE_NAME_DOCKERHUB: openlist
REGISTRY: ghcr.io
ARTIFACT_NAME: 'binaries_docker_release'
RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/ppc64le,linux/riscv64,linux/loong64' ### Temporarily disable Docker builds for linux/s390x architectures for unknown reasons.
RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64'
IMAGE_PUSH: ${{ github.event_name == 'push' }}
IMAGE_TAGS_BETA: |
type=ref,event=pr
@ -55,7 +55,6 @@ jobs:
run: bash build.sh beta docker-multiplatform
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
- name: Upload artifacts
uses: actions/upload-artifact@v4
@ -78,19 +77,15 @@ jobs:
image: ["latest", "ffmpeg", "aria2", "aio"]
include:
- image: "latest"
base_image_tag: "base"
build_arg: ""
tag_favor: ""
- image: "ffmpeg"
base_image_tag: "ffmpeg"
build_arg: INSTALL_FFMPEG=true
tag_favor: "suffix=-ffmpeg,onlatest=true"
- image: "aria2"
base_image_tag: "aria2"
build_arg: INSTALL_ARIA2=true
tag_favor: "suffix=-aria2,onlatest=true"
- image: "aio"
base_image_tag: "aio"
build_arg: |
INSTALL_FFMPEG=true
INSTALL_ARIA2=true
@ -142,9 +137,7 @@ jobs:
context: .
file: Dockerfile.ci
push: ${{ env.IMAGE_PUSH == 'true' }}
build-args: |
BASE_IMAGE_TAG=${{ matrix.base_image_tag }}
${{ matrix.build_arg }}
build-args: ${{ matrix.build_arg }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
platforms: ${{ env.RELEASE_PLATFORMS }}

View File

@ -1,7 +1,4 @@
### Default image is base. You can add other support by modifying BASE_IMAGE_TAG. The following parameters are supported: base (default), aria2, ffmpeg, aio
ARG BASE_IMAGE_TAG=base
FROM alpine:edge AS builder
FROM docker.io/library/alpine:edge AS builder
LABEL stage=go-builder
WORKDIR /app/
RUN apk add --no-cache bash curl jq gcc git go musl-dev
@ -10,26 +7,51 @@ RUN go mod download
COPY ./ ./
RUN bash build.sh release docker
FROM openlistteam/openlist-base-image:${BASE_IMAGE_TAG}
LABEL MAINTAINER="OpenList"
FROM alpine:edge
ARG INSTALL_FFMPEG=false
ARG INSTALL_ARIA2=false
ARG USER=openlist
ARG UID=1001
ARG GID=1001
LABEL MAINTAINER="OpenList"
WORKDIR /opt/openlist/
RUN apk update && \
apk upgrade --no-cache && \
apk add --no-cache bash ca-certificates su-exec tzdata runit; \
[ "$INSTALL_FFMPEG" = "true" ] && apk add --no-cache ffmpeg; \
[ "$INSTALL_ARIA2" = "true" ] && apk add --no-cache curl aria2 && \
mkdir -p /opt/aria2/.aria2 && \
wget https://github.com/P3TERX/aria2.conf/archive/refs/heads/master.tar.gz -O /tmp/aria-conf.tar.gz && \
tar -zxvf /tmp/aria-conf.tar.gz -C /opt/aria2/.aria2 --strip-components=1 && rm -f /tmp/aria-conf.tar.gz && \
sed -i 's|rpc-secret|#rpc-secret|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/script.conf && \
sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/script.conf && \
mkdir -p /opt/service/stop/aria2/log && \
echo '#!/bin/sh' > /opt/service/stop/aria2/run && \
echo 'exec 2>&1' >> /opt/service/stop/aria2/run && \
echo 'exec aria2c --enable-rpc --rpc-allow-origin-all --conf-path=/opt/aria2/.aria2/aria2.conf' >> /opt/service/stop/aria2/run && \
echo '#!/bin/sh' > /opt/service/stop/aria2/log/run && \
echo 'mkdir -p /opt/openlist/data/log/aria2 2>/dev/null' >> /opt/service/stop/aria2/log/run && \
echo 'exec svlogd /opt/openlist/data/log/aria2' >> /opt/service/stop/aria2/log/run && \
chmod +x /opt/service/stop/aria2/run /opt/service/stop/aria2/log/run && \
touch /opt/aria2/.aria2/aria2.session && \
/opt/aria2/.aria2/tracker.sh ; \
rm -rf /var/cache/apk/*
RUN mkdir -p /opt/service/start/openlist && \
echo '#!/bin/sh' > /opt/service/start/openlist/run && \
echo 'exec 2>&1' >> /opt/service/start/openlist/run && \
echo 'cd /opt/openlist' >> /opt/service/start/openlist/run && \
echo 'exec ./openlist server --no-prefix' >> /opt/service/start/openlist/run && \
chmod +x /opt/service/start/openlist/run
COPY --chmod=755 --from=builder /app/bin/openlist ./
COPY --chmod=755 entrypoint.sh /entrypoint.sh
RUN adduser -u ${UID} -g ${GID} -h /opt/openlist/data -D -s /bin/sh ${USER} \
&& chown -R ${UID}:${GID} /opt \
&& chown -R ${UID}:${GID} /entrypoint.sh
USER ${USER}
RUN /entrypoint.sh version
ENV UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
VOLUME /opt/openlist/data/
EXPOSE 5244 5245
CMD [ "/entrypoint.sh" ]

View File

@ -1,26 +1,49 @@
ARG BASE_IMAGE_TAG=base
FROM ghcr.io/openlistteam/openlist-base-image:${BASE_IMAGE_TAG}
LABEL MAINTAINER="OpenList"
FROM docker.io/library/alpine:edge
ARG TARGETPLATFORM
ARG INSTALL_FFMPEG=false
ARG INSTALL_ARIA2=false
ARG USER=openlist
ARG UID=1001
ARG GID=1001
LABEL MAINTAINER="OpenList"
WORKDIR /opt/openlist/
RUN apk update && \
apk upgrade --no-cache && \
apk add --no-cache bash ca-certificates su-exec tzdata runit; \
[ "$INSTALL_FFMPEG" = "true" ] && apk add --no-cache ffmpeg; \
[ "$INSTALL_ARIA2" = "true" ] && apk add --no-cache curl aria2 && \
mkdir -p /opt/aria2/.aria2 && \
wget https://github.com/P3TERX/aria2.conf/archive/refs/heads/master.tar.gz -O /tmp/aria-conf.tar.gz && \
tar -zxvf /tmp/aria-conf.tar.gz -C /opt/aria2/.aria2 --strip-components=1 && rm -f /tmp/aria-conf.tar.gz && \
sed -i 's|rpc-secret|#rpc-secret|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/script.conf && \
sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/script.conf && \
mkdir -p /opt/service/stop/aria2/log && \
echo '#!/bin/sh' > /opt/service/stop/aria2/run && \
echo 'exec 2>&1' >> /opt/service/stop/aria2/run && \
echo 'exec aria2c --enable-rpc --rpc-allow-origin-all --conf-path=/opt/aria2/.aria2/aria2.conf' >> /opt/service/stop/aria2/run && \
echo '#!/bin/sh' > /opt/service/stop/aria2/log/run && \
echo 'mkdir -p /opt/openlist/data/log/aria2 2>/dev/null' >> /opt/service/stop/aria2/log/run && \
echo 'exec svlogd /opt/openlist/data/log/aria2' >> /opt/service/stop/aria2/log/run && \
chmod +x /opt/service/stop/aria2/run /opt/service/stop/aria2/log/run && \
touch /opt/aria2/.aria2/aria2.session && \
/opt/aria2/.aria2/tracker.sh ; \
rm -rf /var/cache/apk/*
RUN mkdir -p /opt/service/start/openlist && \
echo '#!/bin/sh' > /opt/service/start/openlist/run && \
echo 'exec 2>&1' >> /opt/service/start/openlist/run && \
echo 'cd /opt/openlist' >> /opt/service/start/openlist/run && \
echo 'exec ./openlist server --no-prefix' >> /opt/service/start/openlist/run && \
chmod +x /opt/service/start/openlist/run
COPY --chmod=755 /build/${TARGETPLATFORM}/openlist ./
COPY --chmod=755 entrypoint.sh /entrypoint.sh
RUN adduser -u ${UID} -g ${GID} -h /opt/openlist/data -D -s /bin/sh ${USER} \
&& chown -R ${UID}:${GID} /opt \
&& chown -R ${UID}:${GID} /entrypoint.sh
USER ${USER}
RUN /entrypoint.sh version
ENV UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
VOLUME /opt/openlist/data/
EXPOSE 5244 5245
CMD [ "/entrypoint.sh" ]

build.sh
View File

@ -4,9 +4,6 @@ builtAt="$(date +'%F %T %z')"
gitAuthor="The OpenList Projects Contributors <noreply@openlist.team>"
gitCommit=$(git log --pretty=format:"%h" -1)
# Set frontend repository, default to OpenListTeam/OpenList-Frontend
frontendRepo="${FRONTEND_REPO:-OpenListTeam/OpenList-Frontend}"
githubAuthArgs=""
if [ -n "$GITHUB_TOKEN" ]; then
githubAuthArgs="--header \"Authorization: Bearer $GITHUB_TOKEN\""
@ -20,15 +17,15 @@ fi
if [ "$1" = "dev" ]; then
version="dev"
webVersion="rolling"
webVersion="dev"
elif [ "$1" = "beta" ]; then
version="beta"
webVersion="rolling"
webVersion="dev"
else
git tag -d beta || true
# Always true if there's no tag
version=$(git describe --abbrev=0 --tags 2>/dev/null || echo "v0.0.0")
webVersion=$(eval "curl -fsSL --max-time 2 $githubAuthArgs \"https://api.github.com/repos/$frontendRepo/releases/latest\"" | grep "tag_name" | head -n 1 | awk -F ":" '{print $2}' | sed 's/\"//g;s/,//g;s/ //g')
webVersion=$(eval "curl -fsSL --max-time 2 $githubAuthArgs \"https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases/latest\"" | grep "tag_name" | head -n 1 | awk -F ":" '{print $2}' | sed 's/\"//g;s/,//g;s/ //g')
fi
echo "backend version: $version"
@ -48,21 +45,30 @@ ldflags="\
-X 'github.com/OpenListTeam/OpenList/v4/internal/conf.WebVersion=$webVersion' \
"
FetchWebRolling() {
pre_release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/$frontendRepo/releases/tags/rolling\"")
FetchWebDev() {
pre_release_tag=$(eval "curl -fsSL --max-time 2 $githubAuthArgs https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases" | jq -r 'map(select(.prerelease)) | first | .tag_name')
if [ -z "$pre_release_tag" ] || [ "$pre_release_tag" == "null" ]; then
# fall back to latest release
pre_release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases/latest\"")
else
pre_release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases/tags/$pre_release_tag\"")
fi
pre_release_assets=$(echo "$pre_release_json" | jq -r '.assets[].browser_download_url')
# There is no lite for rolling
pre_release_tar_url=$(echo "$pre_release_assets" | grep "openlist-frontend-dist" | grep -v "lite" | grep "\.tar\.gz$")
curl -fsSL "$pre_release_tar_url" -o dist.tar.gz
if [ "$useLite" = true ]; then
pre_release_tar_url=$(echo "$pre_release_assets" | grep "openlist-frontend-dist-lite" | grep "\.tar\.gz$")
else
pre_release_tar_url=$(echo "$pre_release_assets" | grep "openlist-frontend-dist" | grep -v "lite" | grep "\.tar\.gz$")
fi
curl -fsSL "$pre_release_tar_url" -o web-dist-dev.tar.gz
rm -rf public/dist && mkdir -p public/dist
tar -zxvf dist.tar.gz -C public/dist
rm -rf dist.tar.gz
tar -zxvf web-dist-dev.tar.gz -C public/dist
rm -rf web-dist-dev.tar.gz
}
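FetchWebDev resolves which frontend build to bundle: it asks the GitHub releases API for the newest prerelease and falls back to the latest stable release when none exists. A compressed Go sketch of the same lookup (the endpoint and the tag_name/prerelease fields are the public GitHub API; auth headers and timeouts omitted, and the fallback here takes the newest listed release rather than calling /releases/latest as the script does):

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

type release struct {
	TagName    string `json:"tag_name"`
	Prerelease bool   `json:"prerelease"`
}

// latestDevTag returns the newest prerelease tag, falling back to the
// newest release of any kind, mirroring the jq filter above.
func latestDevTag(repo string) (string, error) {
	resp, err := http.Get("https://api.github.com/repos/" + repo + "/releases")
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	var rs []release
	if err := json.NewDecoder(resp.Body).Decode(&rs); err != nil {
		return "", err
	}
	for _, r := range rs { // releases are returned newest first
		if r.Prerelease {
			return r.TagName, nil
		}
	}
	if len(rs) > 0 {
		return rs[0].TagName, nil
	}
	return "", fmt.Errorf("no releases found for %s", repo)
}

func main() {
	tag, err := latestDevTag("OpenListTeam/OpenList-Frontend")
	fmt.Println(tag, err)
}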
FetchWebRelease() {
release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/$frontendRepo/releases/latest\"")
release_json=$(eval "curl -fsSL --max-time 2 $githubAuthArgs -H \"Accept: application/vnd.github.v3+json\" \"https://api.github.com/repos/OpenListTeam/OpenList-Frontend/releases/latest\"")
release_assets=$(echo "$release_json" | jq -r '.assets[].browser_download_url')
if [ "$useLite" = true ]; then
@ -89,45 +95,6 @@ BuildWinArm64() {
go build -o "$1" -ldflags="$ldflags" -tags=jsoniter .
}
BuildWin7() {
# Setup Win7 Go compiler (patched version that supports Windows 7)
go_version=$(go version | grep -o 'go[0-9]\+\.[0-9]\+\.[0-9]\+' | sed 's/go//')
echo "Detected Go version: $go_version"
curl -fsSL --retry 3 -o go-win7.zip -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/XTLS/go-win7/releases/download/patched-${go_version}/go-for-win7-linux-amd64.zip"
rm -rf go-win7
unzip go-win7.zip -d go-win7
rm go-win7.zip
# Set permissions for all wrapper files
chmod +x ./wrapper/zcc-win7
chmod +x ./wrapper/zcxx-win7
chmod +x ./wrapper/zcc-win7-386
chmod +x ./wrapper/zcxx-win7-386
# Build for both 386 and amd64 architectures
for arch in "386" "amd64"; do
echo "building for windows7-${arch}"
export GOOS=windows
export GOARCH=${arch}
export CGO_ENABLED=1
# Use architecture-specific wrapper files
if [ "$arch" = "386" ]; then
export CC=$(pwd)/wrapper/zcc-win7-386
export CXX=$(pwd)/wrapper/zcxx-win7-386
else
export CC=$(pwd)/wrapper/zcc-win7
export CXX=$(pwd)/wrapper/zcxx-win7
fi
# Use the patched Go compiler for Win7 compatibility
$(pwd)/go-win7/bin/go build -o "${1}-${arch}.exe" -ldflags="$ldflags" -tags=jsoniter .
done
}
BuildDev() {
rm -rf .git/
mkdir -p "dist"
@ -167,7 +134,7 @@ BuildDocker() {
PrepareBuildDockerMusl() {
mkdir -p build/musl-libs
BASE="https://github.com/OpenListTeam/musl-compilers/releases/latest/download/"
FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross i486-linux-musl-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross riscv64-linux-musl-cross powerpc64le-linux-musl-cross loongarch64-linux-musl-cross) ## Disable s390x-linux-musl-cross builds
FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross i486-linux-musl-cross s390x-linux-musl-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross riscv64-linux-musl-cross powerpc64le-linux-musl-cross)
for i in "${FILES[@]}"; do
url="${BASE}${i}.tgz"
lib_tgz="build/${i}.tgz"
@ -186,8 +153,8 @@ BuildDockerMultiplatform() {
docker_lflags="--extldflags '-static -fpic' $ldflags"
export CGO_ENABLED=1
OS_ARCHES=(linux-amd64 linux-arm64 linux-386 linux-riscv64 linux-ppc64le linux-loong64) ## Disable linux-s390x builds
CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc i486-linux-musl-gcc riscv64-linux-musl-gcc powerpc64le-linux-musl-gcc loongarch64-linux-musl-gcc) ## Disable s390x-linux-musl-gcc builds
OS_ARCHES=(linux-amd64 linux-arm64 linux-386 linux-s390x linux-riscv64 linux-ppc64le)
CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc i486-linux-musl-gcc s390x-linux-musl-gcc riscv64-linux-musl-gcc powerpc64le-linux-musl-gcc)
for i in "${!OS_ARCHES[@]}"; do
os_arch=${OS_ARCHES[$i]}
cgo_cc=${CGO_ARGS[$i]}
@ -219,171 +186,12 @@ BuildRelease() {
rm -rf .git/
mkdir -p "build"
BuildWinArm64 ./build/"$appName"-windows-arm64.exe
BuildWin7 ./build/"$appName"-windows7
xgo -out "$appName" -ldflags="$ldflags" -tags=jsoniter .
# why? Because some target platforms seem to have issues with upx compression
# upx -9 ./"$appName"-linux-amd64
# cp ./"$appName"-windows-amd64.exe ./"$appName"-windows-amd64-upx.exe
# upx -9 ./"$appName"-windows-amd64-upx.exe
mv "$appName"-* build
# Build LoongArch with glibc (both old world abi1.0 and new world abi2.0)
# Separate from musl builds to avoid cache conflicts
BuildLoongGLIBC ./build/$appName-linux-loong64-abi1.0 abi1.0
BuildLoongGLIBC ./build/$appName-linux-loong64 abi2.0
}
BuildLoongGLIBC() {
local target_abi="$2"
local output_file="$1"
local oldWorldGoVersion="1.24.3"
if [ "$target_abi" = "abi1.0" ]; then
echo building for linux-loong64-abi1.0
else
echo building for linux-loong64-abi2.0
target_abi="abi2.0" # Default to abi2.0 if not specified
fi
# Note: No longer need global cache cleanup since ABI1.0 uses isolated cache directory
echo "Using optimized cache strategy: ABI1.0 has isolated cache, ABI2.0 uses standard cache"
if [ "$target_abi" = "abi1.0" ]; then
# Setup abi1.0 toolchain and patched Go compiler similar to cgo-action implementation
echo "Setting up Loongson old-world ABI1.0 toolchain and patched Go compiler..."
# Download and setup patched Go compiler for old-world
if ! curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/go${oldWorldGoVersion}.linux-amd64.tar.gz" \
-o go-loong64-abi1.0.tar.gz; then
echo "Error: Failed to download patched Go compiler for old-world ABI1.0"
if [ -n "$GITHUB_TOKEN" ]; then
echo "Error output from curl:"
curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/go${oldWorldGoVersion}.linux-amd64.tar.gz" \
-o go-loong64-abi1.0.tar.gz || true
fi
return 1
fi
rm -rf go-loong64-abi1.0
mkdir go-loong64-abi1.0
if ! tar -xzf go-loong64-abi1.0.tar.gz -C go-loong64-abi1.0 --strip-components=1; then
echo "Error: Failed to extract patched Go compiler"
return 1
fi
rm go-loong64-abi1.0.tar.gz
# Download and setup GCC toolchain for old-world
if ! curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/loongson-gnu-toolchain-8.3.novec-x86_64-loongarch64-linux-gnu-rc1.1.tar.xz" \
-o gcc8-loong64-abi1.0.tar.xz; then
echo "Error: Failed to download GCC toolchain for old-world ABI1.0"
if [ -n "$GITHUB_TOKEN" ]; then
echo "Error output from curl:"
curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/loongson-gnu-toolchain-8.3.novec-x86_64-loongarch64-linux-gnu-rc1.1.tar.xz" \
-o gcc8-loong64-abi1.0.tar.xz || true
fi
return 1
fi
rm -rf gcc8-loong64-abi1.0
mkdir gcc8-loong64-abi1.0
if ! tar -Jxf gcc8-loong64-abi1.0.tar.xz -C gcc8-loong64-abi1.0 --strip-components=1; then
echo "Error: Failed to extract GCC toolchain"
return 1
fi
rm gcc8-loong64-abi1.0.tar.xz
# Setup separate cache directory for ABI1.0 to avoid cache pollution
abi1_cache_dir="$(pwd)/go-loong64-abi1.0-cache"
mkdir -p "$abi1_cache_dir"
echo "Using separate cache directory for ABI1.0: $abi1_cache_dir"
# Use patched Go compiler for old-world build (critical for ABI1.0 compatibility)
echo "Building with patched Go compiler for old-world ABI1.0..."
echo "Using isolated cache directory: $abi1_cache_dir"
# Use env command to set environment variables locally without affecting global environment
if ! env GOOS=linux GOARCH=loong64 \
CC="$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-gcc" \
CXX="$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-g++" \
CGO_ENABLED=1 \
GOCACHE="$abi1_cache_dir" \
$(pwd)/go-loong64-abi1.0/bin/go build -a -o "$output_file" -ldflags="$ldflags" -tags=jsoniter .; then
echo "Error: Build failed with patched Go compiler"
echo "Attempting retry with cache cleanup..."
env GOCACHE="$abi1_cache_dir" $(pwd)/go-loong64-abi1.0/bin/go clean -cache
if ! env GOOS=linux GOARCH=loong64 \
CC="$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-gcc" \
CXX="$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-g++" \
CGO_ENABLED=1 \
GOCACHE="$abi1_cache_dir" \
$(pwd)/go-loong64-abi1.0/bin/go build -a -o "$output_file" -ldflags="$ldflags" -tags=jsoniter .; then
echo "Error: Build failed again after cache cleanup"
echo "Build environment details:"
echo "GOOS=linux"
echo "GOARCH=loong64"
echo "CC=$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-gcc"
echo "CXX=$(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-g++"
echo "CGO_ENABLED=1"
echo "GOCACHE=$abi1_cache_dir"
echo "Go version: $($(pwd)/go-loong64-abi1.0/bin/go version)"
echo "GCC version: $($(pwd)/gcc8-loong64-abi1.0/bin/loongarch64-linux-gnu-gcc --version | head -1)"
return 1
fi
fi
else
# Setup abi2.0 toolchain for new world glibc build
echo "Setting up new-world ABI2.0 toolchain..."
if ! curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/cross-tools/releases/download/20250507/x86_64-cross-tools-loongarch64-unknown-linux-gnu-legacy.tar.xz" \
-o gcc12-loong64-abi2.0.tar.xz; then
echo "Error: Failed to download GCC toolchain for new-world ABI2.0"
if [ -n "$GITHUB_TOKEN" ]; then
echo "Error output from curl:"
curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
"https://github.com/loong64/cross-tools/releases/download/20250507/x86_64-cross-tools-loongarch64-unknown-linux-gnu-legacy.tar.xz" \
-o gcc12-loong64-abi2.0.tar.xz || true
fi
return 1
fi
rm -rf gcc12-loong64-abi2.0
mkdir gcc12-loong64-abi2.0
if ! tar -Jxf gcc12-loong64-abi2.0.tar.xz -C gcc12-loong64-abi2.0 --strip-components=1; then
echo "Error: Failed to extract GCC toolchain"
return 1
fi
rm gcc12-loong64-abi2.0.tar.xz
export GOOS=linux
export GOARCH=loong64
export CC=$(pwd)/gcc12-loong64-abi2.0/bin/loongarch64-unknown-linux-gnu-gcc
export CXX=$(pwd)/gcc12-loong64-abi2.0/bin/loongarch64-unknown-linux-gnu-g++
export CGO_ENABLED=1
# Use standard Go compiler for new-world build
echo "Building with standard Go compiler for new-world ABI2.0..."
if ! go build -a -o "$output_file" -ldflags="$ldflags" -tags=jsoniter .; then
echo "Error: Build failed with standard Go compiler"
echo "Attempting retry with cache cleanup..."
go clean -cache
if ! go build -a -o "$output_file" -ldflags="$ldflags" -tags=jsoniter .; then
echo "Error: Build failed again after cache cleanup"
echo "Build environment details:"
echo "GOOS=$GOOS"
echo "GOARCH=$GOARCH"
echo "CC=$CC"
echo "CXX=$CXX"
echo "CGO_ENABLED=$CGO_ENABLED"
echo "Go version: $(go version)"
echo "GCC version: $($CC --version | head -1)"
return 1
fi
fi
fi
}
BuildReleaseLinuxMusl() {
@ -441,7 +249,6 @@ BuildReleaseLinuxMuslArm() {
done
}
BuildReleaseAndroid() {
rm -rf .git/
mkdir -p "build"
@ -537,7 +344,7 @@ MakeRelease() {
tar -czvf compress/"$i$liteSuffix".tar.gz "$appName"
rm -f "$appName"
done
for i in $(find . -type f \( -name "$appName-windows-*" -o -name "$appName-windows7-*" \)); do
for i in $(find . -type f -name "$appName-windows-*"); do
cp "$i" "$appName".exe
zip compress/$(echo $i | sed 's/\.[^.]*$//')$liteSuffix.zip "$appName".exe
rm -f "$appName".exe
@ -584,7 +391,7 @@ for arg in "$@"; do
done
if [ "$buildType" = "dev" ]; then
FetchWebRolling
FetchWebDev
if [ "$dockerType" = "docker" ]; then
BuildDocker
elif [ "$dockerType" = "docker-multiplatform" ]; then
@ -596,7 +403,7 @@ if [ "$buildType" = "dev" ]; then
fi
elif [ "$buildType" = "release" -o "$buildType" = "beta" ]; then
if [ "$buildType" = "beta" ]; then
FetchWebRolling
FetchWebDev
else
FetchWebRelease
fi
@ -677,5 +484,4 @@ else
echo -e " $0 release"
echo -e " $0 release lite"
echo -e " $0 release docker lite"
echo -e " $0 release linux_musl"
fi

View File

@ -4,8 +4,6 @@ Copyright © 2022 NAME HERE <EMAIL ADDRESS>
package cmd
import (
"fmt"
"github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/setting"
@ -26,11 +24,10 @@ var AdminCmd = &cobra.Command{
if err != nil {
utils.Log.Errorf("failed get admin user: %+v", err)
} else {
utils.Log.Infof("get admin user from CLI")
fmt.Println("Admin user's username:", admin.Username)
fmt.Println("The password can only be output at the first startup, and then stored as a hash value, which cannot be reversed")
fmt.Println("You can reset the password with a random string by running [openlist admin random]")
fmt.Println("You can also set a new password by running [openlist admin set NEW_PASSWORD]")
utils.Log.Infof("Admin user's username: %s", admin.Username)
utils.Log.Infof("The password can only be output at the first startup, and then stored as a hash value, which cannot be reversed")
utils.Log.Infof("You can reset the password with a random string by running [openlist admin random]")
utils.Log.Infof("You can also set a new password by running [openlist admin set NEW_PASSWORD]")
}
},
}
@ -39,7 +36,6 @@ var RandomPasswordCmd = &cobra.Command{
Use: "random",
Short: "Reset admin user's password to a random string",
Run: func(cmd *cobra.Command, args []string) {
utils.Log.Infof("reset admin user's password to a random string from CLI")
newPwd := random.String(8)
setAdminPassword(newPwd)
},
@ -48,12 +44,12 @@ var RandomPasswordCmd = &cobra.Command{
var SetPasswordCmd = &cobra.Command{
Use: "set",
Short: "Set admin user's password",
RunE: func(cmd *cobra.Command, args []string) error {
Run: func(cmd *cobra.Command, args []string) {
if len(args) == 0 {
return fmt.Errorf("Please enter the new password")
utils.Log.Errorf("Please enter the new password")
return
}
setAdminPassword(args[0])
return nil
},
}
@ -64,8 +60,7 @@ var ShowTokenCmd = &cobra.Command{
Init()
defer Release()
token := setting.GetStr(conf.Token)
utils.Log.Infof("show admin token from CLI")
fmt.Println("Admin token:", token)
utils.Log.Infof("Admin token: %s", token)
},
}
@ -82,10 +77,9 @@ func setAdminPassword(pwd string) {
utils.Log.Errorf("failed update admin user: %+v", err)
return
}
utils.Log.Infof("admin user has been update from CLI")
fmt.Println("admin user has been updated:")
fmt.Println("username:", admin.Username)
fmt.Println("password:", pwd)
utils.Log.Infof("admin user has been updated:")
utils.Log.Infof("username: %s", admin.Username)
utils.Log.Infof("password: %s", pwd)
DelAdminCacheOnline()
}

View File

@ -4,8 +4,6 @@ Copyright © 2022 NAME HERE <EMAIL ADDRESS>
package cmd
import (
"fmt"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/spf13/cobra"
@ -26,8 +24,7 @@ var Cancel2FACmd = &cobra.Command{
if err != nil {
utils.Log.Errorf("failed to cancel 2FA: %+v", err)
} else {
utils.Log.Infof("2FA is canceled from CLI")
fmt.Println("2FA canceled")
utils.Log.Info("2FA canceled")
DelAdminCacheOnline()
}
}

View File

@ -65,7 +65,6 @@ the address is defined in config file`,
var httpSrv, httpsSrv, unixSrv *http.Server
if conf.Conf.Scheme.HttpPort != -1 {
httpBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpPort)
fmt.Printf("start HTTP server @ %s\n", httpBase)
utils.Log.Infof("start HTTP server @ %s", httpBase)
httpSrv = &http.Server{Addr: httpBase, Handler: httpHandler}
go func() {
@ -77,7 +76,6 @@ the address is defined in config file`,
}
if conf.Conf.Scheme.HttpsPort != -1 {
httpsBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpsPort)
fmt.Printf("start HTTPS server @ %s\n", httpsBase)
utils.Log.Infof("start HTTPS server @ %s", httpsBase)
httpsSrv = &http.Server{Addr: httpsBase, Handler: r}
go func() {
@ -88,7 +86,6 @@ the address is defined in config file`,
}()
}
if conf.Conf.Scheme.UnixFile != "" {
fmt.Printf("start unix server @ %s\n", conf.Conf.Scheme.UnixFile)
utils.Log.Infof("start unix server @ %s", conf.Conf.Scheme.UnixFile)
unixSrv = &http.Server{Handler: httpHandler}
go func() {
@ -117,7 +114,6 @@ the address is defined in config file`,
s3r.Use(gin.LoggerWithWriter(log.StandardLogger().Out), gin.RecoveryWithWriter(log.StandardLogger().Out))
server.InitS3(s3r)
s3Base := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.S3.Port)
fmt.Printf("start S3 server @ %s\n", s3Base)
utils.Log.Infof("start S3 server @ %s", s3Base)
go func() {
var err error
@ -142,7 +138,6 @@ the address is defined in config file`,
if err != nil {
utils.Log.Fatalf("failed to start ftp driver: %s", err.Error())
} else {
fmt.Printf("start ftp server on %s\n", conf.Conf.FTP.Listen)
utils.Log.Infof("start ftp server on %s", conf.Conf.FTP.Listen)
go func() {
ftpServer = ftpserver.NewFtpServer(ftpDriver)
@ -161,7 +156,6 @@ the address is defined in config file`,
if err != nil {
utils.Log.Fatalf("failed to start sftp driver: %s", err.Error())
} else {
fmt.Printf("start sftp server on %s", conf.Conf.SFTP.Listen)
utils.Log.Infof("start sftp server on %s", conf.Conf.SFTP.Listen)
go func() {
sftpServer = sftpd.NewSftpServer(sftpDriver)

View File

@ -4,7 +4,6 @@ Copyright © 2023 NAME HERE <EMAIL ADDRESS>
package cmd
import (
"fmt"
"os"
"strconv"
@ -23,61 +22,28 @@ var storageCmd = &cobra.Command{
}
var disableStorageCmd = &cobra.Command{
Use: "disable [mount path]",
Short: "Disable a storage by mount path",
RunE: func(cmd *cobra.Command, args []string) error {
Use: "disable",
Short: "Disable a storage",
Run: func(cmd *cobra.Command, args []string) {
if len(args) < 1 {
return fmt.Errorf("mount path is required")
utils.Log.Errorf("mount path is required")
return
}
mountPath := args[0]
Init()
defer Release()
storage, err := db.GetStorageByMountPath(mountPath)
if err != nil {
return fmt.Errorf("failed to query storage: %+v", err)
}
storage.Disabled = true
err = db.UpdateStorage(storage)
if err != nil {
return fmt.Errorf("failed to update storage: %+v", err)
}
utils.Log.Infof("Storage with mount path [%s] has been disabled from CLI", mountPath)
fmt.Printf("Storage with mount path [%s] has been disabled\n", mountPath)
return nil
},
}
var deleteStorageCmd = &cobra.Command{
Use: "delete [id]",
Short: "Delete a storage by id",
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) < 1 {
return fmt.Errorf("id is required")
}
id, err := strconv.Atoi(args[0])
if err != nil {
return fmt.Errorf("id must be a number")
}
if force, _ := cmd.Flags().GetBool("force"); force {
fmt.Printf("Are you sure you want to delete storage with id [%d]? [y/N]: ", id)
var confirm string
fmt.Scanln(&confirm)
if confirm != "y" && confirm != "Y" {
fmt.Println("Delete operation cancelled.")
return nil
utils.Log.Errorf("failed to query storage: %+v", err)
} else {
storage.Disabled = true
err = db.UpdateStorage(storage)
if err != nil {
utils.Log.Errorf("failed to update storage: %+v", err)
} else {
utils.Log.Infof("Storage with mount path [%s] have been disabled", mountPath)
}
}
Init()
defer Release()
err = db.DeleteStorageById(uint(id))
if err != nil {
return fmt.Errorf("failed to delete storage by id: %+v", err)
}
utils.Log.Infof("Storage with id [%d] have been deleted from CLI", id)
fmt.Printf("Storage with id [%d] have been deleted\n", id)
return nil
},
}
@ -122,14 +88,14 @@ var storageTableHeight int
var listStorageCmd = &cobra.Command{
Use: "list",
Short: "List all storages",
RunE: func(cmd *cobra.Command, args []string) error {
Run: func(cmd *cobra.Command, args []string) {
Init()
defer Release()
storages, _, err := db.GetStorages(1, -1)
if err != nil {
return fmt.Errorf("failed to query storages: %+v", err)
utils.Log.Errorf("failed to query storages: %+v", err)
} else {
fmt.Printf("Found %d storages\n", len(storages))
utils.Log.Infof("Found %d storages", len(storages))
columns := []table.Column{
{Title: "ID", Width: 4},
{Title: "Driver", Width: 16},
@ -172,11 +138,10 @@ var listStorageCmd = &cobra.Command{
m := model{t}
if _, err := tea.NewProgram(m).Run(); err != nil {
fmt.Printf("failed to run program: %+v\n", err)
utils.Log.Errorf("failed to run program: %+v", err)
os.Exit(1)
}
}
return nil
},
}
@ -186,8 +151,6 @@ func init() {
storageCmd.AddCommand(disableStorageCmd)
storageCmd.AddCommand(listStorageCmd)
storageCmd.PersistentFlags().IntVarP(&storageTableHeight, "height", "H", 10, "Table height")
storageCmd.AddCommand(deleteStorageCmd)
deleteStorageCmd.Flags().BoolP("force", "f", false, "Force delete without confirmation")
// Here you will define your flags and configuration settings.
// Cobra supports Persistent Flags which will work for this command

View File

@ -6,9 +6,10 @@ services:
ports:
- '5244:5244'
- '5245:5245'
user: '0:0'
environment:
- PUID=0
- PGID=0
- UMASK=022
- TZ=Asia/Shanghai
- TZ=UTC
container_name: openlist
image: 'openlistteam/openlist:latest'

View File

@ -186,7 +186,9 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
preHash = strings.ToUpper(preHash)
fullHash := stream.GetHash().GetHash(utils.SHA1)
if len(fullHash) != utils.SHA1.Width {
_, fullHash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA1)
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, fullHash, err = streamPkg.CacheFullInTempFileAndHash(stream, cacheFileProgress, utils.SHA1)
if err != nil {
return nil, err
}

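Several drivers in this diff split the upload progress callback in two: caching the stream to a temp file reports the 0-50% half and the actual upload the 50-100% half. model.UpdateProgressWithRange itself is not shown here; judging by the call sites it rescales a callback onto a sub-range, roughly like this sketch (signature inferred from usage, not the project's actual code):

// UpdateProgress receives a percentage in [0, 100].
type UpdateProgress func(percentage float64)

// UpdateProgressWithRange returns a callback whose 0-100 input is mapped
// onto the [start, end] slice of the outer progress bar.
func UpdateProgressWithRange(up UpdateProgress, start, end float64) UpdateProgress {
	return func(p float64) {
		up(start + (end-start)*p/100)
	}
}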
View File

@ -321,7 +321,7 @@ func (d *Pan115) UploadByMultipart(ctx context.Context, params *driver115.Upload
err error
)
tmpF, err := s.CacheFullAndWriter(&up, nil)
tmpF, err := s.CacheFullInTempFile()
if err != nil {
return nil, err
}

View File

@ -131,23 +131,6 @@ func (d *Open115) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
}, nil
}
func (d *Open115) GetObjInfo(ctx context.Context, path string) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
resp, err := d.client.GetFolderInfoByPath(ctx, path)
if err != nil {
return nil, err
}
return &Obj{
Fid: resp.FileID,
Fn: resp.FileName,
Fc: resp.FileCategory,
Sha1: resp.Sha1,
Pc: resp.PickCode,
}, nil
}
func (d *Open115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
@ -239,7 +222,9 @@ func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
}
sha1 := file.GetHash().GetHash(utils.SHA1)
if len(sha1) != utils.SHA1.Width {
_, sha1, err = stream.CacheFullAndHash(file, &up, utils.SHA1)
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, sha1, err = stream.CacheFullInTempFileAndHash(file, cacheFileProgress, utils.SHA1)
if err != nil {
return err
}

View File

@ -9,7 +9,6 @@ import (
sdk "github.com/OpenListTeam/115-sdk-go"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
"github.com/avast/retry-go"
@ -70,6 +69,9 @@ func (d *Open115) singleUpload(ctx context.Context, tempF model.File, tokenResp
// }
func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress, tokenResp *sdk.UploadGetTokenResp, initResp *sdk.UploadInitResp) error {
fileSize := stream.GetSize()
chunkSize := calPartSize(fileSize)
ossClient, err := oss.New(tokenResp.Endpoint, tokenResp.AccessKeyId, tokenResp.AccessKeySecret, oss.SecurityToken(tokenResp.SecurityToken))
if err != nil {
return err
@ -84,13 +86,6 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
return err
}
fileSize := stream.GetSize()
chunkSize := calPartSize(fileSize)
ss, err := streamPkg.NewStreamSectionReader(stream, int(chunkSize), &up)
if err != nil {
return err
}
partNum := (stream.GetSize() + chunkSize - 1) / chunkSize
parts := make([]oss.UploadPart, partNum)
offset := int64(0)
@ -103,13 +98,10 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
if i == partNum {
partSize = fileSize - (i-1)*chunkSize
}
rd, err := ss.GetSectionReader(offset, partSize)
if err != nil {
return err
}
rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
err = retry.Do(func() error {
rd.Seek(0, io.SeekStart)
_ = rd.Reset()
rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
part, err := bucket.UploadPart(imur, rateLimitedRd, partSize, int(i))
if err != nil {
return err
@ -120,7 +112,6 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second))
ss.FreeSectionReader(rd)
if err != nil {
return err
}

View File

@ -64,6 +64,14 @@ func (d *Pan123) List(ctx context.Context, dir model.Obj, args model.ListArgs) (
func (d *Pan123) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if f, ok := file.(File); ok {
//var resp DownResp
var headers map[string]string
if !utils.IsLocalIPAddr(args.IP) {
headers = map[string]string{
//"X-Real-IP": "1.1.1.1",
"X-Forwarded-For": args.IP,
}
}
data := base.Json{
"driveId": 0,
"etag": f.Etag,
@ -75,27 +83,25 @@ func (d *Pan123) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
}
resp, err := d.Request(DownloadInfo, http.MethodPost, func(req *resty.Request) {
req.SetBody(data)
req.SetBody(data).SetHeaders(headers)
}, nil)
if err != nil {
return nil, err
}
downloadUrl := utils.Json.Get(resp, "data", "DownloadUrl").ToString()
ou, err := url.Parse(downloadUrl)
u, err := url.Parse(downloadUrl)
if err != nil {
return nil, err
}
u_ := ou.String()
nu := ou.Query().Get("params")
nu := u.Query().Get("params")
if nu != "" {
du, _ := base64.StdEncoding.DecodeString(nu)
u, err := url.Parse(string(du))
u, err = url.Parse(string(du))
if err != nil {
return nil, err
}
u_ = u.String()
}
u_ := u.String()
log.Debug("download url: ", u_)
res, err := base.NoRedirectClient.R().SetHeader("Referer", "https://www.123pan.com/").Get(u_)
if err != nil {
@ -112,7 +118,7 @@ func (d *Pan123) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
link.URL = utils.Json.Get(res.Body(), "data", "redirect_url").ToString()
}
link.Header = http.Header{
"Referer": []string{fmt.Sprintf("%s://%s/", ou.Scheme, ou.Host)},
"Referer": []string{"https://www.123pan.com/"},
}
return &link, nil
} else {
@ -182,7 +188,9 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, file model.FileStrea
etag := file.GetHash().GetHash(utils.MD5)
var err error
if len(etag) < utils.MD5.Width {
_, etag, err = stream.CacheFullAndHash(file, &up, utils.MD5)
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, etag, err = stream.CacheFullInTempFileAndHash(file, cacheFileProgress, utils.MD5)
if err != nil {
return err
}
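CacheFullInTempFileAndHash appears throughout these drivers wherever a hash is required before upload: the stream is spooled to a temp file and digested in a single pass. A minimal sketch of that pattern with io.TeeReader (illustrative only; the name and error handling are not the project's actual implementation):

package stream

import (
	"crypto/md5"
	"encoding/hex"
	"io"
	"os"
)

// cacheAndHash spools r into a temp file while hashing it in one pass,
// returning the file rewound to the start plus the hex digest.
func cacheAndHash(r io.Reader) (*os.File, string, error) {
	f, err := os.CreateTemp("", "upload-*")
	if err != nil {
		return nil, "", err
	}
	h := md5.New()
	// TeeReader feeds every byte copied into the file through the hasher.
	if _, err := io.Copy(f, io.TeeReader(r, h)); err != nil {
		f.Close()
		return nil, "", err
	}
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		f.Close()
		return nil, "", err
	}
	return f, hex.EncodeToString(h.Sum(nil)), nil
}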
View File

@ -11,8 +11,7 @@ type Addition struct {
driver.RootID
//OrderBy string `json:"order_by" type:"select" options:"file_id,file_name,size,update_at" default:"file_name"`
//OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
AccessToken string
UploadThread int `json:"UploadThread" type:"number" default:"3" help:"the threads of upload"`
AccessToken string
}
var config = driver.Config{
@ -23,11 +22,6 @@ var config = driver.Config{
func init() {
op.RegisterDriver(func() driver.Driver {
// New default options must be set during RegisterDriver initialization so they take effect for users already running the driver
return &Pan123{
Addition: Addition{
UploadThread: 3,
},
}
return &Pan123{}
})
}

View File

@ -6,16 +6,11 @@ import (
"io"
"net/http"
"strconv"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
)
@ -74,21 +69,18 @@ func (d *Pan123) completeS3(ctx context.Context, upReq *UploadResp, file model.F
}
func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, up driver.UpdateProgress) error {
// fetch s3 pre signed urls
size := file.GetSize()
chunkSize := int64(16 * utils.MB)
chunkCount := 1
if size > chunkSize {
chunkCount = int((size + chunkSize - 1) / chunkSize)
}
ss, err := stream.NewStreamSectionReader(file, int(chunkSize), &up)
tmpF, err := file.CacheFullInTempFile()
if err != nil {
return err
}
// fetch s3 pre signed urls
size := file.GetSize()
chunkSize := min(size, 16*utils.MB)
chunkCount := int(size / chunkSize)
lastChunkSize := size % chunkSize
if lastChunkSize == 0 {
if lastChunkSize > 0 {
chunkCount++
} else {
lastChunkSize = chunkSize
}
// only 1 batch is allowed
@ -98,99 +90,73 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
batchSize = 10
getS3UploadUrl = d.getS3PreSignedUrls
}
thread := min(int(chunkCount), d.UploadThread)
threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
for i := 1; i <= chunkCount; i += batchSize {
if utils.IsCanceled(uploadCtx) {
break
if utils.IsCanceled(ctx) {
return ctx.Err()
}
start := i
end := min(i+batchSize, chunkCount+1)
s3PreSignedUrls, err := getS3UploadUrl(uploadCtx, upReq, start, end)
s3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, start, end)
if err != nil {
return err
}
// upload each chunk
for cur := start; cur < end; cur++ {
if utils.IsCanceled(uploadCtx) {
break
for j := start; j < end; j++ {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
offset := int64(cur-1) * chunkSize
curSize := chunkSize
if cur == chunkCount {
if j == chunkCount {
curSize = lastChunkSize
}
var reader *stream.SectionReader
var rateLimitedRd io.Reader
threadG.GoWithLifecycle(errgroup.Lifecycle{
Before: func(ctx context.Context) error {
if reader == nil {
var err error
reader, err = ss.GetSectionReader(offset, curSize)
if err != nil {
return err
}
rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
}
return nil
},
Do: func(ctx context.Context) error {
reader.Seek(0, io.SeekStart)
uploadUrl := s3PreSignedUrls.Data.PreSignedUrls[strconv.Itoa(cur)]
if uploadUrl == "" {
return fmt.Errorf("upload url is empty, s3PreSignedUrls: %+v", s3PreSignedUrls)
}
reader.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, rateLimitedRd)
if err != nil {
return err
}
req.ContentLength = curSize
//req.Header.Set("Content-Length", strconv.FormatInt(curSize, 10))
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode == http.StatusForbidden {
singleflight.AnyGroup.Do(fmt.Sprintf("Pan123.newUpload_%p", threadG), func() (any, error) {
newS3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, cur, end)
if err != nil {
return nil, err
}
s3PreSignedUrls.Data.PreSignedUrls = newS3PreSignedUrls.Data.PreSignedUrls
return nil, nil
})
if err != nil {
return err
}
return fmt.Errorf("upload s3 chunk %d failed, status code: %d", cur, res.StatusCode)
}
if res.StatusCode != http.StatusOK {
body, err := io.ReadAll(res.Body)
if err != nil {
return err
}
return fmt.Errorf("upload s3 chunk %d failed, status code: %d, body: %s", cur, res.StatusCode, body)
}
progress := 10.0 + 85.0*float64(threadG.Success())/float64(chunkCount)
up(progress)
return nil
},
After: func(err error) {
ss.FreeSectionReader(reader)
},
})
err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.NewSectionReader(tmpF, chunkSize*int64(j-1), curSize), curSize, false, getS3UploadUrl)
if err != nil {
return err
}
up(float64(j) * 100 / float64(chunkCount))
}
}
if err := threadG.Wait(); err != nil {
return err
}
defer up(100)
// complete s3 upload
return d.completeS3(ctx, upReq, file, chunkCount > 1)
}
func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSignedUrls *S3PreSignedURLs, cur, end int, reader *io.SectionReader, curSize int64, retry bool, getS3UploadUrl func(ctx context.Context, upReq *UploadResp, start int, end int) (*S3PreSignedURLs, error)) error {
uploadUrl := s3PreSignedUrls.Data.PreSignedUrls[strconv.Itoa(cur)]
if uploadUrl == "" {
return fmt.Errorf("upload url is empty, s3PreSignedUrls: %+v", s3PreSignedUrls)
}
req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, reader))
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = curSize
//req.Header.Set("Content-Length", strconv.FormatInt(curSize, 10))
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode == http.StatusForbidden {
if retry {
return fmt.Errorf("upload s3 chunk %d failed, status code: %d", cur, res.StatusCode)
}
// refresh s3 pre signed urls
newS3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, cur, end)
if err != nil {
return err
}
s3PreSignedUrls.Data.PreSignedUrls = newS3PreSignedUrls.Data.PreSignedUrls
// retry
reader.Seek(0, io.SeekStart)
return d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, cur, end, reader, curSize, true, getS3UploadUrl)
}
if res.StatusCode != http.StatusOK {
body, err := io.ReadAll(res.Body)
if err != nil {
return err
}
return fmt.Errorf("upload s3 chunk %d failed, status code: %d, body: %s", cur, res.StatusCode, body)
}
return nil
}

View File

@ -2,9 +2,7 @@ package _123_open
import (
"context"
"fmt"
"strconv"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
@ -97,22 +95,6 @@ func (d *Open123) Rename(ctx context.Context, srcObj model.Obj, newName string)
}
func (d *Open123) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
// Try to implement copy via upload + MD5 instant upload (server-side hash dedupe)
// 1. Create the file
// parentFileID is the parent directory ID; use 0 when uploading to the root directory
parentFileId, err := strconv.ParseInt(dstDir.GetID(), 10, 64)
if err != nil {
return fmt.Errorf("parse parentFileID error: %v", err)
}
etag := srcObj.(File).Etag
createResp, err := d.create(parentFileId, srcObj.GetName(), etag, srcObj.GetSize(), 2, false)
if err != nil {
return err
}
// Check for instant upload (file reused)
if createResp.Data.Reuse {
return nil
}
return errs.NotSupport
}
@ -122,64 +104,27 @@ func (d *Open123) Remove(ctx context.Context, obj model.Obj) error {
return d.trash(fileId)
}
func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
// 1. Create the file
// parentFileID is the parent directory ID; use 0 when uploading to the root directory
func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
parentFileId, err := strconv.ParseInt(dstDir.GetID(), 10, 64)
if err != nil {
return nil, fmt.Errorf("parse parentFileID error: %v", err)
}
// etag is the file's MD5
etag := file.GetHash().GetHash(utils.MD5)
if len(etag) < utils.MD5.Width {
_, etag, err = stream.CacheFullAndHash(file, &up, utils.MD5)
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, etag, err = stream.CacheFullInTempFileAndHash(file, cacheFileProgress, utils.MD5)
if err != nil {
return nil, err
return err
}
}
createResp, err := d.create(parentFileId, file.GetName(), etag, file.GetSize(), 2, false)
if err != nil {
return nil, err
return err
}
// Check for instant upload
if createResp.Data.Reuse {
// A valid FileID is returned only when the instant upload succeeds; otherwise it is 0
if createResp.Data.FileID != 0 {
return File{
FileName: file.GetName(),
Size: file.GetSize(),
FileId: createResp.Data.FileID,
Type: 2,
Etag: etag,
}, nil
}
return nil
}
// 2. Upload the chunks
err = d.Upload(ctx, file, createResp, up)
if err != nil {
return nil, err
}
// 3. Finish the upload
for range 60 {
uploadCompleteResp, err := d.complete(createResp.Data.PreuploadID)
// An unknown error code, 20103, may be returned; the docs don't explain it
if err == nil && uploadCompleteResp.Data.Completed && uploadCompleteResp.Data.FileID != 0 {
up(100)
return File{
FileName: file.GetName(),
Size: file.GetSize(),
FileId: uploadCompleteResp.Data.FileID,
Type: 2,
Etag: etag,
}, nil
}
// If the API returns completed=false, keep polling this endpoint at 1-second intervals until the final upload result is available.
time.Sleep(time.Second)
}
return nil, fmt.Errorf("upload complete timeout")
return d.Upload(ctx, file, createResp, up)
}
var _ driver.Driver = (*Open123)(nil)
var _ driver.PutResult = (*Open123)(nil)

View File

@ -73,9 +73,7 @@ func (f File) GetName() string {
}
func (f File) CreateTime() time.Time {
// The returned time carries no timezone info; it is implicitly UTC+8
loc := time.FixedZone("UTC+8", 8*60*60)
parsedTime, err := time.ParseInLocation("2006-01-02 15:04:05", f.CreateAt, loc)
parsedTime, err := time.Parse("2006-01-02 15:04:05", f.CreateAt)
if err != nil {
return time.Now()
}
@ -83,9 +81,7 @@ func (f File) CreateTime() time.Time {
}
func (f File) ModTime() time.Time {
// The returned time carries no timezone info; it is implicitly UTC+8
loc := time.FixedZone("UTC+8", 8*60*60)
parsedTime, err := time.ParseInLocation("2006-01-02 15:04:05", f.UpdateAt, loc)
parsedTime, err := time.Parse("2006-01-02 15:04:05", f.UpdateAt)
if err != nil {
return time.Now()
}
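Both timestamps here arrive as zone-less strings that are implicitly Beijing time, which is why one side of this change pins the parse location with time.FixedZone; a plain time.Parse reads the same string as UTC and shifts the resulting instant by eight hours. A self-contained illustration of the difference:

package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05"
	s := "2025-07-25 17:32:00" // API value, implicitly UTC+8

	cst := time.FixedZone("UTC+8", 8*60*60)
	withZone, _ := time.ParseInLocation(layout, s, cst)
	asUTC, _ := time.Parse(layout, s) // zone-less parse defaults to UTC

	fmt.Println(withZone.UTC()) // 2025-07-25 09:32:00 +0000 UTC
	fmt.Println(asUTC.UTC())    // 2025-07-25 17:32:00 +0000 UTC, 8h off
}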
@ -158,23 +154,52 @@ type DownloadInfoResp struct {
} `json:"data"`
}
// Create file (V2) response
type UploadCreateResp struct {
BaseResp
Data struct {
FileID int64 `json:"fileID"`
PreuploadID string `json:"preuploadID"`
Reuse bool `json:"reuse"`
SliceSize int64 `json:"sliceSize"`
Servers []string `json:"servers"`
FileID int64 `json:"fileID"`
PreuploadID string `json:"preuploadID"`
Reuse bool `json:"reuse"`
SliceSize int64 `json:"sliceSize"`
} `json:"data"`
}
// Upload complete (V2) response
type UploadUrlResp struct {
BaseResp
Data struct {
PresignedURL string `json:"presignedURL"`
}
}
type UploadCompleteResp struct {
BaseResp
Data struct {
Async bool `json:"async"`
Completed bool `json:"completed"`
FileID int64 `json:"fileID"`
} `json:"data"`
}
type UploadAsyncResp struct {
BaseResp
Data struct {
Completed bool `json:"completed"`
FileID int64 `json:"fileID"`
} `json:"data"`
}
type UploadResp struct {
BaseResp
Data struct {
AccessKeyId string `json:"AccessKeyId"`
Bucket string `json:"Bucket"`
Key string `json:"Key"`
SecretAccessKey string `json:"SecretAccessKey"`
SessionToken string `json:"SessionToken"`
FileId int64 `json:"FileId"`
Reuse bool `json:"Reuse"`
EndPoint string `json:"EndPoint"`
StorageNode string `json:"StorageNode"`
UploadId string `json:"UploadId"`
} `json:"data"`
}

View File

@ -1,28 +1,21 @@
package _123_open
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"mime/multipart"
"net/http"
"strconv"
"strings"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
)
// Create file (V2)
func (d *Open123) create(parentFileID int64, filename string, etag string, size int64, duplicate int, containDir bool) (*UploadCreateResp, error) {
var resp UploadCreateResp
_, err := d.Request(UploadCreate, http.MethodPost, func(req *resty.Request) {
@ -41,136 +34,21 @@ func (d *Open123) create(parentFileID int64, filename string, etag string, size
return &resp, nil
}
// Upload chunks (V2)
func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createResp *UploadCreateResp, up driver.UpdateProgress) error {
uploadDomain := createResp.Data.Servers[0]
size := file.GetSize()
chunkSize := createResp.Data.SliceSize
ss, err := stream.NewStreamSectionReader(file, int(chunkSize), &up)
if err != nil {
return err
}
uploadNums := (size + chunkSize - 1) / chunkSize
thread := min(int(uploadNums), d.UploadThread)
threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
for partIndex := range uploadNums {
if utils.IsCanceled(uploadCtx) {
break
}
partIndex := partIndex
partNumber := partIndex + 1 // chunk numbers start at 1
offset := partIndex * chunkSize
size := min(chunkSize, size-offset)
var reader *stream.SectionReader
var rateLimitedRd io.Reader
sliceMD5 := ""
// multipart form buffer
b := bytes.NewBuffer(make([]byte, 0, 2048))
threadG.GoWithLifecycle(errgroup.Lifecycle{
Before: func(ctx context.Context) error {
if reader == nil {
var err error
// one reader per chunk
reader, err = ss.GetSectionReader(offset, size)
if err != nil {
return err
}
// compute the MD5 of this chunk
sliceMD5, err = utils.HashReader(utils.MD5, reader)
if err != nil {
return err
}
}
return nil
},
Do: func(ctx context.Context) error {
// Rewind the chunk reader: HashReader (or a previous failed attempt) has already consumed it to EOF
reader.Seek(0, io.SeekStart)
b.Reset()
w := multipart.NewWriter(b)
// add the form fields
err = w.WriteField("preuploadID", createResp.Data.PreuploadID)
if err != nil {
return err
}
err = w.WriteField("sliceNo", strconv.FormatInt(partNumber, 10))
if err != nil {
return err
}
err = w.WriteField("sliceMD5", sliceMD5)
if err != nil {
return err
}
// write the file content
_, err = w.CreateFormFile("slice", fmt.Sprintf("%s.part%d", file.GetName(), partNumber))
if err != nil {
return err
}
headSize := b.Len()
err = w.Close()
if err != nil {
return err
}
head := bytes.NewReader(b.Bytes()[:headSize])
tail := bytes.NewReader(b.Bytes()[headSize:])
rateLimitedRd = driver.NewLimitedUploadStream(ctx, io.MultiReader(head, reader, tail))
// create the request and set its headers
req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadDomain+"/upload/v2/file/slice", rateLimitedRd)
if err != nil {
return err
}
// set the request headers
req.Header.Add("Authorization", "Bearer "+d.AccessToken)
req.Header.Add("Content-Type", w.FormDataContentType())
req.Header.Add("Platform", "open_platform")
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != 200 {
return fmt.Errorf("slice %d upload failed, status code: %d", partNumber, res.StatusCode)
}
var resp BaseResp
respBody, err := io.ReadAll(res.Body)
if err != nil {
return err
}
err = json.Unmarshal(respBody, &resp)
if err != nil {
return err
}
if resp.Code != 0 {
return fmt.Errorf("slice %d upload failed: %s", partNumber, resp.Message)
}
progress := 10.0 + 85.0*float64(threadG.Success())/float64(uploadNums)
up(progress)
return nil
},
After: func(err error) {
ss.FreeSectionReader(reader)
},
func (d *Open123) url(preuploadID string, sliceNo int64) (string, error) {
// get upload url
var resp UploadUrlResp
_, err := d.Request(UploadUrl, http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"preuploadId": preuploadID,
"sliceNo": sliceNo,
})
}, &resp)
if err != nil {
return "", err
}
if err := threadG.Wait(); err != nil {
return err
}
return nil
return resp.Data.PresignedURL, nil
}
// complete the upload
func (d *Open123) complete(preuploadID string) (*UploadCompleteResp, error) {
var resp UploadCompleteResp
_, err := d.Request(UploadComplete, http.MethodPost, func(req *resty.Request) {
@ -183,3 +61,91 @@ func (d *Open123) complete(preuploadID string) (*UploadCompleteResp, error) {
}
return &resp, nil
}
func (d *Open123) async(preuploadID string) (*UploadAsyncResp, error) {
var resp UploadAsyncResp
_, err := d.Request(UploadAsync, http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"preuploadID": preuploadID,
})
}, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}
func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createResp *UploadCreateResp, up driver.UpdateProgress) error {
size := file.GetSize()
chunkSize := createResp.Data.SliceSize
uploadNums := (size + chunkSize - 1) / chunkSize
threadG, uploadCtx := errgroup.NewGroupWithContext(ctx, d.UploadThread,
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
for partIndex := int64(0); partIndex < uploadNums; partIndex++ {
if utils.IsCanceled(uploadCtx) {
return uploadCtx.Err() // report the cause of the group's cancellation
}
partIndex := partIndex
partNumber := partIndex + 1 // slice numbers start at 1
offset := partIndex * chunkSize
size := min(chunkSize, size-offset)
limitedReader, err := file.RangeRead(http_range.Range{
Start: offset,
Length: size})
if err != nil {
return err
}
limitedReader = driver.NewLimitedUploadStream(ctx, limitedReader)
threadG.Go(func(ctx context.Context) error {
uploadPartUrl, err := d.url(createResp.Data.PreuploadID, partNumber)
if err != nil {
return err
}
req, err := http.NewRequestWithContext(ctx, "PUT", uploadPartUrl, limitedReader)
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = size
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
_ = res.Body.Close()
progress := 10.0 + 85.0*float64(threadG.Success())/float64(uploadNums)
up(progress)
return nil
})
}
if err := threadG.Wait(); err != nil {
return err
}
uploadCompleteResp, err := d.complete(createResp.Data.PreuploadID)
if err != nil {
return err
}
if !uploadCompleteResp.Data.Async || uploadCompleteResp.Data.Completed {
return nil
}
for {
uploadAsyncResp, err := d.async(createResp.Data.PreuploadID)
if err != nil {
return err
}
if uploadAsyncResp.Data.Completed {
break
}
time.Sleep(time.Second) // avoid busy-polling the async-result endpoint
}
up(100)
return nil
}
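
Both Upload variants above size their loop with the same ceiling division, uploadNums = (size + chunkSize - 1) / chunkSize. A tiny worked example of that arithmetic:

package main

import "fmt"

func main() {
	size := int64(10 << 20)     // 10 MiB file
	chunkSize := int64(4 << 20) // 4 MiB slices
	uploadNums := (size + chunkSize - 1) / chunkSize
	lastSlice := size - (uploadNums-1)*chunkSize
	fmt.Println(uploadNums, lastSlice) // 3 slices; the last one is 2 MiB (2097152)
}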

View File

@ -19,14 +19,16 @@ var ( // QPS limits differ by how the AccessToken was obtained; grouping the endpoints like this keeps them easy to maintain
AccessToken = InitApiInfo(Api+"/api/v1/access_token", 1)
RefreshToken = InitApiInfo(Api+"/api/v1/oauth2/access_token", 1)
UserInfo = InitApiInfo(Api+"/api/v1/user/info", 1)
FileList = InitApiInfo(Api+"/api/v2/file/list", 3)
FileList = InitApiInfo(Api+"/api/v2/file/list", 4)
DownloadInfo = InitApiInfo(Api+"/api/v1/file/download_info", 0)
Mkdir = InitApiInfo(Api+"/upload/v1/file/mkdir", 2)
Move = InitApiInfo(Api+"/api/v1/file/move", 1)
Rename = InitApiInfo(Api+"/api/v1/file/name", 1)
Trash = InitApiInfo(Api+"/api/v1/file/trash", 2)
UploadCreate = InitApiInfo(Api+"/upload/v2/file/create", 2)
UploadComplete = InitApiInfo(Api+"/upload/v2/file/upload_complete", 0)
UploadCreate = InitApiInfo(Api+"/upload/v1/file/create", 2)
UploadUrl = InitApiInfo(Api+"/upload/v1/file/get_upload_url", 0)
UploadComplete = InitApiInfo(Api+"/upload/v1/file/upload_complete", 0)
UploadAsync = InitApiInfo(Api+"/upload/v1/file/upload_async_result", 1)
)
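
The second argument to InitApiInfo reads as a per-endpoint QPS budget (0 meaning unlimited). A minimal sketch of that pattern with golang.org/x/time/rate; the real InitApiInfo is not shown in this diff, so the struct and method names below are illustrative assumptions.

package sketch

import (
	"context"

	"golang.org/x/time/rate"
)

// ApiInfo pairs an endpoint with an optional QPS limiter (nil = unlimited).
type ApiInfo struct {
	url     string
	limiter *rate.Limiter
}

func InitApiInfo(url string, qps int) *ApiInfo {
	a := &ApiInfo{url: url}
	if qps > 0 {
		a.limiter = rate.NewLimiter(rate.Limit(qps), 1)
	}
	return a
}

// Wait blocks until the endpoint may be called again (or ctx is cancelled).
func (a *ApiInfo) Wait(ctx context.Context) error {
	if a.limiter == nil {
		return nil
	}
	return a.limiter.Wait(ctx)
}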
func (d *Open123) Request(apiInfo *ApiInfo, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {

View File

@ -70,6 +70,14 @@ func (d *Pan123Share) List(ctx context.Context, dir model.Obj, args model.ListAr
func (d *Pan123Share) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
// TODO return link of file, required
if f, ok := file.(File); ok {
//var resp DownResp
var headers map[string]string
if !utils.IsLocalIPAddr(args.IP) {
headers = map[string]string{
//"X-Real-IP": "1.1.1.1",
"X-Forwarded-For": args.IP,
}
}
data := base.Json{
"shareKey": d.ShareKey,
"SharePwd": d.SharePwd,
@ -79,27 +87,25 @@ func (d *Pan123Share) Link(ctx context.Context, file model.Obj, args model.LinkA
"size": f.Size,
}
resp, err := d.request(DownloadInfo, http.MethodPost, func(req *resty.Request) {
req.SetBody(data)
req.SetBody(data).SetHeaders(headers)
}, nil)
if err != nil {
return nil, err
}
downloadUrl := utils.Json.Get(resp, "data", "DownloadURL").ToString()
ou, err := url.Parse(downloadUrl)
u, err := url.Parse(downloadUrl)
if err != nil {
return nil, err
}
u_ := ou.String()
nu := ou.Query().Get("params")
nu := u.Query().Get("params")
if nu != "" {
du, _ := base64.StdEncoding.DecodeString(nu)
u, err := url.Parse(string(du))
u, err = url.Parse(string(du))
if err != nil {
return nil, err
}
u_ = u.String()
}
u_ := u.String()
log.Debug("download url: ", u_)
res, err := base.NoRedirectClient.R().SetHeader("Referer", "https://www.123pan.com/").Get(u_)
if err != nil {
@ -116,7 +122,7 @@ func (d *Pan123Share) Link(ctx context.Context, file model.Obj, args model.LinkA
link.URL = utils.Json.Get(res.Body(), "data", "redirect_url").ToString()
}
link.Header = http.Header{
"Referer": []string{fmt.Sprintf("%s://%s/", ou.Scheme, ou.Host)},
"Referer": []string{"https://www.123pan.com/"},
}
return &link, nil
}
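
The share-link handler above unwraps an indirection: the API's DownloadURL may hide the real target base64-encoded in its params query value. The same decode step in isolation, as a self-contained sketch:

package main

import (
	"encoding/base64"
	"fmt"
	"net/url"
)

func realDownloadURL(downloadUrl string) (string, error) {
	u, err := url.Parse(downloadUrl)
	if err != nil {
		return "", err
	}
	if p := u.Query().Get("params"); p != "" {
		du, err := base64.StdEncoding.DecodeString(p)
		if err != nil {
			return "", err
		}
		inner, err := url.Parse(string(du))
		if err != nil {
			return "", err
		}
		return inner.String(), nil
	}
	return u.String(), nil
}

func main() {
	// the params value decodes to https://example.com/real
	fmt.Println(realDownloadURL("https://example.com/dl?params=aHR0cHM6Ly9leGFtcGxlLmNvbS9yZWFs"))
}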

View File

@ -522,17 +522,21 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
var err error
fullHash := stream.GetHash().GetHash(utils.SHA256)
if len(fullHash) != utils.SHA256.Width {
_, fullHash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA256)
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, fullHash, err = streamPkg.CacheFullInTempFileAndHash(stream, cacheFileProgress, utils.SHA256)
if err != nil {
return err
}
}
size := stream.GetSize()
partSize := d.getPartSize(size)
part := int64(1)
if size > partSize {
part = (size + partSize - 1) / partSize
var partSize = d.getPartSize(size)
part := size / partSize
if size%partSize > 0 {
part++
} else if part == 0 {
part = 1
}
partInfos := make([]PartInfo, 0, part)
for i := int64(0); i < part; i++ {
@ -634,10 +638,11 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
// Update Progress
r := io.TeeReader(limitReader, p)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadPartInfo.UploadUrl, r)
req, err := http.NewRequest("PUT", uploadPartInfo.UploadUrl, r)
if err != nil {
return err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Content-Length", fmt.Sprint(partSize))
req.Header.Set("Origin", "https://yun.139.com")
@ -783,10 +788,12 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
size := stream.GetSize()
// Progress
p := driver.NewProgress(size, up)
partSize := d.getPartSize(size)
part := int64(1)
if size > partSize {
part = (size + partSize - 1) / partSize
var partSize = d.getPartSize(size)
part := size / partSize
if size%partSize > 0 {
part++
} else if part == 0 {
part = 1
}
rateLimited := driver.NewLimitedUploadStream(ctx, stream)
for i := int64(0); i < part; i++ {
@ -800,10 +807,12 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
limitReader := io.LimitReader(rateLimited, byteSize)
// Update Progress
r := io.TeeReader(limitReader, p)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, resp.Data.UploadResult.RedirectionURL, r)
req, err := http.NewRequest("POST", resp.Data.UploadResult.RedirectionURL, r)
if err != nil {
return err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "text/plain;name="+unicode(stream.GetName()))
req.Header.Set("contentSize", strconv.FormatInt(size, 10))
req.Header.Set("range", fmt.Sprintf("bytes=%d-%d", start, start+byteSize-1))

View File

@ -365,10 +365,11 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
log.Debugf("uploadData: %+v", uploadData)
requestURL := uploadData.RequestURL
uploadHeaders := strings.Split(decodeURIComponent(uploadData.RequestHeader), "&")
req, err := http.NewRequestWithContext(ctx, http.MethodPut, requestURL, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
req, err := http.NewRequest(http.MethodPut, requestURL, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
if err != nil {
return err
}
req = req.WithContext(ctx)
for _, v := range uploadHeaders {
i := strings.Index(v, "=")
req.Header.Set(v[0:i], v[i+1:])

View File

@ -5,19 +5,17 @@ import (
"encoding/base64"
"encoding/xml"
"fmt"
"github.com/skip2/go-qrcode"
"io"
"net/http"
"strconv"
"strings"
"time"
"github.com/skip2/go-qrcode"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/go-resty/resty/v2"
@ -313,14 +311,11 @@ func (y *Cloud189TV) RapidUpload(ctx context.Context, dstDir model.Obj, stream m
// legacy upload; the family cloud does not support overwrite
func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
fileMd5 := file.GetHash().GetHash(utils.MD5)
var tempFile = file.GetFile()
var err error
if len(fileMd5) != utils.MD5.Width {
tempFile, fileMd5, err = stream.CacheFullAndHash(file, &up, utils.MD5)
} else if tempFile == nil {
tempFile, err = file.CacheFullAndWriter(&up, nil)
tempFile, err := file.CacheFullInTempFile()
if err != nil {
return nil, err
}
fileMd5, err := utils.HashFile(utils.MD5, tempFile)
if err != nil {
return nil, err
}
@ -350,7 +345,7 @@ func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model
header["Edrive-UploadFileId"] = fmt.Sprint(status.UploadFileId)
}
_, err := y.put(ctx, status.FileUploadUrl, header, true, tempFile, isFamily)
_, err := y.put(ctx, status.FileUploadUrl, header, true, io.NopCloser(tempFile), isFamily)
if err, ok := err.(*RespErr); ok && err.Code != "InputStreamReadError" {
return nil, err
}
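
OldUpload now always spools the stream to a temp file (CacheFullInTempFile) and hashes it. A minimal sketch of that cache-then-hash pattern, assuming the helper's core is a TeeReader into a temp file (progress reporting omitted):

package sketch

import (
	"crypto/md5"
	"encoding/hex"
	"io"
	"os"
)

// cacheAndMD5 spools r into a temp file while hashing every byte in one pass,
// then rewinds the file so it can be re-read for the actual upload.
func cacheAndMD5(r io.Reader) (*os.File, string, error) {
	f, err := os.CreateTemp("", "upload-*")
	if err != nil {
		return nil, "", err
	}
	h := md5.New()
	if _, err = io.Copy(f, io.TeeReader(r, h)); err == nil {
		_, err = f.Seek(0, io.SeekStart)
	}
	if err != nil {
		f.Close()
		os.Remove(f.Name())
		return nil, "", err
	}
	return f, hex.EncodeToString(h.Sum(nil)), nil
}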

View File

@ -7,7 +7,6 @@ import (
"encoding/hex"
"encoding/xml"
"fmt"
"hash"
"io"
"net/http"
"net/http/cookiejar"
@ -473,7 +472,7 @@ func (y *Cloud189PC) refreshSession() (err error) {
// 无法上传大小为0的文件
func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
size := file.GetSize()
sliceSize := min(size, partSize(size))
sliceSize := partSize(size)
params := Params{
"parentFolderId": dstDir.GetID(),
@ -501,99 +500,64 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
return nil, err
}
ss, err := stream.NewStreamSectionReader(file, int(sliceSize), &up)
if err != nil {
return nil, err
}
threadG, upCtx := errgroup.NewOrderedGroupWithContext(ctx, y.uploadThread,
threadG, upCtx := errgroup.NewGroupWithContext(ctx, y.uploadThread,
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
count := 1
if size > sliceSize {
count = int((size + sliceSize - 1) / sliceSize)
}
count := int(size / sliceSize)
lastPartSize := size % sliceSize
if lastPartSize == 0 {
if lastPartSize > 0 {
count++
} else {
lastPartSize = sliceSize
}
silceMd5Hexs := make([]string, 0, count)
fileMd5 := utils.MD5.NewFunc()
silceMd5 := utils.MD5.NewFunc()
var writers io.Writer = silceMd5
fileMd5Hex := file.GetHash().GetHash(utils.MD5)
var fileMd5 hash.Hash
if len(fileMd5Hex) != utils.MD5.Width {
fileMd5 = utils.MD5.NewFunc()
writers = io.MultiWriter(silceMd5, fileMd5)
}
silceMd5Hexs := make([]string, 0, count)
teeReader := io.TeeReader(file, io.MultiWriter(fileMd5, silceMd5))
byteSize := sliceSize
for i := 1; i <= count; i++ {
if utils.IsCanceled(upCtx) {
break
}
offset := int64((i)-1) * sliceSize
size := sliceSize
if i == count {
size = lastPartSize
byteSize = lastPartSize
}
byteData := make([]byte, byteSize)
// read the chunk
silceMd5.Reset()
if _, err := io.ReadFull(teeReader, byteData); err != io.EOF && err != nil {
return nil, err
}
partInfo := ""
var reader *stream.SectionReader
var rateLimitedRd io.Reader
threadG.GoWithLifecycle(errgroup.Lifecycle{
Before: func(ctx context.Context) error {
if reader == nil {
var err error
reader, err = ss.GetSectionReader(offset, size)
if err != nil {
return err
}
silceMd5.Reset()
w, err := utils.CopyWithBuffer(writers, reader)
if w != size {
return fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", size, w, err)
}
// compute the chunk MD5 and encode it as hex and base64
md5Bytes := silceMd5.Sum(nil)
silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Bytes)))
partInfo = fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes))
rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
}
return nil
},
Do: func(ctx context.Context) error {
reader.Seek(0, io.SeekStart)
uploadUrls, err := y.GetMultiUploadUrls(ctx, isFamily, initMultiUpload.Data.UploadFileID, partInfo)
if err != nil {
return err
}
// compute the chunk MD5 and encode it as hex and base64
md5Bytes := silceMd5.Sum(nil)
silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Bytes)))
partInfo := fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes))
// step 4: upload the slice
uploadUrl := uploadUrls[0]
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false,
driver.NewLimitedUploadStream(ctx, rateLimitedRd), isFamily)
if err != nil {
return err
}
up(float64(threadG.Success()) * 100 / float64(count))
return nil
},
After: func(err error) {
ss.FreeSectionReader(reader)
},
},
)
threadG.Go(func(ctx context.Context) error {
uploadUrls, err := y.GetMultiUploadUrls(ctx, isFamily, initMultiUpload.Data.UploadFileID, partInfo)
if err != nil {
return err
}
// step 4: upload the slice
uploadUrl := uploadUrls[0]
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false,
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)), isFamily)
if err != nil {
return err
}
up(float64(threadG.Success()) * 100 / float64(count))
return nil
})
}
if err = threadG.Wait(); err != nil {
return nil, err
}
if fileMd5 != nil {
fileMd5Hex = strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
}
fileMd5Hex := strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
sliceMd5Hex := fileMd5Hex
if file.GetSize() > sliceSize {
sliceMd5Hex = strings.ToUpper(utils.GetMD5EncodeStr(strings.Join(silceMd5Hexs, "\n")))
@ -656,12 +620,11 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
cache = tmpF
}
sliceSize := partSize(size)
count := 1
if size > sliceSize {
count = int((size + sliceSize - 1) / sliceSize)
}
count := int(size / sliceSize)
lastSliceSize := size % sliceSize
if lastSliceSize == 0 {
if lastSliceSize > 0 {
count++
} else {
lastSliceSize = sliceSize
}
@ -775,8 +738,7 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
}
// step 4: upload the slice
rateLimitedRd := driver.NewLimitedUploadStream(ctx, io.NewSectionReader(cache, offset, byteSize))
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, rateLimitedRd, isFamily)
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, io.NewSectionReader(cache, offset, byteSize), isFamily)
if err != nil {
return err
}
@ -858,7 +820,9 @@ func (y *Cloud189PC) GetMultiUploadUrls(ctx context.Context, isFamily bool, uplo
// legacy upload; the family cloud does not support overwrite
func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
tempFile, fileMd5, err := stream.CacheFullAndHash(file, &up, utils.MD5)
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
tempFile, fileMd5, err := stream.CacheFullInTempFileAndHash(file, cacheFileProgress, utils.MD5)
if err != nil {
return nil, err
}
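
Here the 0-100 progress bar is split so caching reports as 0-50 and the upload as 50-100. A sketch of what such a remapper could look like; the real model.UpdateProgressWithRange is not shown in this diff, so this is an illustrative assumption.

package main

import "fmt"

type UpdateProgress func(percentage float64)

// UpdateProgressWithRange maps a child's 0-100 progress into [low, high] of the parent bar.
func UpdateProgressWithRange(up UpdateProgress, low, high float64) UpdateProgress {
	return func(p float64) {
		up(low + (high-low)*p/100)
	}
}

func main() {
	parent := func(p float64) { fmt.Printf("%.0f%%\n", p) }
	cache := UpdateProgressWithRange(parent, 0, 50)
	upload := UpdateProgressWithRange(parent, 50, 100)
	cache(100) // caching done -> parent shows 50%
	upload(50) // upload halfway -> parent shows 75%
}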

View File

@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"io"
"net/url"
stdpath "path"
"strings"
@ -13,7 +12,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/fs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/sign"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
@ -80,18 +78,10 @@ func (d *Alias) Get(ctx context.Context, path string) (model.Obj, error) {
return nil, errs.ObjectNotFound
}
for _, dst := range dsts {
obj, err := fs.Get(ctx, stdpath.Join(dst, sub), &fs.GetArgs{NoLog: true})
if err != nil {
continue
obj, err := d.get(ctx, path, dst, sub)
if err == nil {
return obj, nil
}
return &model.Object{
Path: path,
Name: obj.GetName(),
Size: obj.GetSize(),
Modified: obj.ModTime(),
IsFolder: obj.IsDir(),
HashInfo: obj.GetHash(),
}, nil
}
return nil, errs.ObjectNotFound
}
@ -109,27 +99,7 @@ func (d *Alias) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
var objs []model.Obj
fsArgs := &fs.ListArgs{NoLog: true, Refresh: args.Refresh}
for _, dst := range dsts {
tmp, err := fs.List(ctx, stdpath.Join(dst, sub), fsArgs)
if err == nil {
tmp, err = utils.SliceConvert(tmp, func(obj model.Obj) (model.Obj, error) {
thumb, ok := model.GetThumb(obj)
objRes := model.Object{
Name: obj.GetName(),
Size: obj.GetSize(),
Modified: obj.ModTime(),
IsFolder: obj.IsDir(),
}
if !ok {
return &objRes, nil
}
return &model.ObjThumb{
Object: objRes,
Thumbnail: model.Thumbnail{
Thumbnail: thumb,
},
}, nil
})
}
tmp, err := d.list(ctx, dst, sub, fsArgs)
if err == nil {
objs = append(objs, tmp...)
}
@ -143,45 +113,45 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
if !ok {
return nil, errs.ObjectNotFound
}
// proxy || ftp,s3
if common.GetApiUrl(ctx) == "" {
args.Redirect = false
}
for _, dst := range dsts {
reqPath := stdpath.Join(dst, sub)
link, fi, err := d.link(ctx, reqPath, args)
link, file, err := d.link(ctx, reqPath, args)
if err != nil {
continue
}
if link == nil {
// redirect, but it must go through the proxy
return &model.Link{
var resultLink *model.Link
if link != nil {
resultLink = &model.Link{
URL: link.URL,
Header: link.Header,
RangeReader: link.RangeReader,
SyncClosers: utils.NewSyncClosers(link),
ContentLength: link.ContentLength,
}
if link.MFile != nil {
resultLink.RangeReader = &model.FileRangeReader{
RangeReaderIF: stream.GetRangeReaderFromMFile(file.GetSize(), link.MFile),
}
}
} else {
resultLink = &model.Link{
URL: fmt.Sprintf("%s/p%s?sign=%s",
common.GetApiUrl(ctx),
utils.EncodePath(reqPath, true),
sign.Sign(reqPath)),
}, nil
}
}
resultLink := *link
resultLink.SyncClosers = utils.NewSyncClosers(link)
if args.Redirect {
return &resultLink, nil
}
if resultLink.ContentLength == 0 {
resultLink.ContentLength = fi.GetSize()
if !args.Redirect {
if d.DownloadConcurrency > 0 {
resultLink.Concurrency = d.DownloadConcurrency
}
if d.DownloadPartSize > 0 {
resultLink.PartSize = d.DownloadPartSize * utils.KB
}
}
if resultLink.MFile != nil {
return &resultLink, nil
}
if d.DownloadConcurrency > 0 {
resultLink.Concurrency = d.DownloadConcurrency
}
if d.DownloadPartSize > 0 {
resultLink.PartSize = d.DownloadPartSize * utils.KB
}
return &resultLink, nil
return resultLink, nil
}
return nil, errs.ObjectNotFound
}
@ -308,29 +278,24 @@ func (d *Alias) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer,
reqPath, err := d.getReqPath(ctx, dstDir, true)
if err == nil {
if len(reqPath) == 1 {
storage, reqActualPath, err := op.GetStorageAndActualPath(*reqPath[0])
if err != nil {
return err
}
return op.Put(ctx, storage, reqActualPath, &stream.FileStream{
Obj: s,
Mimetype: s.GetMimetype(),
Reader: s,
}, up)
return fs.PutDirectly(ctx, *reqPath[0], &stream.FileStream{
Obj: s,
Mimetype: s.GetMimetype(),
WebPutAsTask: s.NeedStore(),
Reader: s,
})
} else {
file, err := s.CacheFullAndWriter(nil, nil)
file, err := s.CacheFullInTempFile()
if err != nil {
return err
}
count := float64(len(reqPath) + 1)
up(100 / count)
for i, path := range reqPath {
for _, path := range reqPath {
err = errors.Join(err, fs.PutDirectly(ctx, *path, &stream.FileStream{
Obj: s,
Mimetype: s.GetMimetype(),
Reader: file,
Obj: s,
Mimetype: s.GetMimetype(),
WebPutAsTask: s.NeedStore(),
Reader: file,
}))
up(float64(i+2) / float64(count) * 100)
_, e := file.Seek(0, io.SeekStart)
if e != nil {
return errors.Join(err, e)
@ -402,24 +367,10 @@ func (d *Alias) Extract(ctx context.Context, obj model.Obj, args model.ArchiveIn
return nil, errs.ObjectNotFound
}
for _, dst := range dsts {
reqPath := stdpath.Join(dst, sub)
link, err := d.extract(ctx, reqPath, args)
if err != nil {
continue
link, err := d.extract(ctx, dst, sub, args)
if err == nil {
return link, nil
}
if link == nil {
return &model.Link{
URL: fmt.Sprintf("%s/ap%s?inner=%s&pass=%s&sign=%s",
common.GetApiUrl(ctx),
utils.EncodePath(reqPath, true),
utils.EncodePath(args.InnerPath, true),
url.QueryEscape(args.Password),
sign.SignArchive(reqPath)),
}, nil
}
resultLink := *link
resultLink.SyncClosers = utils.NewSyncClosers(link)
return &resultLink, nil
}
return nil, errs.NotImplement
}

View File

@ -2,6 +2,8 @@ package alias
import (
"context"
"fmt"
"net/url"
stdpath "path"
"strings"
@ -10,6 +12,8 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/fs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/sign"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/OpenList/v4/server/common"
)
@ -50,12 +54,55 @@ func (d *Alias) getRootAndPath(path string) (string, string) {
return parts[0], parts[1]
}
func (d *Alias) get(ctx context.Context, path string, dst, sub string) (model.Obj, error) {
obj, err := fs.Get(ctx, stdpath.Join(dst, sub), &fs.GetArgs{NoLog: true})
if err != nil {
return nil, err
}
return &model.Object{
Path: path,
Name: obj.GetName(),
Size: obj.GetSize(),
Modified: obj.ModTime(),
IsFolder: obj.IsDir(),
HashInfo: obj.GetHash(),
}, nil
}
func (d *Alias) list(ctx context.Context, dst, sub string, args *fs.ListArgs) ([]model.Obj, error) {
objs, err := fs.List(ctx, stdpath.Join(dst, sub), args)
// the obj must implement the model.SetPath interface
// return objs, err
if err != nil {
return nil, err
}
return utils.SliceConvert(objs, func(obj model.Obj) (model.Obj, error) {
thumb, ok := model.GetThumb(obj)
objRes := model.Object{
Name: obj.GetName(),
Size: obj.GetSize(),
Modified: obj.ModTime(),
IsFolder: obj.IsDir(),
}
if !ok {
return &objRes, nil
}
return &model.ObjThumb{
Object: objRes,
Thumbnail: model.Thumbnail{
Thumbnail: thumb,
},
}, nil
})
}
func (d *Alias) link(ctx context.Context, reqPath string, args model.LinkArgs) (*model.Link, model.Obj, error) {
storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
if err != nil {
return nil, nil, err
}
if !args.Redirect {
// proxy || ftp,s3
if !args.Redirect || len(common.GetApiUrl(ctx)) == 0 {
return op.Link(ctx, storage, reqActualPath, args)
}
obj, err := fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
@ -136,7 +183,8 @@ func (d *Alias) listArchive(ctx context.Context, dst, sub string, args model.Arc
return nil, errs.NotImplement
}
func (d *Alias) extract(ctx context.Context, reqPath string, args model.ArchiveInnerArgs) (*model.Link, error) {
func (d *Alias) extract(ctx context.Context, dst, sub string, args model.ArchiveInnerArgs) (*model.Link, error) {
reqPath := stdpath.Join(dst, sub)
storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
if err != nil {
return nil, err
@ -144,12 +192,20 @@ func (d *Alias) extract(ctx context.Context, reqPath string, args model.ArchiveI
if _, ok := storage.(driver.ArchiveReader); !ok {
return nil, errs.NotImplement
}
if args.Redirect && common.ShouldProxy(storage, stdpath.Base(reqPath)) {
_, err := fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
if err == nil {
if args.Redirect && common.ShouldProxy(storage, stdpath.Base(sub)) {
_, err = fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
if err != nil {
return nil, err
}
return nil, nil
link := &model.Link{
URL: fmt.Sprintf("%s/ap%s?inner=%s&pass=%s&sign=%s",
common.GetApiUrl(ctx),
utils.EncodePath(reqPath, true),
utils.EncodePath(args.InnerPath, true),
url.QueryEscape(args.Password),
sign.SignArchive(reqPath)),
}
return link, nil
}
link, _, err := op.DriverExtract(ctx, storage, reqActualPath, args)
return link, err

View File

@ -297,10 +297,11 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
if d.InternalUpload {
url = partInfo.InternalUploadUrl
}
req, err := http.NewRequestWithContext(ctx, http.MethodPut, url, io.LimitReader(rateLimited, DEFAULT))
req, err := http.NewRequest("PUT", url, io.LimitReader(rateLimited, DEFAULT))
if err != nil {
return err
}
req = req.WithContext(ctx)
res, err := base.HttpClient.Do(req)
if err != nil {
return err

View File

@ -3,6 +3,7 @@ package aliyundrive_open
import (
"context"
"errors"
"fmt"
"net/http"
"path/filepath"
"time"
@ -12,6 +13,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/rateg"
"github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
)
@ -22,8 +24,9 @@ type AliyundriveOpen struct {
DriveId string
limiter *limiter
ref *AliyundriveOpen
limitList func(ctx context.Context, data base.Json) (*Files, error)
limitLink func(ctx context.Context, file model.Obj) (*model.Link, error)
ref *AliyundriveOpen
}
func (d *AliyundriveOpen) Config() driver.Config {
@ -35,23 +38,25 @@ func (d *AliyundriveOpen) GetAddition() driver.Additional {
}
func (d *AliyundriveOpen) Init(ctx context.Context) error {
d.limiter = getLimiterForUser(globalLimiterUserID) // First create a globally shared limiter to limit the initial requests.
if d.LIVPDownloadFormat == "" {
d.LIVPDownloadFormat = "jpeg"
}
if d.DriveType == "" {
d.DriveType = "default"
}
res, err := d.request(ctx, limiterOther, "/adrive/v1.0/user/getDriveInfo", http.MethodPost, nil)
res, err := d.request("/adrive/v1.0/user/getDriveInfo", http.MethodPost, nil)
if err != nil {
d.limiter.free()
d.limiter = nil
return err
}
d.DriveId = utils.Json.Get(res, d.DriveType+"_drive_id").ToString()
userid := utils.Json.Get(res, "user_id").ToString()
d.limiter.free()
d.limiter = getLimiterForUser(userid) // Allocate a corresponding limiter for each user.
d.limitList = rateg.LimitFnCtx(d.list, rateg.LimitFnOption{
Limit: 4,
Bucket: 1,
})
d.limitLink = rateg.LimitFnCtx(d.link, rateg.LimitFnOption{
Limit: 1,
Bucket: 1,
})
return nil
}
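
Init wraps list and link in rateg.LimitFnCtx so every call first waits on a token bucket (4/s for list, 1/s for link). The rateg internals are not shown here; below is a generic sketch of the pattern its usage implies, not the actual rateg code.

package main

import (
	"context"
	"fmt"

	"golang.org/x/time/rate"
)

// LimitFnCtx gates every call to fn behind a shared token bucket.
func LimitFnCtx[A, R any](fn func(context.Context, A) (R, error), limit rate.Limit, burst int) func(context.Context, A) (R, error) {
	l := rate.NewLimiter(limit, burst)
	return func(ctx context.Context, arg A) (R, error) {
		if err := l.Wait(ctx); err != nil { // blocks, or fails fast on cancellation
			var zero R
			return zero, err
		}
		return fn(ctx, arg)
	}
}

func main() {
	list := LimitFnCtx(func(ctx context.Context, dir string) ([]string, error) {
		return []string{dir + "/a", dir + "/b"}, nil // stand-in for the real API call
	}, 4, 1) // ~4 calls per second, burst of 1
	fmt.Println(list(context.Background(), "/root"))
}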
@ -65,8 +70,6 @@ func (d *AliyundriveOpen) InitReference(storage driver.Driver) error {
}
func (d *AliyundriveOpen) Drop(ctx context.Context) error {
d.limiter.free()
d.limiter = nil
d.ref = nil
return nil
}
@ -84,6 +87,9 @@ func (d *AliyundriveOpen) GetRoot(ctx context.Context) (model.Obj, error) {
}
func (d *AliyundriveOpen) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
if d.limitList == nil {
return nil, fmt.Errorf("driver not init")
}
files, err := d.getFiles(ctx, dir.GetID())
if err != nil {
return nil, err
@ -101,8 +107,8 @@ func (d *AliyundriveOpen) List(ctx context.Context, dir model.Obj, args model.Li
return objs, err
}
func (d *AliyundriveOpen) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
res, err := d.request(ctx, limiterLink, "/adrive/v1.0/openFile/getDownloadUrl", http.MethodPost, func(req *resty.Request) {
func (d *AliyundriveOpen) link(ctx context.Context, file model.Obj) (*model.Link, error) {
res, err := d.request("/adrive/v1.0/openFile/getDownloadUrl", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"file_id": file.GetID(),
@ -126,10 +132,17 @@ func (d *AliyundriveOpen) Link(ctx context.Context, file model.Obj, args model.L
}, nil
}
func (d *AliyundriveOpen) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if d.limitLink == nil {
return nil, fmt.Errorf("driver not init")
}
return d.limitLink(ctx, file)
}
func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
nowTime, _ := getNowTime()
newDir := File{CreatedAt: nowTime, UpdatedAt: nowTime}
_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
_, err := d.request("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"parent_file_id": parentDir.GetID(),
@ -155,7 +168,7 @@ func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirN
func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
var resp MoveOrCopyResp
_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/move", http.MethodPost, func(req *resty.Request) {
_, err := d.request("/adrive/v1.0/openFile/move", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"file_id": srcObj.GetID(),
@ -185,7 +198,7 @@ func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (m
func (d *AliyundriveOpen) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
var newFile File
_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/update", http.MethodPost, func(req *resty.Request) {
_, err := d.request("/adrive/v1.0/openFile/update", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"file_id": srcObj.GetID(),
@ -217,7 +230,7 @@ func (d *AliyundriveOpen) Rename(ctx context.Context, srcObj model.Obj, newName
func (d *AliyundriveOpen) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
var resp MoveOrCopyResp
_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/copy", http.MethodPost, func(req *resty.Request) {
_, err := d.request("/adrive/v1.0/openFile/copy", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"file_id": srcObj.GetID(),
@ -243,7 +256,7 @@ func (d *AliyundriveOpen) Remove(ctx context.Context, obj model.Obj) error {
if d.RemoveWay == "delete" {
uri = "/adrive/v1.0/openFile/delete"
}
_, err := d.request(ctx, limiterOther, uri, http.MethodPost, func(req *resty.Request) {
_, err := d.request(uri, http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"file_id": obj.GetID(),
@ -282,7 +295,7 @@ func (d *AliyundriveOpen) Other(ctx context.Context, args model.OtherArgs) (inte
default:
return nil, errs.NotSupport
}
_, err := d.request(ctx, limiterOther, uri, http.MethodPost, func(req *resty.Request) {
_, err := d.request(uri, http.MethodPost, func(req *resty.Request) {
req.SetBody(data).SetResult(&resp)
})
if err != nil {

View File

@ -1,96 +0,0 @@
package aliyundrive_open
import (
"context"
"fmt"
"sync"
"golang.org/x/time/rate"
)
// See document https://www.yuque.com/aliyundrive/zpfszx/mqocg38hlxzc5vcd
// See issue https://github.com/OpenListTeam/OpenList/issues/724
// The limit applies per user per app, so the limiter should be global.
type limiterType int
const (
limiterList limiterType = iota
limiterLink
limiterOther
)
const (
listRateLimit = 3.9 // 4 per second in document, but we use 3.9 per second to be safe
linkRateLimit = 0.9 // 1 per second in document, but we use 0.9 per second to be safe
otherRateLimit = 14.9 // 15 per second in document, but we use 14.9 per second to be safe
globalLimiterUserID = "" // Global limiter user ID, used to limit the initial requests.
)
type limiter struct {
usedBy int
list *rate.Limiter
link *rate.Limiter
other *rate.Limiter
}
var limiters = make(map[string]*limiter)
var limitersLock = &sync.Mutex{}
func getLimiterForUser(userid string) *limiter {
limitersLock.Lock()
defer limitersLock.Unlock()
defer func() {
// Clean up limiters that are no longer used.
for id, lim := range limiters {
if lim.usedBy <= 0 && id != globalLimiterUserID { // Do not delete the global limiter.
delete(limiters, id)
}
}
}()
if lim, ok := limiters[userid]; ok {
lim.usedBy++
return lim
}
lim := &limiter{
usedBy: 1,
list: rate.NewLimiter(rate.Limit(listRateLimit), 1),
link: rate.NewLimiter(rate.Limit(linkRateLimit), 1),
other: rate.NewLimiter(rate.Limit(otherRateLimit), 1),
}
limiters[userid] = lim
return lim
}
func (l *limiter) wait(ctx context.Context, typ limiterType) error {
if l == nil {
return fmt.Errorf("driver not init")
}
switch typ {
case limiterList:
return l.list.Wait(ctx)
case limiterLink:
return l.link.Wait(ctx)
case limiterOther:
return l.other.Wait(ctx)
default:
return fmt.Errorf("unknown limiter type")
}
}
func (l *limiter) free() {
if l == nil {
return
}
limitersLock.Lock()
defer limitersLock.Unlock()
l.usedBy--
}
func (d *AliyundriveOpen) wait(ctx context.Context, typ limiterType) error {
if d == nil {
return fmt.Errorf("driver not init")
}
if d.ref != nil {
return d.ref.wait(ctx, typ) // If this is a reference driver, wait on the reference driver.
}
return d.limiter.wait(ctx, typ)
}
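
A usage sketch of the limiter above, matching the driver's Init/Drop flow: start on the shared global limiter, swap to the user's limiter once user_id is known, and free it on Drop so stale entries can be cleaned up. This reuses the declarations from the file above.

// example mirrors Init/Drop: global limiter first, per-user once known, freed on Drop.
func example(ctx context.Context, d *AliyundriveOpen, userid string) error {
	d.limiter = getLimiterForUser(globalLimiterUserID) // before user_id is known
	if err := d.wait(ctx, limiterOther); err != nil {  // draws from the global bucket
		return err
	}
	d.limiter.free()
	d.limiter = getLimiterForUser(userid) // per-user buckets from here on
	defer d.limiter.free()                // Drop() does the same in the driver
	return d.wait(ctx, limiterLink)
}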

View File

@ -50,10 +50,10 @@ func calPartSize(fileSize int64) int64 {
return partSize
}
func (d *AliyundriveOpen) getUploadUrl(ctx context.Context, count int, fileId, uploadId string) ([]PartInfo, error) {
func (d *AliyundriveOpen) getUploadUrl(count int, fileId, uploadId string) ([]PartInfo, error) {
partInfoList := makePartInfos(count)
var resp CreateResp
_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/getUploadUrl", http.MethodPost, func(req *resty.Request) {
_, err := d.request("/adrive/v1.0/openFile/getUploadUrl", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"file_id": fileId,
@ -69,7 +69,7 @@ func (d *AliyundriveOpen) uploadPart(ctx context.Context, r io.Reader, partInfo
if d.InternalUpload {
uploadUrl = strings.ReplaceAll(uploadUrl, "https://cn-beijing-data.aliyundrive.net/", "http://ccp-bj29-bj-1592982087.oss-cn-beijing-internal.aliyuncs.com/")
}
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, r)
req, err := http.NewRequestWithContext(ctx, "PUT", uploadUrl, r)
if err != nil {
return err
}
@ -84,10 +84,10 @@ func (d *AliyundriveOpen) uploadPart(ctx context.Context, r io.Reader, partInfo
return nil
}
func (d *AliyundriveOpen) completeUpload(ctx context.Context, fileId, uploadId string) (model.Obj, error) {
func (d *AliyundriveOpen) completeUpload(fileId, uploadId string) (model.Obj, error) {
// 3. complete
var newFile File
_, err := d.request(ctx, limiterOther, "/adrive/v1.0/openFile/complete", http.MethodPost, func(req *resty.Request) {
_, err := d.request("/adrive/v1.0/openFile/complete", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"file_id": fileId,
@ -137,8 +137,11 @@ func (d *AliyundriveOpen) calProofCode(stream model.FileStreamer) (string, error
}
buf := make([]byte, length)
n, err := io.ReadFull(reader, buf)
if n != int(length) {
return "", fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", length, n, err)
if err == io.ErrUnexpectedEOF {
return "", fmt.Errorf("can't read data, expected=%d, got=%d", len(buf), n)
}
if err != nil {
return "", err
}
return base64.StdEncoding.EncodeToString(buf), nil
}
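
calProofCode above reads a small, token-dependent window of the file. Per the aliyundrive open-platform proof rule, the offset comes from the first 16 hex digits of md5(access_token) taken modulo the file size, and the (at most) 8 bytes found there are base64-encoded. A standalone sketch of that rule:

package sketch

import (
	"crypto/md5"
	"encoding/base64"
	"encoding/hex"
	"io"
	"strconv"
)

// proofCode picks the offset from the first 16 hex digits of md5(accessToken)
// modulo the file size and base64-encodes the (at most) 8 bytes found there.
func proofCode(accessToken string, size int64, file io.ReaderAt) (string, error) {
	if size == 0 {
		return "", nil
	}
	sum := md5.Sum([]byte(accessToken))
	n, err := strconv.ParseUint(hex.EncodeToString(sum[:])[:16], 16, 64)
	if err != nil {
		return "", err
	}
	start := int64(n % uint64(size))
	buf := make([]byte, min(int64(8), size-start))
	if _, err := file.ReadAt(buf, start); err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(buf), nil
}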
@ -180,7 +183,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
createData["pre_hash"] = hash
}
var createResp CreateResp
_, err, e := d.requestReturnErrResp(ctx, limiterOther, "/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
_, err, e := d.requestReturnErrResp("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
req.SetBody(createData).SetResult(&createResp)
})
if err != nil {
@ -191,7 +194,9 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
hash := stream.GetHash().GetHash(utils.SHA1)
if len(hash) != utils.SHA1.Width {
_, hash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA1)
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, hash, err = streamPkg.CacheFullInTempFileAndHash(stream, cacheFileProgress, utils.SHA1)
if err != nil {
return nil, err
}
@ -205,7 +210,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
if err != nil {
return nil, fmt.Errorf("cal proof code error: %s", err.Error())
}
_, err = d.request(ctx, limiterOther, "/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
_, err = d.request("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
req.SetBody(createData).SetResult(&createResp)
})
if err != nil {
@ -216,20 +221,17 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
if !createResp.RapidUpload {
// 2. normal upload
log.Debugf("[aliyundive_open] normal upload")
ss, err := streamPkg.NewStreamSectionReader(stream, int(partSize), &up)
if err != nil {
return nil, err
}
preTime := time.Now()
var offset, length int64 = 0, partSize
//var length
for i := 0; i < len(createResp.PartInfoList); i++ {
if utils.IsCanceled(ctx) {
return nil, ctx.Err()
}
// refresh upload url if 50 minutes passed
if time.Since(preTime) > 50*time.Minute {
createResp.PartInfoList, err = d.getUploadUrl(ctx, count, createResp.FileId, createResp.UploadId)
createResp.PartInfoList, err = d.getUploadUrl(count, createResp.FileId, createResp.UploadId)
if err != nil {
return nil, err
}
@ -238,19 +240,22 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
if remain := stream.GetSize() - offset; length > remain {
length = remain
}
rd, err := ss.GetSectionReader(offset, length)
if err != nil {
return nil, err
rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
if rapidUpload {
srd, err := stream.RangeRead(http_range.Range{Start: offset, Length: length})
if err != nil {
return nil, err
}
rd = utils.NewMultiReadable(srd)
}
rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
err = retry.Do(func() error {
rd.Seek(0, io.SeekStart)
_ = rd.Reset()
rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
return d.uploadPart(ctx, rateLimitedRd, createResp.PartInfoList[i])
},
retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second))
ss.FreeSectionReader(rd)
if err != nil {
return nil, err
}
@ -263,5 +268,5 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
log.Debugf("[aliyundrive_open] create file success, resp: %+v", createResp)
// 3. complete
return d.completeUpload(ctx, createResp.FileId, createResp.UploadId)
return d.completeUpload(createResp.FileId, createResp.UploadId)
}

View File

@ -19,7 +19,7 @@ import (
// do others that are not defined in the Driver interface
func (d *AliyundriveOpen) _refreshToken(ctx context.Context) (string, string, error) {
func (d *AliyundriveOpen) _refreshToken() (string, string, error) {
if d.UseOnlineAPI && d.APIAddress != "" {
u := d.APIAddress
var resp struct {
@ -27,17 +27,14 @@ func (d *AliyundriveOpen) _refreshToken(ctx context.Context) (string, string, er
AccessToken string `json:"access_token"`
ErrorMessage string `json:"text"`
}
// set driver_txt according to the AlipanType option
driverTxt := "alicloud_qr"
if d.AlipanType == "alipanTV" {
driverTxt = "alicloud_tv"
}
err := d.wait(ctx, limiterOther)
if err != nil {
return "", "", err
}
_, err = base.RestyClient.R().
_, err := base.RestyClient.R().
SetHeader("User-Agent", "Mozilla/5.0 (Macintosh; Apple macOS 15_5) AppleWebKit/537.36 (KHTML, like Gecko) Safari/537.36 Chrome/138.0.0.0 Openlist/425.6.30").
SetResult(&resp).
SetQueryParams(map[string]string{
@ -57,14 +54,11 @@ func (d *AliyundriveOpen) _refreshToken(ctx context.Context) (string, string, er
}
return resp.RefreshToken, resp.AccessToken, nil
}
// local refresh logic; client_id and client_secret are required
if d.ClientID == "" || d.ClientSecret == "" {
return "", "", fmt.Errorf("empty ClientID or ClientSecret")
}
err := d.wait(ctx, limiterOther)
if err != nil {
return "", "", err
}
url := API_URL + "/oauth/access_token"
//var resp base.TokenResp
var e ErrResp
@ -116,18 +110,18 @@ func getSub(token string) (string, error) {
return utils.Json.Get(bs, "sub").ToString(), nil
}
func (d *AliyundriveOpen) refreshToken(ctx context.Context) error {
func (d *AliyundriveOpen) refreshToken() error {
if d.ref != nil {
return d.ref.refreshToken(ctx)
return d.ref.refreshToken()
}
refresh, access, err := d._refreshToken(ctx)
refresh, access, err := d._refreshToken()
for i := 0; i < 3; i++ {
if err == nil {
break
} else {
log.Errorf("[ali_open] failed to refresh token: %s", err)
}
refresh, access, err = d._refreshToken(ctx)
refresh, access, err = d._refreshToken()
}
if err != nil {
return err
@ -138,12 +132,12 @@ func (d *AliyundriveOpen) refreshToken(ctx context.Context) error {
return nil
}
func (d *AliyundriveOpen) request(ctx context.Context, limitTy limiterType, uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error) {
b, err, _ := d.requestReturnErrResp(ctx, limitTy, uri, method, callback, retry...)
func (d *AliyundriveOpen) request(uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error) {
b, err, _ := d.requestReturnErrResp(uri, method, callback, retry...)
return b, err
}
func (d *AliyundriveOpen) requestReturnErrResp(ctx context.Context, limitTy limiterType, uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error, *ErrResp) {
func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error, *ErrResp) {
req := base.RestyClient.R()
// TODO check whether access_token is expired
req.SetHeader("Authorization", "Bearer "+d.getAccessToken())
@ -155,10 +149,6 @@ func (d *AliyundriveOpen) requestReturnErrResp(ctx context.Context, limitTy limi
}
var e ErrResp
req.SetError(&e)
err := d.wait(ctx, limitTy)
if err != nil {
return nil, err, nil
}
res, err := req.Execute(method, API_URL+uri)
if err != nil {
if res != nil {
@ -169,11 +159,11 @@ func (d *AliyundriveOpen) requestReturnErrResp(ctx context.Context, limitTy limi
isRetry := len(retry) > 0 && retry[0]
if e.Code != "" {
if !isRetry && (utils.SliceContains([]string{"AccessTokenInvalid", "AccessTokenExpired", "I400JD"}, e.Code) || d.getAccessToken() == "") {
err = d.refreshToken(ctx)
err = d.refreshToken()
if err != nil {
return nil, err, nil
}
return d.requestReturnErrResp(ctx, limitTy, uri, method, callback, true)
return d.requestReturnErrResp(uri, method, callback, true)
}
return nil, fmt.Errorf("%s:%s", e.Code, e.Message), &e
}
@ -182,7 +172,7 @@ func (d *AliyundriveOpen) requestReturnErrResp(ctx context.Context, limitTy limi
func (d *AliyundriveOpen) list(ctx context.Context, data base.Json) (*Files, error) {
var resp Files
_, err := d.request(ctx, limiterList, "/adrive/v1.0/openFile/list", http.MethodPost, func(req *resty.Request) {
_, err := d.request("/adrive/v1.0/openFile/list", http.MethodPost, func(req *resty.Request) {
req.SetBody(data).SetResult(&resp)
})
if err != nil {
@ -211,7 +201,7 @@ func (d *AliyundriveOpen) getFiles(ctx context.Context, fileId string) ([]File,
//"video_thumbnail_width": 480,
//"image_thumbnail_width": 480,
}
resp, err := d.list(ctx, data)
resp, err := d.limitList(ctx, data)
if err != nil {
return nil, err
}

View File

@ -2,6 +2,7 @@ package aliyundrive_share
import (
"context"
"fmt"
"net/http"
"time"
@ -11,6 +12,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/cron"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/rateg"
"github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
)
@ -23,7 +25,8 @@ type AliyundriveShare struct {
DriveId string
cron *cron.Cron
limiter *limiter
limitList func(ctx context.Context, dir model.Obj) ([]model.Obj, error)
limitLink func(ctx context.Context, file model.Obj) (*model.Link, error)
}
func (d *AliyundriveShare) Config() driver.Config {
@ -35,26 +38,29 @@ func (d *AliyundriveShare) GetAddition() driver.Additional {
}
func (d *AliyundriveShare) Init(ctx context.Context) error {
d.limiter = getLimiter()
err := d.refreshToken(ctx)
err := d.refreshToken()
if err != nil {
d.limiter.free()
d.limiter = nil
return err
}
err = d.getShareToken(ctx)
err = d.getShareToken()
if err != nil {
d.limiter.free()
d.limiter = nil
return err
}
d.cron = cron.NewCron(time.Hour * 2)
d.cron.Do(func() {
err := d.refreshToken(ctx)
err := d.refreshToken()
if err != nil {
log.Errorf("%+v", err)
}
})
d.limitList = rateg.LimitFnCtx(d.list, rateg.LimitFnOption{
Limit: 4,
Bucket: 1,
})
d.limitLink = rateg.LimitFnCtx(d.link, rateg.LimitFnOption{
Limit: 1,
Bucket: 1,
})
return nil
}
@ -62,14 +68,19 @@ func (d *AliyundriveShare) Drop(ctx context.Context) error {
if d.cron != nil {
d.cron.Stop()
}
d.limiter.free()
d.limiter = nil
d.DriveId = ""
return nil
}
func (d *AliyundriveShare) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
files, err := d.getFiles(ctx, dir.GetID())
if d.limitList == nil {
return nil, fmt.Errorf("driver not init")
}
return d.limitList(ctx, dir)
}
func (d *AliyundriveShare) list(ctx context.Context, dir model.Obj) ([]model.Obj, error) {
files, err := d.getFiles(dir.GetID())
if err != nil {
return nil, err
}
@ -79,6 +90,13 @@ func (d *AliyundriveShare) List(ctx context.Context, dir model.Obj, args model.L
}
func (d *AliyundriveShare) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if d.limitLink == nil {
return nil, fmt.Errorf("driver not init")
}
return d.limitLink(ctx, file)
}
func (d *AliyundriveShare) link(ctx context.Context, file model.Obj) (*model.Link, error) {
data := base.Json{
"drive_id": d.DriveId,
"file_id": file.GetID(),
@ -87,7 +105,7 @@ func (d *AliyundriveShare) Link(ctx context.Context, file model.Obj, args model.
"share_id": d.ShareId,
}
var resp ShareLinkResp
_, err := d.request(ctx, limiterLink, "https://api.alipan.com/v2/file/get_share_link_download_url", http.MethodPost, func(req *resty.Request) {
_, err := d.request("https://api.alipan.com/v2/file/get_share_link_download_url", http.MethodPost, func(req *resty.Request) {
req.SetHeader(CanaryHeaderKey, CanaryHeaderValue).SetBody(data).SetResult(&resp)
})
if err != nil {
@ -117,7 +135,7 @@ func (d *AliyundriveShare) Other(ctx context.Context, args model.OtherArgs) (int
default:
return nil, errs.NotSupport
}
_, err := d.request(ctx, limiterOther, url, http.MethodPost, func(req *resty.Request) {
_, err := d.request(url, http.MethodPost, func(req *resty.Request) {
req.SetBody(data).SetResult(&resp)
})
if err != nil {

View File

@ -1,67 +0,0 @@
package aliyundrive_share
import (
"context"
"fmt"
"golang.org/x/time/rate"
)
// See issue https://github.com/OpenListTeam/OpenList/issues/724
// There seems to be no per-user limit.
type limiterType int
const (
limiterList limiterType = iota
limiterLink
limiterOther
)
const (
listRateLimit = 3.9 // 4 per second in document, but we use 3.9 per second to be safe
linkRateLimit = 0.9 // 1 per second in document, but we use 0.9 per second to be safe
otherRateLimit = 14.9 // 15 per second in document, but we use 14.9 per second to be safe
)
type limiter struct {
list *rate.Limiter
link *rate.Limiter
other *rate.Limiter
}
func getLimiter() *limiter {
return &limiter{
list: rate.NewLimiter(rate.Limit(listRateLimit), 1),
link: rate.NewLimiter(rate.Limit(linkRateLimit), 1),
other: rate.NewLimiter(rate.Limit(otherRateLimit), 1),
}
}
func (l *limiter) wait(ctx context.Context, typ limiterType) error {
if l == nil {
return fmt.Errorf("driver not init")
}
switch typ {
case limiterList:
return l.list.Wait(ctx)
case limiterLink:
return l.link.Wait(ctx)
case limiterOther:
return l.other.Wait(ctx)
default:
return fmt.Errorf("unknown limiter type")
}
}
func (l *limiter) free() {
}
func (d *AliyundriveShare) wait(ctx context.Context, typ limiterType) error {
if d == nil {
return fmt.Errorf("driver not init")
}
//if d.ref != nil {
// return d.ref.wait(ctx, typ) // If this is a reference driver, wait on the reference driver.
//}
return d.limiter.wait(ctx, typ)
}

View File

@ -1,7 +1,6 @@
package aliyundrive_share
import (
"context"
"errors"
"fmt"
@ -16,15 +15,11 @@ const (
CanaryHeaderValue = "client=web,app=share,version=v2.3.1"
)
func (d *AliyundriveShare) refreshToken(ctx context.Context) error {
err := d.wait(ctx, limiterOther)
if err != nil {
return err
}
func (d *AliyundriveShare) refreshToken() error {
url := "https://auth.alipan.com/v2/account/token"
var resp base.TokenResp
var e ErrorResp
_, err = base.RestyClient.R().
_, err := base.RestyClient.R().
SetBody(base.Json{"refresh_token": d.RefreshToken, "grant_type": "refresh_token"}).
SetResult(&resp).
SetError(&e).
@ -41,11 +36,7 @@ func (d *AliyundriveShare) refreshToken(ctx context.Context) error {
}
// do others that are not defined in the Driver interface
func (d *AliyundriveShare) getShareToken(ctx context.Context) error {
err := d.wait(ctx, limiterOther)
if err != nil {
return err
}
func (d *AliyundriveShare) getShareToken() error {
data := base.Json{
"share_id": d.ShareId,
}
@ -54,7 +45,7 @@ func (d *AliyundriveShare) getShareToken(ctx context.Context) error {
}
var e ErrorResp
var resp ShareTokenResp
_, err = base.RestyClient.R().
_, err := base.RestyClient.R().
SetResult(&resp).SetError(&e).SetBody(data).
Post("https://api.alipan.com/v2/share_link/get_share_token")
if err != nil {
@ -67,7 +58,7 @@ func (d *AliyundriveShare) getShareToken(ctx context.Context) error {
return nil
}
func (d *AliyundriveShare) request(ctx context.Context, limitTy limiterType, url, method string, callback base.ReqCallback) ([]byte, error) {
func (d *AliyundriveShare) request(url, method string, callback base.ReqCallback) ([]byte, error) {
var e ErrorResp
req := base.RestyClient.R().
SetError(&e).
@ -80,10 +71,6 @@ func (d *AliyundriveShare) request(ctx context.Context, limitTy limiterType, url
} else {
req.SetBody("{}")
}
err := d.wait(ctx, limitTy)
if err != nil {
return nil, err
}
resp, err := req.Execute(method, url)
if err != nil {
return nil, err
@ -91,14 +78,14 @@ func (d *AliyundriveShare) request(ctx context.Context, limitTy limiterType, url
if e.Code != "" {
if e.Code == "AccessTokenInvalid" || e.Code == "ShareLinkTokenInvalid" {
if e.Code == "AccessTokenInvalid" {
err = d.refreshToken(ctx)
err = d.refreshToken()
} else {
err = d.getShareToken(ctx)
err = d.getShareToken()
}
if err != nil {
return nil, err
}
return d.request(ctx, limitTy, url, method, callback)
return d.request(url, method, callback)
} else {
return nil, errors.New(e.Code + ": " + e.Message)
}
@ -106,7 +93,7 @@ func (d *AliyundriveShare) request(ctx context.Context, limitTy limiterType, url
return resp.Body(), nil
}
func (d *AliyundriveShare) getFiles(ctx context.Context, fileId string) ([]File, error) {
func (d *AliyundriveShare) getFiles(fileId string) ([]File, error) {
files := make([]File, 0)
data := base.Json{
"image_thumbnail_process": "image/resize,w_160/format,jpeg",
@ -123,10 +110,6 @@ func (d *AliyundriveShare) getFiles(ctx context.Context, fileId string) ([]File,
if data["marker"] == "first" {
data["marker"] = ""
}
err := d.wait(ctx, limiterList)
if err != nil {
return nil, err
}
var e ErrorResp
var resp ListResp
res, err := base.RestyClient.R().
@ -140,11 +123,11 @@ func (d *AliyundriveShare) getFiles(ctx context.Context, fileId string) ([]File,
log.Debugf("aliyundrive share get files: %s", res.String())
if e.Code != "" {
if e.Code == "AccessTokenInvalid" || e.Code == "ShareLinkTokenInvalid" {
err = d.getShareToken(ctx)
err = d.getShareToken()
if err != nil {
return nil, err
}
return d.getFiles(ctx, fileId)
return d.getFiles(fileId)
}
return nil, errors.New(e.Message)
}

View File

@ -203,12 +203,11 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
streamSize := stream.GetSize()
sliceSize := d.getSliceSize(streamSize)
count := 1
if streamSize > sliceSize {
count = int((streamSize + sliceSize - 1) / sliceSize)
}
count := int(streamSize / sliceSize)
lastBlockSize := streamSize % sliceSize
if lastBlockSize == 0 {
if lastBlockSize > 0 {
count++
} else {
lastBlockSize = sliceSize
}

View File

@ -262,12 +262,11 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
// compute the required values
streamSize := stream.GetSize()
count := 1
if streamSize > DEFAULT {
count = int((streamSize + DEFAULT - 1) / DEFAULT)
}
count := int(streamSize / DEFAULT)
lastBlockSize := streamSize % DEFAULT
if lastBlockSize == 0 {
if lastBlockSize > 0 {
count++
} else {
lastBlockSize = DEFAULT
}

View File

@ -255,7 +255,7 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStr
},
UpdateProgress: up,
})
req, err := http.NewRequestWithContext(ctx, http.MethodPost, "https://pan-yz.chaoxing.com/upload", r)
req, err := http.NewRequestWithContext(ctx, "POST", "https://pan-yz.chaoxing.com/upload", r)
if err != nil {
return err
}

View File

@ -167,7 +167,7 @@ func (d *ChaoXing) Login() (string, error) {
return "", err
}
// Create the request
req, err := http.NewRequest(http.MethodPost, "https://passport2.chaoxing.com/fanyalogin", body)
req, err := http.NewRequest("POST", "https://passport2.chaoxing.com/fanyalogin", body)
if err != nil {
return "", err
}

View File

@ -18,7 +18,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/setting"
streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/cookie"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
@ -237,32 +236,28 @@ func (d *Cloudreve) upLocal(ctx context.Context, stream model.FileStreamer, u Up
}
func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
DEFAULT := int64(u.ChunkSize)
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
if err != nil {
return err
}
uploadUrl := u.UploadURLs[0]
credential := u.Credential
var finish int64 = 0
var chunk int = 0
DEFAULT := int64(u.ChunkSize)
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := stream.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[Cloudreve-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
rd, err := ss.GetSectionReader(finish, byteSize)
if err != nil {
return err
}
err = retry.Do(
err := retry.Do(
func() error {
rd.Seek(0, io.SeekStart)
utils.Log.Debugf("[Cloudreve-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadUrl+"?chunk="+strconv.Itoa(chunk),
driver.NewLimitedUploadStream(ctx, rd))
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
if err != nil {
return err
}
@ -295,7 +290,6 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.FreeSectionReader(rd)
if err != nil {
return err
}
@ -307,29 +301,26 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
}
func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
DEFAULT := int64(u.ChunkSize)
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
if err != nil {
return err
}
uploadUrl := u.UploadURLs[0]
var finish int64 = 0
DEFAULT := int64(u.ChunkSize)
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := stream.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[Cloudreve-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
rd, err := ss.GetSectionReader(finish, byteSize)
if err != nil {
return err
}
err = retry.Do(
err := retry.Do(
func() error {
rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, rd))
utils.Log.Debugf("[Cloudreve-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl,
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
if err != nil {
return err
}
@ -355,7 +346,6 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.FreeSectionReader(rd)
if err != nil {
return err
}
@ -369,31 +359,27 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
}
func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
DEFAULT := int64(u.ChunkSize)
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
if err != nil {
return err
}
var finish int64 = 0
var chunk int = 0
var etags []string
DEFAULT := int64(u.ChunkSize)
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := stream.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[Cloudreve-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
rd, err := ss.GetSectionReader(finish, byteSize)
if err != nil {
return err
}
err = retry.Do(
err := retry.Do(
func() error {
rd.Seek(0, io.SeekStart)
utils.Log.Debugf("[Cloudreve-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
req, err := http.NewRequestWithContext(ctx, http.MethodPut, u.UploadURLs[chunk],
driver.NewLimitedUploadStream(ctx, rd))
driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
if err != nil {
return err
}
@ -418,7 +404,6 @@ func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u Uploa
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.FreeSectionReader(rd)
if err != nil {
return err
}
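
The three Cloudreve hunks above swap between two chunking strategies: buffering each chunk into memory with io.ReadFull versus carving reusable sections with streamPkg.NewStreamSectionReader. A condensed sketch of the section-reader pattern, with request construction elided (names as in the hunks):

func upChunked(ctx context.Context, stream model.FileStreamer, DEFAULT int64, up driver.UpdateProgress) error {
    ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
    if err != nil {
        return err
    }
    var finish int64 = 0
    for finish < stream.GetSize() {
        byteSize := min(stream.GetSize()-finish, DEFAULT)
        rd, err := ss.GetSectionReader(finish, byteSize) // reusable view of one chunk
        if err != nil {
            return err
        }
        err = retry.Do(func() error {
            rd.Seek(0, io.SeekStart) // rewind so a retry re-sends the same bytes
            // ... build and send the HTTP request from rd ...
            return nil
        }, retry.Attempts(3), retry.DelayType(retry.BackOffDelay), retry.Delay(time.Second))
        ss.FreeSectionReader(rd) // hand the section's buffer back to the pool
        if err != nil {
            return err
        }
        finish += byteSize
    }
    return nil
}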

View File

@ -19,7 +19,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/setting"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
@ -252,32 +251,28 @@ func (d *CloudreveV4) upLocal(ctx context.Context, file model.FileStreamer, u Fi
}
func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
DEFAULT := int64(u.ChunkSize)
ss, err := stream.NewStreamSectionReader(file, int(DEFAULT), &up)
if err != nil {
return err
}
uploadUrl := u.UploadUrls[0]
credential := u.Credential
var finish int64 = 0
var chunk int = 0
DEFAULT := int64(u.ChunkSize)
for finish < file.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := file.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[CloudreveV4-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
rd, err := ss.GetSectionReader(finish, byteSize)
if err != nil {
return err
}
err = retry.Do(
err := retry.Do(
func() error {
rd.Seek(0, io.SeekStart)
utils.Log.Debugf("[CloudreveV4-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(file, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadUrl+"?chunk="+strconv.Itoa(chunk),
driver.NewLimitedUploadStream(ctx, rd))
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
if err != nil {
return err
}
@ -310,7 +305,6 @@ func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u F
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.FreeSectionReader(rd)
if err != nil {
return err
}
@ -322,29 +316,26 @@ func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u F
}
func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
DEFAULT := int64(u.ChunkSize)
ss, err := stream.NewStreamSectionReader(file, int(DEFAULT), &up)
if err != nil {
return err
}
uploadUrl := u.UploadUrls[0]
var finish int64 = 0
DEFAULT := int64(u.ChunkSize)
for finish < file.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := file.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[CloudreveV4-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
rd, err := ss.GetSectionReader(finish, byteSize)
if err != nil {
return err
}
err = retry.Do(
err := retry.Do(
func() error {
rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, rd))
utils.Log.Debugf("[CloudreveV4-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(file, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl,
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
if err != nil {
return err
}
@ -371,7 +362,6 @@ func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.FreeSectionReader(rd)
if err != nil {
return err
}
@ -385,31 +375,27 @@ func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u
}
func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
DEFAULT := int64(u.ChunkSize)
ss, err := stream.NewStreamSectionReader(file, int(DEFAULT), &up)
if err != nil {
return err
}
var finish int64 = 0
var chunk int = 0
var etags []string
DEFAULT := int64(u.ChunkSize)
for finish < file.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := file.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[CloudreveV4-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
rd, err := ss.GetSectionReader(finish, byteSize)
if err != nil {
return err
}
err = retry.Do(
err := retry.Do(
func() error {
rd.Seek(0, io.SeekStart)
utils.Log.Debugf("[CloudreveV4-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(file, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
req, err := http.NewRequestWithContext(ctx, http.MethodPut, u.UploadUrls[chunk],
driver.NewLimitedUploadStream(ctx, rd))
driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
if err != nil {
return err
}
@ -435,7 +421,6 @@ func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileU
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.FreeSectionReader(rd)
if err != nil {
return err
}

View File

@ -292,10 +292,10 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
if offset == 0 && limit > 0 {
fileHeader = make([]byte, fileHeaderSize)
n, err := io.ReadFull(remoteReader, fileHeader)
n, _ := io.ReadFull(remoteReader, fileHeader)
if n != fileHeaderSize {
fileHeader = nil
return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", fileHeaderSize, n, err)
return nil, fmt.Errorf("can't read data, expected=%d, got=%d", fileHeaderSize, n)
}
if limit <= fileHeaderSize {
remoteReader.Close()
@ -317,7 +317,8 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
}
return readSeeker, nil
}),
SyncClosers: utils.NewSyncClosers(remoteLink),
SyncClosers: utils.NewSyncClosers(remoteLink),
ContentLength: remoteSize,
}, nil
}
@ -401,6 +402,7 @@ func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, streamer model.FileSt
},
Reader: wrappedIn,
Mimetype: "application/octet-stream",
WebPutAsTask: streamer.NeedStore(),
ForceStreamUpload: true,
Exist: streamer.GetExist(),
}
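
One note on the header read earlier in this hunk: io.ReadFull returns io.ErrUnexpectedEOF when it reads fewer than len(buf) bytes, so a non-nil error and a short count always coincide. A minimal sketch of that contract (helper name is ours):

func readHeader(r io.Reader, headerSize int) ([]byte, error) {
    buf := make([]byte, headerSize)
    // err is io.EOF if nothing was read, io.ErrUnexpectedEOF on a partial read
    n, err := io.ReadFull(r, buf)
    if err != nil {
        return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", headerSize, n, err)
    }
    return buf, nil
}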

View File

@ -236,7 +236,7 @@ func (d *Doubao) Put(ctx context.Context, dstDir model.Obj, file model.FileStrea
// Choose the upload method based on file size
if file.GetSize() <= 1*utils.MB { // up to 1MB: plain upload
return d.Upload(ctx, &uploadConfig, dstDir, file, up, dataType)
return d.Upload(&uploadConfig, dstDir, file, up, dataType)
}
// Larger files use multipart upload
return d.UploadByMultipart(ctx, &uploadConfig, file.GetSize(), dstDir, file, up, dataType)

View File

@ -129,7 +129,7 @@ type BuiAuditInfo struct {
AuditInfo AuditInfo `json:"audit_info"`
IsAuditing bool `json:"is_auditing"`
AuditStatus int `json:"audit_status"`
LastUpdateTime int64 `json:"last_update_time"`
LastUpdateTime int `json:"last_update_time"`
UnpassReason string `json:"unpass_reason"`
Details Details `json:"details"`
}
@ -184,7 +184,7 @@ type UserInfo struct {
SecUserID string `json:"sec_user_id"`
SessionKey string `json:"session_key"`
UseHmRegion bool `json:"use_hm_region"`
UserCreateTime int64 `json:"user_create_time"`
UserCreateTime int `json:"user_create_time"`
UserID int64 `json:"user_id"`
UserIDStr string `json:"user_id_str"`
UserVerified bool `json:"user_verified"`

View File

@ -24,7 +24,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
@ -448,67 +447,41 @@ func (d *Doubao) uploadNode(uploadConfig *UploadConfig, dir model.Obj, file mode
}
// Upload implements the plain (single-request) upload
func (d *Doubao) Upload(ctx context.Context, config *UploadConfig, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, dataType string) (model.Obj, error) {
ss, err := stream.NewStreamSectionReader(file, int(file.GetSize()), &up)
if err != nil {
return nil, err
}
reader, err := ss.GetSectionReader(0, file.GetSize())
func (d *Doubao) Upload(config *UploadConfig, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, dataType string) (model.Obj, error) {
data, err := io.ReadAll(file)
if err != nil {
return nil, err
}
// Compute the CRC32 checksum
crc32Hash := crc32.NewIEEE()
w, err := utils.CopyWithBuffer(crc32Hash, reader)
if w != file.GetSize() {
return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", file.GetSize(), w, err)
}
crc32Hash.Write(data)
crc32Value := hex.EncodeToString(crc32Hash.Sum(nil))
// Build the request URL
uploadNode := config.InnerUploadAddress.UploadNodes[0]
storeInfo := uploadNode.StoreInfos[0]
uploadUrl := fmt.Sprintf("https://%s/upload/v1/%s", uploadNode.UploadHost, storeInfo.StoreURI)
rateLimitedRd := driver.NewLimitedUploadStream(ctx, reader)
err = d._retryOperation("Upload", func() error {
reader.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadUrl, rateLimitedRd)
if err != nil {
return err
}
req.Header = map[string][]string{
"Referer": {BaseURL + "/"},
"Origin": {BaseURL},
"User-Agent": {UserAgent},
"X-Storage-U": {d.UserId},
"Authorization": {storeInfo.Auth},
"Content-Type": {"application/octet-stream"},
"Content-Crc32": {crc32Value},
"Content-Length": {fmt.Sprintf("%d", file.GetSize())},
"Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))},
}
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
bytes, _ := io.ReadAll(res.Body)
resp := UploadResp{}
utils.Json.Unmarshal(bytes, &resp)
if resp.Code != 2000 {
return fmt.Errorf("upload part failed: %s", resp.Message)
} else if resp.Data.Crc32 != crc32Value {
return fmt.Errorf("upload part failed: crc32 mismatch, expected %s, got %s", crc32Value, resp.Data.Crc32)
}
return nil
})
ss.FreeSectionReader(reader)
if err != nil {
uploadResp := UploadResp{}
if _, err = d.uploadRequest(uploadUrl, http.MethodPost, storeInfo, func(req *resty.Request) {
req.SetHeaders(map[string]string{
"Content-Type": "application/octet-stream",
"Content-Crc32": crc32Value,
"Content-Length": fmt.Sprintf("%d", len(data)),
"Content-Disposition": fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI)),
})
req.SetBody(data)
}, &uploadResp); err != nil {
return nil, err
}
if uploadResp.Code != 2000 {
return nil, fmt.Errorf("upload failed: %s", uploadResp.Message)
}
uploadNodeResp, err := d.uploadNode(config, dstDir, file, dataType)
if err != nil {
return nil, err
@ -543,107 +516,68 @@ func (d *Doubao) UploadByMultipart(ctx context.Context, config *UploadConfig, fi
if config.InnerUploadAddress.AdvanceOption.SliceSize > 0 {
chunkSize = int64(config.InnerUploadAddress.AdvanceOption.SliceSize)
}
ss, err := stream.NewStreamSectionReader(file, int(chunkSize), &up)
if err != nil {
return nil, err
}
totalParts := (fileSize + chunkSize - 1) / chunkSize
// Create the slice of part records
parts := make([]UploadPart, totalParts)
// Cache the file to a temp file
tempFile, err := file.CacheFullInTempFile()
if err != nil {
return nil, fmt.Errorf("failed to cache file: %w", err)
}
up(10.0) // update progress
// Configure parallel upload
thread := min(int(totalParts), d.uploadThread)
threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
retry.Attempts(MaxRetryAttempts),
threadG, uploadCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
retry.Attempts(1),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay),
retry.MaxJitter(200*time.Millisecond),
)
retry.DelayType(retry.BackOffDelay))
var partsMutex sync.Mutex
// Upload all parts in parallel
hash := crc32.NewIEEE()
for partIndex := range totalParts {
for partIndex := int64(0); partIndex < totalParts; partIndex++ {
if utils.IsCanceled(uploadCtx) {
break
}
partIndex := partIndex
partNumber := partIndex + 1 // part numbers start at 1
// Compute this part's size and offset
offset := partIndex * chunkSize
size := chunkSize
if partIndex == totalParts-1 {
size = fileSize - offset
}
var reader *stream.SectionReader
var rateLimitedRd io.Reader
crc32Value := ""
threadG.GoWithLifecycle(errgroup.Lifecycle{
Before: func(ctx context.Context) error {
if reader == nil {
var err error
reader, err = ss.GetSectionReader(offset, size)
if err != nil {
return err
}
hash.Reset()
w, err := utils.CopyWithBuffer(hash, reader)
if w != size {
return fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", size, w, err)
}
crc32Value = hex.EncodeToString(hash.Sum(nil))
rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
}
return nil
},
Do: func(ctx context.Context) error {
reader.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("%s?uploadid=%s&part_number=%d&phase=transfer", uploadUrl, uploadID, partNumber), rateLimitedRd)
if err != nil {
return err
}
req.Header = map[string][]string{
"Referer": {BaseURL + "/"},
"Origin": {BaseURL},
"User-Agent": {UserAgent},
"X-Storage-U": {d.UserId},
"Authorization": {storeInfo.Auth},
"Content-Type": {"application/octet-stream"},
"Content-Crc32": {crc32Value},
"Content-Length": {fmt.Sprintf("%d", size)},
"Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))},
}
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
bytes, _ := io.ReadAll(res.Body)
uploadResp := UploadResp{}
utils.Json.Unmarshal(bytes, &uploadResp)
if uploadResp.Code != 2000 {
return fmt.Errorf("upload part failed: %s", uploadResp.Message)
} else if uploadResp.Data.Crc32 != crc32Value {
return fmt.Errorf("upload part failed: crc32 mismatch, expected %s, got %s", crc32Value, uploadResp.Data.Crc32)
}
// Record the successfully uploaded part
partsMutex.Lock()
parts[partIndex] = UploadPart{
PartNumber: strconv.FormatInt(partNumber, 10),
Etag: uploadResp.Data.Etag,
Crc32: crc32Value,
}
partsMutex.Unlock()
// Update progress
progress := 10.0 + 90.0*float64(threadG.Success()+1)/float64(totalParts)
up(math.Min(progress, 95.0))
return nil
},
After: func(err error) {
ss.FreeSectionReader(reader)
},
threadG.Go(func(ctx context.Context) error {
// Compute this part's size and offset
offset := partIndex * chunkSize
size := chunkSize
if partIndex == totalParts-1 {
size = fileSize - offset
}
limitedReader := driver.NewLimitedUploadStream(ctx, io.NewSectionReader(tempFile, offset, size))
// Read the part into memory
data, err := io.ReadAll(limitedReader)
if err != nil {
return fmt.Errorf("failed to read part %d: %w", partNumber, err)
}
// Compute the CRC32 checksum
crc32Value := calculateCRC32(data)
// Upload the part via _retryOperation
var uploadPart UploadPart
if err = d._retryOperation(fmt.Sprintf("Upload part %d", partNumber), func() error {
var err error
uploadPart, err = d.uploadPart(config, uploadUrl, uploadID, partNumber, data, crc32Value)
return err
}); err != nil {
return fmt.Errorf("part %d upload failed: %w", partNumber, err)
}
// Record the successfully uploaded part
partsMutex.Lock()
parts[partIndex] = UploadPart{
PartNumber: strconv.FormatInt(partNumber, 10),
Etag: uploadPart.Etag,
Crc32: crc32Value,
}
partsMutex.Unlock()
// Update progress
progress := 10.0 + 90.0*float64(threadG.Success()+1)/float64(totalParts)
up(math.Min(progress, 95.0))
return nil
})
}
@ -746,6 +680,42 @@ func (d *Doubao) initMultipartUpload(config *UploadConfig, uploadUrl string, sto
return uploadResp.Data.UploadId, nil
}
// uploadPart uploads a single part
func (d *Doubao) uploadPart(config *UploadConfig, uploadUrl, uploadID string, partNumber int64, data []byte, crc32Value string) (resp UploadPart, err error) {
uploadResp := UploadResp{}
storeInfo := config.InnerUploadAddress.UploadNodes[0].StoreInfos[0]
_, err = d.uploadRequest(uploadUrl, http.MethodPost, storeInfo, func(req *resty.Request) {
req.SetHeaders(map[string]string{
"Content-Type": "application/octet-stream",
"Content-Crc32": crc32Value,
"Content-Length": fmt.Sprintf("%d", len(data)),
"Content-Disposition": fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI)),
})
req.SetQueryParams(map[string]string{
"uploadid": uploadID,
"part_number": strconv.FormatInt(partNumber, 10),
"phase": "transfer",
})
req.SetBody(data)
req.SetContentLength(true)
}, &uploadResp)
if err != nil {
return resp, err
}
if uploadResp.Code != 2000 {
return resp, fmt.Errorf("upload part failed: %s", uploadResp.Message)
} else if uploadResp.Data.Crc32 != crc32Value {
return resp, fmt.Errorf("upload part failed: crc32 mismatch, expected %s, got %s", crc32Value, uploadResp.Data.Crc32)
}
return uploadResp.Data, nil
}
// Complete the multipart upload
func (d *Doubao) completeMultipartUpload(config *UploadConfig, uploadUrl, uploadID string, parts []UploadPart) error {
uploadResp := UploadResp{}
@ -814,6 +784,13 @@ func (d *Doubao) commitMultipartUpload(uploadConfig *UploadConfig) error {
return nil
}
// calculateCRC32 computes the CRC32 checksum of data
func calculateCRC32(data []byte) string {
hash := crc32.NewIEEE()
hash.Write(data)
return hex.EncodeToString(hash.Sum(nil))
}
// _retryOperation retries the given operation
func (d *Doubao) _retryOperation(operation string, fn func() error) error {
return retry.Do(

View File

@ -79,11 +79,11 @@ type ShareInfo struct {
RiskReviewStatus int `json:"risk_review_status"`
ConversationID string `json:"conversation_id"`
ParentID string `json:"parent_id"`
CreateTime int64 `json:"create_time"`
UpdateTime int64 `json:"update_time"`
CreateTime int `json:"create_time"`
UpdateTime int `json:"update_time"`
} `json:"first_node"`
NodeCount int `json:"node_count"`
CreateTime int64 `json:"create_time"`
CreateTime int `json:"create_time"`
Channel string `json:"channel"`
InfluencerType int `json:"influencer_type"`
}
@ -111,8 +111,8 @@ type FilePath []struct {
RiskReviewStatus int `json:"risk_review_status"`
ConversationID string `json:"conversation_id"`
ParentID string `json:"parent_id"`
CreateTime int64 `json:"create_time"`
UpdateTime int64 `json:"update_time"`
CreateTime int `json:"create_time"`
UpdateTime int `json:"update_time"`
}
type GetFileUrlResp struct {

View File

@ -192,11 +192,12 @@ func (d *Dropbox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
url := d.contentBase + "/2/files/upload_session/append_v2"
reader := driver.NewLimitedUploadStream(ctx, io.LimitReader(stream, PartSize))
req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, reader)
req, err := http.NewRequest(http.MethodPost, url, reader)
if err != nil {
log.Errorf("failed to update file when append to upload session, err: %+v", err)
return err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Authorization", "Bearer "+d.AccessToken)

View File

@ -13,7 +13,7 @@ type Addition struct {
ClientSecret string `json:"client_secret" required:"false" help:"Keep it empty if you don't have one"`
AccessToken string
RefreshToken string `json:"refresh_token" required:"true"`
RootNamespaceId string `json:"RootNamespaceId" required:"false"`
RootNamespaceId string
}
var config = driver.Config{

View File

@ -169,19 +169,13 @@ func (d *Dropbox) getFiles(ctx context.Context, path string) ([]File, error) {
func (d *Dropbox) finishUploadSession(ctx context.Context, toPath string, offset int64, sessionId string) error {
url := d.contentBase + "/2/files/upload_session/finish"
req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil)
req, err := http.NewRequest(http.MethodPost, url, nil)
if err != nil {
return err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Authorization", "Bearer "+d.AccessToken)
if d.RootNamespaceId != "" {
apiPathRootJson, err := d.buildPathRootHeader()
if err != nil {
return err
}
req.Header.Set("Dropbox-API-Path-Root", apiPathRootJson)
}
uploadFinishArgs := UploadFinishArgs{
Commit: struct {
@ -220,19 +214,13 @@ func (d *Dropbox) finishUploadSession(ctx context.Context, toPath string, offset
func (d *Dropbox) startUploadSession(ctx context.Context) (string, error) {
url := d.contentBase + "/2/files/upload_session/start"
req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil)
req, err := http.NewRequest(http.MethodPost, url, nil)
if err != nil {
return "", err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Authorization", "Bearer "+d.AccessToken)
if d.RootNamespaceId != "" {
apiPathRootJson, err := d.buildPathRootHeader()
if err != nil {
return "", err
}
req.Header.Set("Dropbox-API-Path-Root", apiPathRootJson)
}
req.Header.Set("Dropbox-API-Arg", "{\"close\":false}")
res, err := base.HttpClient.Do(req)
@ -247,11 +235,3 @@ func (d *Dropbox) startUploadSession(ctx context.Context) (string, error) {
_ = res.Body.Close()
return sessionId, nil
}
func (d *Dropbox) buildPathRootHeader() (string, error) {
return utils.Json.MarshalToString(map[string]interface{}{
".tag": "root",
"root": d.RootNamespaceId,
})
}
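
For reference, the removed buildPathRootHeader serialized a two-field JSON object for the Dropbox-API-Path-Root header; an equivalent sketch using only the standard library (helper name is ours):

import "encoding/json"

func pathRootHeader(rootNamespaceID string) (string, error) {
    // Produces e.g. {".tag":"root","root":"<namespace-id>"}
    b, err := json.Marshal(map[string]string{
        ".tag": "root",
        "root": rootNamespaceID,
    })
    return string(b), err
}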

View File

@ -31,13 +31,13 @@ func (c *customTokenSource) Token() (*oauth2.Token, error) {
v.Set("client_id", c.config.ClientID)
v.Set("client_secret", c.config.ClientSecret)
req, err := http.NewRequestWithContext(c.ctx, http.MethodPost, c.config.TokenURL, strings.NewReader(v.Encode()))
req, err := http.NewRequest("POST", c.config.TokenURL, strings.NewReader(v.Encode()))
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
resp, err := http.DefaultClient.Do(req)
resp, err := http.DefaultClient.Do(req.WithContext(c.ctx))
if err != nil {
return nil, err
}
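
The change above is behavior-preserving: binding the context at construction and binding it just before the round trip are equivalent for cancellation. A minimal sketch (helper name is ours):

func newTokenRequest(ctx context.Context, tokenURL string, body io.Reader) (*http.Request, error) {
    // Same effect as http.NewRequest(http.MethodPost, ...) followed by
    // req = req.WithContext(ctx); WithContext returns a shallow copy,
    // so the reassignment is required in that form.
    return http.NewRequestWithContext(ctx, http.MethodPost, tokenURL, body)
}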

View File

@ -2,15 +2,12 @@ package ftp
import (
"context"
"errors"
"io"
stdpath "path"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/jlaffaye/ftp"
)
@ -19,9 +16,6 @@ type FTP struct {
model.Storage
Addition
conn *ftp.ServerConn
ctx context.Context
cancel context.CancelFunc
}
func (d *FTP) Config() driver.Config {
@ -33,16 +27,12 @@ func (d *FTP) GetAddition() driver.Additional {
}
func (d *FTP) Init(ctx context.Context) error {
d.ctx, d.cancel = context.WithCancel(context.Background())
var err error
d.conn, err = d._login(ctx)
return err
return d._login()
}
func (d *FTP) Drop(ctx context.Context) error {
if d.conn != nil {
_ = d.conn.Quit()
d.cancel()
_ = d.conn.Logout()
}
return nil
}
@ -72,51 +62,25 @@ func (d *FTP) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]m
}
func (d *FTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
conn, err := d._login(ctx)
if err != nil {
if err := d.login(); err != nil {
return nil, err
}
path := encode(file.GetPath(), d.Encoding)
size := file.GetSize()
resultRangeReader := func(context context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
length := httpRange.Length
if length < 0 || httpRange.Start+length > size {
length = size - httpRange.Start
}
var c *ftp.ServerConn
if ctx == context {
c = conn
} else {
var err error
c, err = d._login(context)
if err != nil {
return nil, err
}
}
resp, err := c.RetrFrom(path, uint64(httpRange.Start))
if err != nil {
return nil, err
}
var close utils.CloseFunc
if context == ctx {
close = resp.Close
} else {
close = func() error {
return errors.Join(resp.Close(), c.Quit())
}
}
return utils.ReadCloser{
Reader: io.LimitReader(resp, length),
Closer: close,
remoteFile := NewFileReader(d.conn, encode(file.GetPath(), d.Encoding), file.GetSize())
if remoteFile != nil && !d.Config().OnlyLinkMFile {
return &model.Link{
RangeReader: &model.FileRangeReader{
RangeReaderIF: stream.RateLimitRangeReaderFunc(stream.GetRangeReaderFromMFile(file.GetSize(), remoteFile)),
},
SyncClosers: utils.NewSyncClosers(remoteFile),
}, nil
}
return &model.Link{
RangeReader: &model.FileRangeReader{
RangeReaderIF: stream.RateLimitRangeReaderFunc(resultRangeReader),
MFile: &stream.RateLimitFile{
File: remoteFile,
Limiter: stream.ServerDownloadLimit,
Ctx: ctx,
},
SyncClosers: utils.NewSyncClosers(utils.CloseFunc(conn.Quit)),
}, nil
}

View File

@ -33,7 +33,7 @@ type Addition struct {
var config = driver.Config{
Name: "FTP",
LocalSort: true,
OnlyLinkMFile: false,
OnlyLinkMFile: true,
DefaultRoot: "/",
NoLinkURL: true,
}

View File

@ -1,8 +1,11 @@
package ftp
import (
"context"
"fmt"
"io"
"os"
"sync"
"sync/atomic"
"time"
"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
@ -12,32 +15,112 @@ import (
// do others that not defined in Driver interface
func (d *FTP) login() error {
_, err, _ := singleflight.AnyGroup.Do(fmt.Sprintf("FTP.login:%p", d), func() (any, error) {
var err error
if d.conn != nil {
err = d.conn.NoOp()
if err != nil {
d.conn.Quit()
d.conn = nil
}
}
if d.conn == nil {
d.conn, err = d._login(d.ctx)
}
return nil, err
err, _, _ := singleflight.ErrorGroup.Do(fmt.Sprintf("FTP.login:%p", d), func() (error, error) {
return d._login(), nil
})
return err
}
func (d *FTP) _login(ctx context.Context) (*ftp.ServerConn, error) {
conn, err := ftp.Dial(d.Address, ftp.DialWithShutTimeout(10*time.Second), ftp.DialWithContext(ctx))
func (d *FTP) _login() error {
if d.conn != nil {
_, err := d.conn.CurrentDir()
if err == nil {
return nil
}
}
conn, err := ftp.Dial(d.Address, ftp.DialWithShutTimeout(10*time.Second))
if err != nil {
return nil, err
return err
}
err = conn.Login(d.Username, d.Password)
if err != nil {
conn.Quit()
return nil, err
return err
}
return conn, nil
d.conn = conn
return nil
}
// FileReader An FTP file reader that implements io.MFile for seeking.
type FileReader struct {
conn *ftp.ServerConn
resp *ftp.Response
offset atomic.Int64
readAtOffset int64
mu sync.Mutex
path string
size int64
}
func NewFileReader(conn *ftp.ServerConn, path string, size int64) *FileReader {
return &FileReader{
conn: conn,
path: path,
size: size,
}
}
func (r *FileReader) Read(buf []byte) (n int, err error) {
n, err = r.ReadAt(buf, r.offset.Load())
r.offset.Add(int64(n))
return
}
func (r *FileReader) ReadAt(buf []byte, off int64) (n int, err error) {
if off < 0 {
return -1, os.ErrInvalid
}
r.mu.Lock()
defer r.mu.Unlock()
if off != r.readAtOffset {
// have to restart the connection to reach the requested offset
_ = r.resp.Close()
r.resp = nil
}
if r.resp == nil {
r.resp, err = r.conn.RetrFrom(r.path, uint64(off))
r.readAtOffset = off
if err != nil {
return 0, err
}
}
n, err = r.resp.Read(buf)
r.readAtOffset += int64(n)
return
}
func (r *FileReader) Seek(offset int64, whence int) (int64, error) {
oldOffset := r.offset.Load()
var newOffset int64
switch whence {
case io.SeekStart:
newOffset = offset
case io.SeekCurrent:
newOffset = oldOffset + offset
case io.SeekEnd:
return r.size, nil
default:
return -1, os.ErrInvalid
}
if newOffset < 0 {
// offset out of range
return oldOffset, os.ErrInvalid
}
if newOffset == oldOffset {
// offset not changed, so return directly
return oldOffset, nil
}
r.offset.Store(newOffset)
return newOffset, nil
}
func (r *FileReader) Close() error {
if r.resp != nil {
return r.resp.Close()
}
return nil
}
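
A hypothetical usage sketch of the reader above: sequential reads reuse a single RETR data connection, while a ReadAt at a different offset closes it and restarts RETR from the new offset (conn, path, and size assumed in scope):

r := NewFileReader(conn, "/remote/video.mp4", size)
defer r.Close()
buf := make([]byte, 64*1024)
n, _ := r.ReadAt(buf, 0)        // opens RETR at offset 0
_, _ = r.ReadAt(buf, int64(n))  // continues on the same data connection
_, _ = r.ReadAt(buf, size-1024) // offset jump: reconnects via RetrFrom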

View File

@ -162,7 +162,7 @@ func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
SetBody(driver.NewLimitedUploadStream(ctx, stream))
}, nil)
} else {
err = d.chunkUpload(ctx, stream, putUrl, up)
err = d.chunkUpload(ctx, stream, putUrl)
}
return err
}

View File

@ -5,20 +5,17 @@ import (
"crypto/x509"
"encoding/pem"
"fmt"
"io"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"net/http"
"os"
"regexp"
"strconv"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/avast/retry-go"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/go-resty/resty/v2"
"github.com/golang-jwt/jwt/v4"
@ -254,60 +251,28 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {
return res, nil
}
func (d *GoogleDrive) chunkUpload(ctx context.Context, file model.FileStreamer, url string, up driver.UpdateProgress) error {
func (d *GoogleDrive) chunkUpload(ctx context.Context, stream model.FileStreamer, url string) error {
var defaultChunkSize = d.ChunkSize * 1024 * 1024
ss, err := stream.NewStreamSectionReader(file, int(defaultChunkSize), &up)
if err != nil {
return err
}
var offset int64 = 0
url += "?includeItemsFromAllDrives=true&supportsAllDrives=true"
for offset < file.GetSize() {
for offset < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
chunkSize := min(file.GetSize()-offset, defaultChunkSize)
reader, err := ss.GetSectionReader(offset, chunkSize)
chunkSize := stream.GetSize() - offset
if chunkSize > defaultChunkSize {
chunkSize = defaultChunkSize
}
reader, err := stream.RangeRead(http_range.Range{Start: offset, Length: chunkSize})
if err != nil {
return err
}
limitedReader := driver.NewLimitedUploadStream(ctx, reader)
err = retry.Do(func() error {
reader.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, url, limitedReader)
if err != nil {
return err
}
req.Header = map[string][]string{
"Authorization": {"Bearer " + d.AccessToken},
"Content-Length": {strconv.FormatInt(chunkSize, 10)},
"Content-Range": {fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, file.GetSize())},
}
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
bytes, _ := io.ReadAll(res.Body)
var e Error
utils.Json.Unmarshal(bytes, &e)
if e.Error.Code != 0 {
if e.Error.Code == 401 {
err = d.refreshToken()
if err != nil {
return err
}
}
return fmt.Errorf("%s: %v", e.Error.Message, e.Error.Errors)
}
up(float64(offset+chunkSize) / float64(file.GetSize()) * 100)
return nil
},
retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second))
ss.FreeSectionReader(reader)
reader = driver.NewLimitedUploadStream(ctx, reader)
_, err = d.request(url, http.MethodPut, func(req *resty.Request) {
req.SetHeaders(map[string]string{
"Content-Length": strconv.FormatInt(chunkSize, 10),
"Content-Range": fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, stream.GetSize()),
}).SetBody(reader).SetContext(ctx)
}, nil)
if err != nil {
return err
}
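
The Content-Range value built above uses an inclusive byte range. A worked sketch (helper name is ours): for a 10 MiB file uploaded in 4 MiB chunks the three headers are bytes 0-4194303/10485760, bytes 4194304-8388607/10485760, and bytes 8388608-10485759/10485760.

func contentRange(offset, chunkSize, total int64) string {
    // the end index is inclusive, hence the -1
    return fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, total)
}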

View File

@ -276,7 +276,9 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreame
etag := s.GetHash().GetHash(utils.MD5)
var err error
if len(etag) != utils.MD5.Width {
_, etag, err = stream.CacheFullAndHash(s, &up, utils.MD5)
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, etag, err = stream.CacheFullInTempFileAndHash(s, cacheFileProgress, utils.MD5)
if err != nil {
return nil, err
}
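
The hunk above splits the progress callback so that caching reports as 0-50% and the upload itself as 50-100%. A sketch of what such a range wrapper plausibly does (the real model.UpdateProgressWithRange may differ):

func updateProgressWithRange(up func(float64), start, end float64) func(float64) {
    return func(p float64) {
        up(start + (end-start)*p/100) // rescale 0-100 into [start, end]
    }
}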

View File

@ -3,7 +3,6 @@ package LenovoNasShare
import (
"context"
"net/http"
"net/url"
"strings"
"time"
@ -72,23 +71,7 @@ func (d *LenovoNasShare) List(ctx context.Context, dir model.Obj, args model.Lis
files = append(files, resp.Data.List...)
return utils.SliceConvert(files, func(src File) (model.Obj, error) {
if src.IsDir() {
return src, nil
}
return &model.ObjThumb{
Object: model.Object{
Name: src.GetName(),
Size: src.GetSize(),
Modified: src.ModTime(),
IsFolder: src.IsDir(),
},
Thumbnail: model.Thumbnail{
Thumbnail: func() string {
thumbUrl := d.Host + "/oneproxy/api/share/v1/file/thumb?code=" + d.ShareId + "&stoken=" + d.stoken + "&path=" + url.QueryEscape(src.GetPath())
return thumbUrl
}(),
},
}, nil
return src, nil
})
}

View File

@ -1,92 +0,0 @@
package local
// TestDirCalculateSize tests the directory size calculation
// It should be run with the local driver enabled and directory size calculation set to true
import (
"os"
"path/filepath"
"strconv"
"testing"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
)
func generatedTestDir(dir string, dep, filecount int) {
if dep == 0 {
return
}
for i := 0; i < dep; i++ {
subDir := dir + "/dir" + strconv.Itoa(i)
os.Mkdir(subDir, 0755)
generatedTestDir(subDir, dep-1, filecount)
generatedFiles(subDir, filecount)
}
}
func generatedFiles(path string, count int) error {
for i := 0; i < count; i++ {
filePath := filepath.Join(path, "file"+strconv.Itoa(i)+".txt")
file, err := os.Create(filePath)
if err != nil {
return err
}
// Fill the file with repeating ASCII characters
content := make([]byte, 1024) // 1KB file
for j := range content {
content[j] = byte('a' + j%26) // Fill with 'a' to 'z'
}
_, err = file.Write(content)
if err != nil {
return err
}
file.Close()
}
return nil
}
// performance tests for directory size calculation
func BenchmarkCalculateDirSize(t *testing.B) {
// Log the start of the benchmark
t.Logf("Starting performance test for directory size calculation")
// Skip the benchmark in short mode
if testing.Short() {
t.Skip("Skipping performance test in short mode")
}
// Create a temp directory for testing
testTempDir := t.TempDir()
err := os.MkdirAll(testTempDir, 0755)
if err != nil {
t.Fatalf("Failed to create test directory: %v", err)
}
defer os.RemoveAll(testTempDir) // Clean up after test
// Build a nested directory tree (depth 5, 10 files per directory)
generatedTestDir(testTempDir, 5, 10)
// Initialize the local driver with directory size calculation enabled
d := &Local{
directoryMap: DirectoryMap{
root: testTempDir,
},
Addition: Addition{
DirectorySize: true,
RootPath: driver.RootPath{
RootFolderPath: testTempDir,
},
},
}
// record the start time
t.StartTimer()
// Calculate the directory size
err = d.directoryMap.RecalculateDirSize()
if err != nil {
t.Fatalf("Failed to calculate directory size: %v", err)
}
// record the end time
t.StopTimer()
// Print the size and duration
node, ok := d.directoryMap.Get(d.directoryMap.root)
if !ok {
t.Fatalf("Failed to get root node from directory map")
}
t.Logf("Directory size: %d bytes", node.fileSum+node.directorySum)
t.Logf("Performance test completed successfully")
}

View File

@ -33,9 +33,6 @@ type Local struct {
Addition
mkdirPerm int32
// directory size data
directoryMap DirectoryMap
// zero means no limit
thumbConcurrency int
thumbTokenBucket TokenBucket
@ -69,15 +66,6 @@ func (d *Local) Init(ctx context.Context) error {
}
d.Addition.RootFolderPath = abs
}
if d.DirectorySize {
d.directoryMap.root = d.GetRootPath()
_, err := d.directoryMap.CalculateDirSize(d.GetRootPath())
if err != nil {
return err
}
} else {
d.directoryMap.Clear()
}
if d.ThumbCacheFolder != "" && !utils.Exists(d.ThumbCacheFolder) {
err := os.MkdirAll(d.ThumbCacheFolder, os.FileMode(d.mkdirPerm))
if err != nil {
@ -136,9 +124,6 @@ func (d *Local) GetAddition() driver.Additional {
func (d *Local) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
fullPath := dir.GetPath()
rawFiles, err := readDir(fullPath)
if d.DirectorySize && args.Refresh {
d.directoryMap.RecalculateDirSize()
}
if err != nil {
return nil, err
}
@ -162,12 +147,7 @@ func (d *Local) FileInfoToObj(ctx context.Context, f fs.FileInfo, reqPath string
}
isFolder := f.IsDir() || isSymlinkDir(f, fullPath)
var size int64
if isFolder {
node, ok := d.directoryMap.Get(filepath.Join(fullPath, f.Name()))
if ok {
size = node.fileSum + node.directorySum
}
} else {
if !isFolder {
size = f.Size()
}
var ctime time.Time
@ -206,12 +186,7 @@ func (d *Local) Get(ctx context.Context, path string) (model.Obj, error) {
isFolder := f.IsDir() || isSymlinkDir(f, path)
size := f.Size()
if isFolder {
node, ok := d.directoryMap.Get(path)
if ok {
size = node.fileSum + node.directorySum
}
} else {
size = f.Size()
size = 0
}
var ctime time.Time
t, err := times.Stat(path)
@ -270,12 +245,13 @@ func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
if err != nil {
return nil, err
}
link.ContentLength = file.GetSize()
link.MFile = open
}
link.AddIfCloser(link.MFile)
if !d.Config().OnlyLinkMFile {
link.RangeReader = stream.GetRangeReaderFromMFile(link.ContentLength, link.MFile)
if link.MFile != nil && !d.Config().OnlyLinkMFile {
link.AddIfCloser(link.MFile)
link.RangeReader = &model.FileRangeReader{
RangeReaderIF: stream.GetRangeReaderFromMFile(file.GetSize(), link.MFile),
}
link.MFile = nil
}
return link, nil
@ -296,31 +272,22 @@ func (d *Local) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
if utils.IsSubPath(srcPath, dstPath) {
return fmt.Errorf("the destination folder is a subfolder of the source folder")
}
err := os.Rename(srcPath, dstPath)
if err != nil && strings.Contains(err.Error(), "invalid cross-device link") {
// Cross-device move: copy first, then delete the source
if err := d.Copy(ctx, srcObj, dstDir); err != nil {
if err := os.Rename(srcPath, dstPath); err != nil && strings.Contains(err.Error(), "invalid cross-device link") {
// Handle cross-device file move in local driver
if err = d.Copy(ctx, srcObj, dstDir); err != nil {
return err
} else {
// Remove the source directly (skipping the recycle bin) if the copy succeeded
if srcObj.IsDir() {
err = os.RemoveAll(srcObj.GetPath())
} else {
err = os.Remove(srcObj.GetPath())
}
return err
}
// After a successful copy, delete the source file/directory directly
if srcObj.IsDir() {
return os.RemoveAll(srcObj.GetPath())
}
return os.Remove(srcObj.GetPath())
} else {
return err
}
if err == nil {
srcParent := filepath.Dir(srcPath)
dstParent := filepath.Dir(dstPath)
if d.directoryMap.Has(srcParent) {
d.directoryMap.UpdateDirSize(srcParent)
d.directoryMap.UpdateDirParents(srcParent)
}
if d.directoryMap.Has(dstParent) {
d.directoryMap.UpdateDirSize(dstParent)
d.directoryMap.UpdateDirParents(dstParent)
}
}
return err
}
func (d *Local) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
@ -330,14 +297,6 @@ func (d *Local) Rename(ctx context.Context, srcObj model.Obj, newName string) er
if err != nil {
return err
}
if srcObj.IsDir() {
if d.directoryMap.Has(srcPath) {
d.directoryMap.DeleteDirNode(srcPath)
d.directoryMap.CalculateDirSize(dstPath)
}
}
return nil
}
@ -348,21 +307,11 @@ func (d *Local) Copy(_ context.Context, srcObj, dstDir model.Obj) error {
return fmt.Errorf("the destination folder is a subfolder of the source folder")
}
// Copy using otiai10/copy to perform more secure & efficient copy
err := cp.Copy(srcPath, dstPath, cp.Options{
return cp.Copy(srcPath, dstPath, cp.Options{
Sync: true, // Sync file to disk after copy, may have performance penalty in filesystem such as ZFS
PreserveTimes: true,
PreserveOwner: true,
})
if err != nil {
return err
}
if d.directoryMap.Has(filepath.Dir(dstPath)) {
d.directoryMap.UpdateDirSize(filepath.Dir(dstPath))
d.directoryMap.UpdateDirParents(filepath.Dir(dstPath))
}
return nil
}
func (d *Local) Remove(ctx context.Context, obj model.Obj) error {
@ -383,19 +332,6 @@ func (d *Local) Remove(ctx context.Context, obj model.Obj) error {
if err != nil {
return err
}
if obj.IsDir() {
if d.directoryMap.Has(obj.GetPath()) {
d.directoryMap.DeleteDirNode(obj.GetPath())
d.directoryMap.UpdateDirSize(filepath.Dir(obj.GetPath()))
d.directoryMap.UpdateDirParents(filepath.Dir(obj.GetPath()))
}
} else {
if d.directoryMap.Has(filepath.Dir(obj.GetPath())) {
d.directoryMap.UpdateDirSize(filepath.Dir(obj.GetPath()))
d.directoryMap.UpdateDirParents(filepath.Dir(obj.GetPath()))
}
}
return nil
}
@ -419,11 +355,6 @@ func (d *Local) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
if err != nil {
log.Errorf("[local] failed to change time of %s: %s", fullPath, err)
}
if d.directoryMap.Has(dstDir.GetPath()) {
d.directoryMap.UpdateDirSize(dstDir.GetPath())
d.directoryMap.UpdateDirParents(dstDir.GetPath())
}
return nil
}
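
One note on the cross-device fallback in Move above: matching the error string works, but unwrapping to the errno is sturdier (sketch; POSIX-specific, Windows surfaces a different error):

import (
    "errors"
    "syscall"
)

// os.Rename returns an *os.LinkError wrapping syscall.EXDEV on a
// cross-device move; errors.Is unwraps it.
func isCrossDevice(err error) bool {
    return errors.Is(err, syscall.EXDEV)
}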

View File

@ -7,7 +7,6 @@ import (
type Addition struct {
driver.RootPath
DirectorySize bool `json:"directory_size" default:"false" help:"This might impact host performance"`
Thumbnail bool `json:"thumbnail" required:"true" help:"enable thumbnail"`
ThumbCacheFolder string `json:"thumb_cache_folder"`
ThumbConcurrency string `json:"thumb_concurrency" default:"16" required:"false" help:"Number of concurrent thumbnail generation goroutines. This controls how many thumbnails can be generated in parallel."`
@ -28,8 +27,6 @@ var config = driver.Config{
func init() {
op.RegisterDriver(func() driver.Driver {
return &Local{
directoryMap: DirectoryMap{},
}
return &Local{}
})
}

View File

@ -7,12 +7,9 @@ import (
"io/fs"
"os"
"path/filepath"
"runtime"
"slices"
"sort"
"strconv"
"strings"
"sync"
"github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/model"
@ -22,8 +19,7 @@ import (
)
func isSymlinkDir(f fs.FileInfo, path string) bool {
if f.Mode()&os.ModeSymlink == os.ModeSymlink ||
(runtime.GOOS == "windows" && f.Mode()&os.ModeIrregular == os.ModeIrregular) { // os.ModeIrregular is Junction bit in Windows
if f.Mode()&os.ModeSymlink == os.ModeSymlink {
dst, err := os.Readlink(filepath.Join(path, f.Name()))
if err != nil {
return false
@ -155,253 +151,3 @@ func (d *Local) getThumb(file model.Obj) (*bytes.Buffer, *string, error) {
}
return &buf, nil, nil
}
type DirectoryMap struct {
root string
data sync.Map
}
type DirectoryNode struct {
fileSum int64
directorySum int64
children []string
}
type DirectoryTask struct {
path string
cache *DirectoryTaskCache
}
type DirectoryTaskCache struct {
fileSum int64
children []string
}
func (m *DirectoryMap) Has(path string) bool {
_, ok := m.data.Load(path)
return ok
}
func (m *DirectoryMap) Get(path string) (*DirectoryNode, bool) {
value, ok := m.data.Load(path)
if !ok {
return &DirectoryNode{}, false
}
node, ok := value.(*DirectoryNode)
if !ok {
return &DirectoryNode{}, false
}
return node, true
}
func (m *DirectoryMap) Set(path string, node *DirectoryNode) {
m.data.Store(path, node)
}
func (m *DirectoryMap) Delete(path string) {
m.data.Delete(path)
}
func (m *DirectoryMap) Clear() {
m.data.Clear()
}
func (m *DirectoryMap) RecalculateDirSize() error {
m.Clear()
if m.root == "" {
return fmt.Errorf("root path is not set")
}
size, err := m.CalculateDirSize(m.root)
if err != nil {
return err
}
if node, ok := m.Get(m.root); ok {
node.fileSum = size
node.directorySum = size
}
return nil
}
func (m *DirectoryMap) CalculateDirSize(dirname string) (int64, error) {
stack := []DirectoryTask{
{path: dirname},
}
for len(stack) > 0 {
task := stack[len(stack)-1]
stack = stack[:len(stack)-1]
if task.cache != nil {
directorySum := int64(0)
for _, filename := range task.cache.children {
child, ok := m.Get(filepath.Join(task.path, filename))
if !ok {
return 0, fmt.Errorf("child node not found")
}
directorySum += child.fileSum + child.directorySum
}
m.Set(task.path, &DirectoryNode{
fileSum: task.cache.fileSum,
directorySum: directorySum,
children: task.cache.children,
})
continue
}
files, err := readDir(task.path)
if err != nil {
return 0, err
}
fileSum := int64(0)
directorySum := int64(0)
children := []string{}
queue := []DirectoryTask{}
for _, f := range files {
fullpath := filepath.Join(task.path, f.Name())
isFolder := f.IsDir() || isSymlinkDir(f, fullpath)
if isFolder {
if node, ok := m.Get(fullpath); ok {
directorySum += node.fileSum + node.directorySum
} else {
queue = append(queue, DirectoryTask{
path: fullpath,
})
}
children = append(children, f.Name())
} else {
fileSum += f.Size()
}
}
if len(queue) > 0 {
stack = append(stack, DirectoryTask{
path: task.path,
cache: &DirectoryTaskCache{
fileSum: fileSum,
children: children,
},
})
stack = append(stack, queue...)
continue
}
m.Set(task.path, &DirectoryNode{
fileSum: fileSum,
directorySum: directorySum,
children: children,
})
}
if node, ok := m.Get(dirname); ok {
return node.fileSum + node.directorySum, nil
}
return 0, nil
}
func (m *DirectoryMap) UpdateDirSize(dirname string) (int64, error) {
node, ok := m.Get(dirname)
if !ok {
return 0, fmt.Errorf("directory node not found")
}
files, err := readDir(dirname)
if err != nil {
return 0, err
}
fileSum := int64(0)
directorySum := int64(0)
children := []string{}
for _, f := range files {
fullpath := filepath.Join(dirname, f.Name())
isFolder := f.IsDir() || isSymlinkDir(f, fullpath)
if isFolder {
if node, ok := m.Get(fullpath); ok {
directorySum += node.fileSum + node.directorySum
} else {
value, err := m.CalculateDirSize(fullpath)
if err != nil {
return 0, err
}
directorySum += value
}
children = append(children, f.Name())
} else {
fileSum += f.Size()
}
}
for _, c := range node.children {
if !slices.Contains(children, c) {
m.DeleteDirNode(filepath.Join(dirname, c))
}
}
node.fileSum = fileSum
node.directorySum = directorySum
node.children = children
return fileSum + directorySum, nil
}
func (m *DirectoryMap) UpdateDirParents(dirname string) error {
parentPath := filepath.Dir(dirname)
for parentPath != m.root && !strings.HasPrefix(m.root, parentPath) {
if node, ok := m.Get(parentPath); ok {
directorySum := int64(0)
for _, c := range node.children {
child, ok := m.Get(filepath.Join(parentPath, c))
if !ok {
return fmt.Errorf("child node not found")
}
directorySum += child.fileSum + child.directorySum
}
node.directorySum = directorySum
}
parentPath = filepath.Dir(parentPath)
}
return nil
}
func (m *DirectoryMap) DeleteDirNode(dirname string) error {
stack := []string{dirname}
for len(stack) > 0 {
current := stack[len(stack)-1]
stack = stack[:len(stack)-1]
if node, ok := m.Get(current); ok {
for _, filename := range node.children {
stack = append(stack, filepath.Join(current, filename))
}
m.Delete(current)
}
}
return nil
}
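
The removed CalculateDirSize avoids recursion with a two-phase stack: a directory is expanded on first visit, pushed back, and finalized only after all its children have sizes. A minimal sketch of the same pattern over os.ReadDir (names are ours; the original additionally caches fileSum and children so each directory is read only once):

import (
    "os"
    "path/filepath"
)

type walkTask struct {
    path    string
    revisit bool // second visit: children already summed
}

func dirSizes(root string) (map[string]int64, error) {
    sizes := map[string]int64{}
    stack := []walkTask{{path: root}}
    for len(stack) > 0 {
        t := stack[len(stack)-1]
        stack = stack[:len(stack)-1]
        entries, err := os.ReadDir(t.path)
        if err != nil {
            return nil, err
        }
        if !t.revisit {
            // First visit: requeue self, then push child dirs on top so
            // they are finalized first (LIFO order).
            stack = append(stack, walkTask{path: t.path, revisit: true})
            for _, e := range entries {
                if e.IsDir() {
                    stack = append(stack, walkTask{path: filepath.Join(t.path, e.Name())})
                }
            }
            continue
        }
        // Second visit: every child directory now has an entry in sizes.
        var sum int64
        for _, e := range entries {
            p := filepath.Join(t.path, e.Name())
            if e.IsDir() {
                sum += sizes[p]
            } else if info, err := e.Info(); err == nil {
                sum += info.Size()
            }
        }
        sizes[t.path] = sum
    }
    return sizes, nil
}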

View File

@ -180,7 +180,7 @@ func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, file model.FileS
if err != nil {
return err
}
tempFile, err := file.CacheFullAndWriter(&up, nil)
tempFile, err := file.CacheFullInTempFile()
if err != nil {
return err
}

View File

@ -4,7 +4,6 @@ import (
"context"
"errors"
"io"
"net/http"
"time"
"github.com/go-resty/resty/v2"
@ -73,7 +72,7 @@ func (d *Misskey) getFiles(dir model.Obj) ([]model.Obj, error) {
} else {
body = map[string]string{}
}
err := d.request("/files", http.MethodPost, setBody(body), &files)
err := d.request("/files", "POST", setBody(body), &files)
if err != nil {
return []model.Obj{}, err
}
@ -90,7 +89,7 @@ func (d *Misskey) getFolders(dir model.Obj) ([]model.Obj, error) {
} else {
body = map[string]string{}
}
err := d.request("/folders", http.MethodPost, setBody(body), &folders)
err := d.request("/folders", "POST", setBody(body), &folders)
if err != nil {
return []model.Obj{}, err
}
@ -107,7 +106,7 @@ func (d *Misskey) list(dir model.Obj) ([]model.Obj, error) {
func (d *Misskey) link(file model.Obj) (*model.Link, error) {
var mFile MFile
err := d.request("/files/show", http.MethodPost, setBody(map[string]string{"fileId": file.GetID()}), &mFile)
err := d.request("/files/show", "POST", setBody(map[string]string{"fileId": file.GetID()}), &mFile)
if err != nil {
return nil, err
}
@ -118,7 +117,7 @@ func (d *Misskey) link(file model.Obj) (*model.Link, error) {
func (d *Misskey) makeDir(parentDir model.Obj, dirName string) (model.Obj, error) {
var folder MFolder
err := d.request("/folders/create", http.MethodPost, setBody(map[string]interface{}{"parentId": handleFolderId(parentDir), "name": dirName}), &folder)
err := d.request("/folders/create", "POST", setBody(map[string]interface{}{"parentId": handleFolderId(parentDir), "name": dirName}), &folder)
if err != nil {
return nil, err
}
@ -128,11 +127,11 @@ func (d *Misskey) makeDir(parentDir model.Obj, dirName string) (model.Obj, error
func (d *Misskey) move(srcObj, dstDir model.Obj) (model.Obj, error) {
if srcObj.IsDir() {
var folder MFolder
err := d.request("/folders/update", http.MethodPost, setBody(map[string]interface{}{"folderId": srcObj.GetID(), "parentId": handleFolderId(dstDir)}), &folder)
err := d.request("/folders/update", "POST", setBody(map[string]interface{}{"folderId": srcObj.GetID(), "parentId": handleFolderId(dstDir)}), &folder)
return mFolder2Object(folder), err
} else {
var file MFile
err := d.request("/files/update", http.MethodPost, setBody(map[string]interface{}{"fileId": srcObj.GetID(), "folderId": handleFolderId(dstDir)}), &file)
err := d.request("/files/update", "POST", setBody(map[string]interface{}{"fileId": srcObj.GetID(), "folderId": handleFolderId(dstDir)}), &file)
return mFile2Object(file), err
}
}
@ -140,11 +139,11 @@ func (d *Misskey) move(srcObj, dstDir model.Obj) (model.Obj, error) {
func (d *Misskey) rename(srcObj model.Obj, newName string) (model.Obj, error) {
if srcObj.IsDir() {
var folder MFolder
err := d.request("/folders/update", http.MethodPost, setBody(map[string]string{"folderId": srcObj.GetID(), "name": newName}), &folder)
err := d.request("/folders/update", "POST", setBody(map[string]string{"folderId": srcObj.GetID(), "name": newName}), &folder)
return mFolder2Object(folder), err
} else {
var file MFile
err := d.request("/files/update", http.MethodPost, setBody(map[string]string{"fileId": srcObj.GetID(), "name": newName}), &file)
err := d.request("/files/update", "POST", setBody(map[string]string{"fileId": srcObj.GetID(), "name": newName}), &file)
return mFile2Object(file), err
}
}
@ -172,7 +171,7 @@ func (d *Misskey) copy(srcObj, dstDir model.Obj) (model.Obj, error) {
if err != nil {
return nil, err
}
err = d.request("/files/upload-from-url", http.MethodPost, setBody(map[string]interface{}{"url": url.URL, "folderId": handleFolderId(dstDir)}), &file)
err = d.request("/files/upload-from-url", "POST", setBody(map[string]interface{}{"url": url.URL, "folderId": handleFolderId(dstDir)}), &file)
if err != nil {
return nil, err
}
@ -182,10 +181,10 @@ func (d *Misskey) copy(srcObj, dstDir model.Obj) (model.Obj, error) {
func (d *Misskey) remove(obj model.Obj) error {
if obj.IsDir() {
err := d.request("/folders/delete", http.MethodPost, setBody(map[string]string{"folderId": obj.GetID()}), nil)
err := d.request("/folders/delete", "POST", setBody(map[string]string{"folderId": obj.GetID()}), nil)
return err
} else {
err := d.request("/files/delete", http.MethodPost, setBody(map[string]string{"fileId": obj.GetID()}), nil)
err := d.request("/files/delete", "POST", setBody(map[string]string{"fileId": obj.GetID()}), nil)
return err
}
}

View File

@ -263,7 +263,7 @@ func (d *MoPan) Remove(ctx context.Context, obj model.Obj) error {
}
func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
file, err := stream.CacheFullAndWriter(&up, nil)
file, err := stream.CacheFullInTempFile()
if err != nil {
return nil, err
}

View File

@ -55,7 +55,9 @@ func (lrc *LyricObj) getProxyLink(ctx context.Context) *model.Link {
func (lrc *LyricObj) getLyricLink() *model.Link {
return &model.Link{
RangeReader: stream.GetRangeReaderFromMFile(int64(len(lrc.lyric)), strings.NewReader(lrc.lyric)),
RangeReader: &model.FileRangeReader{
RangeReaderIF: stream.GetRangeReaderFromMFile(int64(len(lrc.lyric)), strings.NewReader(lrc.lyric)),
},
}
}

View File

@ -223,7 +223,7 @@ func (d *NeteaseMusic) removeSongObj(file model.Obj) error {
}
func (d *NeteaseMusic) putSongStream(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress) error {
tmp, err := stream.CacheFullAndWriter(&up, nil)
tmp, err := stream.CacheFullInTempFile()
if err != nil {
return err
}

View File

@ -1,6 +1,7 @@
package onedrive
import (
"bytes"
"context"
"errors"
"fmt"
@ -14,7 +15,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
@ -238,29 +238,26 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil
if err != nil {
return err
}
DEFAULT := d.ChunkSize * 1024 * 1024
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
if err != nil {
return err
}
uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
var finish int64 = 0
DEFAULT := d.ChunkSize * 1024 * 1024
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := stream.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[Onedrive] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
rd, err := ss.GetSectionReader(finish, byteSize)
if err != nil {
return err
}
err = retry.Do(
func() error {
rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, rd))
utils.Log.Debugf("[Onedrive] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl,
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
if err != nil {
return err
}
@ -286,7 +283,6 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.FreeSectionReader(rd)
if err != nil {
return err
}

View File

@ -1,6 +1,7 @@
package onedrive_app
import (
"bytes"
"context"
"errors"
"fmt"
@ -14,7 +15,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
@ -152,29 +152,26 @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model.
if err != nil {
return err
}
DEFAULT := d.ChunkSize * 1024 * 1024
ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT), &up)
if err != nil {
return err
}
uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
var finish int64 = 0
DEFAULT := d.ChunkSize * 1024 * 1024
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := stream.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[OnedriveAPP] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
rd, err := ss.GetSectionReader(finish, byteSize)
if err != nil {
return err
}
err = retry.Do(
func() error {
rd.Seek(0, io.SeekStart)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, rd))
utils.Log.Debugf("[OnedriveAPP] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl,
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
if err != nil {
return err
}
@ -200,7 +197,6 @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model.
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second),
)
ss.FreeSectionReader(rd)
if err != nil {
return err
}

View File

@ -38,14 +38,14 @@ func (d *OnedriveSharelink) Init(ctx context.Context) error {
d.cron = cron.NewCron(time.Hour * 1)
d.cron.Do(func() {
var err error
d.Headers, err = d.getHeaders(ctx)
d.Headers, err = d.getHeaders()
if err != nil {
log.Errorf("%+v", err)
}
})
// Get initial headers
d.Headers, err = d.getHeaders(ctx)
d.Headers, err = d.getHeaders()
if err != nil {
return err
}
@ -59,7 +59,7 @@ func (d *OnedriveSharelink) Drop(ctx context.Context) error {
func (d *OnedriveSharelink) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
path := dir.GetPath()
files, err := d.getFiles(ctx, path)
files, err := d.getFiles(path)
if err != nil {
return nil, err
}
@ -82,7 +82,7 @@ func (d *OnedriveSharelink) Link(ctx context.Context, file model.Obj, args model
if d.HeaderTime < time.Now().Unix()-1800 {
var err error
log.Debug("headers are older than 30 minutes, get new headers")
header, err = d.getHeaders(ctx)
header, err = d.getHeaders()
if err != nil {
return nil, err
}

View File

@ -1,7 +1,6 @@
package onedrive_sharelink
import (
"context"
"crypto/tls"
"encoding/json"
"fmt"
@ -132,7 +131,7 @@ func getAttrValue(n *html.Node, key string) string {
}
// getHeaders constructs and returns the necessary HTTP headers for accessing the OneDrive share link
func (d *OnedriveSharelink) getHeaders(ctx context.Context) (http.Header, error) {
func (d *OnedriveSharelink) getHeaders() (http.Header, error) {
header := http.Header{}
header.Set("User-Agent", base.UserAgent)
header.Set("accept-language", "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6")
@ -143,7 +142,7 @@ func (d *OnedriveSharelink) getHeaders(ctx context.Context) (http.Header, error)
if d.ShareLinkPassword == "" {
// Create a no-redirect client
clientNoDirect := NewNoRedirectCLient()
req, err := http.NewRequestWithContext(ctx, http.MethodGet, d.ShareLinkURL, nil)
req, err := http.NewRequest("GET", d.ShareLinkURL, nil)
if err != nil {
return nil, err
}
@ -181,9 +180,9 @@ func (d *OnedriveSharelink) getHeaders(ctx context.Context) (http.Header, error)
}
// getFiles retrieves the files from the OneDrive share link at the specified path
func (d *OnedriveSharelink) getFiles(ctx context.Context, path string) ([]Item, error) {
func (d *OnedriveSharelink) getFiles(path string) ([]Item, error) {
clientNoDirect := NewNoRedirectCLient()
req, err := http.NewRequestWithContext(ctx, http.MethodGet, d.ShareLinkURL, nil)
req, err := http.NewRequest("GET", d.ShareLinkURL, nil)
if err != nil {
return nil, err
}
@ -222,11 +221,11 @@ func (d *OnedriveSharelink) getFiles(ctx context.Context, path string) ([]Item,
// Get redirectUrl
answer, err := clientNoDirect.Do(req)
if err != nil {
d.Headers, err = d.getHeaders(ctx)
d.Headers, err = d.getHeaders()
if err != nil {
return nil, err
}
return d.getFiles(ctx, path)
return d.getFiles(path)
}
defer answer.Body.Close()
re := regexp.MustCompile(`templateUrl":"(.*?)"`)
@ -291,7 +290,7 @@ func (d *OnedriveSharelink) getFiles(ctx context.Context, path string) ([]Item,
client := &http.Client{}
postUrl := strings.Join(redirectSplitURL[:len(redirectSplitURL)-3], "/") + "/_api/v2.1/graphql"
req, err = http.NewRequest(http.MethodPost, postUrl, strings.NewReader(graphqlVar))
req, err = http.NewRequest("POST", postUrl, strings.NewReader(graphqlVar))
if err != nil {
return nil, err
}
@ -299,11 +298,11 @@ func (d *OnedriveSharelink) getFiles(ctx context.Context, path string) ([]Item,
resp, err := client.Do(req)
if err != nil {
d.Headers, err = d.getHeaders(ctx)
d.Headers, err = d.getHeaders()
if err != nil {
return nil, err
}
return d.getFiles(ctx, path)
return d.getFiles(path)
}
defer resp.Body.Close()
var graphqlReq GraphQLRequest
@ -324,31 +323,31 @@ func (d *OnedriveSharelink) getFiles(ctx context.Context, path string) ([]Item,
graphqlReqNEW := GraphQLNEWRequest{}
postUrl = strings.Join(redirectSplitURL[:len(redirectSplitURL)-3], "/") + "/_api/web/GetListUsingPath(DecodedUrl=@a1)/RenderListDataAsStream" + nextHref
req, _ = http.NewRequest(http.MethodPost, postUrl, strings.NewReader(renderListDataAsStreamVar))
req, _ = http.NewRequest("POST", postUrl, strings.NewReader(renderListDataAsStreamVar))
req.Header = tempHeader
resp, err := client.Do(req)
if err != nil {
d.Headers, err = d.getHeaders(ctx)
d.Headers, err = d.getHeaders()
if err != nil {
return nil, err
}
return d.getFiles(ctx, path)
return d.getFiles(path)
}
defer resp.Body.Close()
json.NewDecoder(resp.Body).Decode(&graphqlReqNEW)
for graphqlReqNEW.ListData.NextHref != "" {
graphqlReqNEW = GraphQLNEWRequest{}
postUrl = strings.Join(redirectSplitURL[:len(redirectSplitURL)-3], "/") + "/_api/web/GetListUsingPath(DecodedUrl=@a1)/RenderListDataAsStream" + nextHref
req, _ = http.NewRequest(http.MethodPost, postUrl, strings.NewReader(renderListDataAsStreamVar))
req, _ = http.NewRequest("POST", postUrl, strings.NewReader(renderListDataAsStreamVar))
req.Header = tempHeader
resp, err := client.Do(req)
if err != nil {
d.Headers, err = d.getHeaders(ctx)
d.Headers, err = d.getHeaders()
if err != nil {
return nil, err
}
return d.getFiles(ctx, path)
return d.getFiles(path)
}
defer resp.Body.Close()
json.NewDecoder(resp.Body).Decode(&graphqlReqNEW)

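getHeaders and getFiles above depend on NewNoRedirectCLient so the driver can read cookies and the Location target off the 3xx response itself. A hedged sketch of such a client using net/http's standard CheckRedirect escape hatch (the function name is illustrative):

package sharelink

import (
	"context"
	"net/http"
)

// fetchNoRedirect returns the raw first response for url, leaving any
// 3xx unfollowed so its Set-Cookie and Location headers can be harvested.
func fetchNoRedirect(ctx context.Context, url string) (*http.Response, error) {
	client := &http.Client{
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			return http.ErrUseLastResponse // hand back the redirect itself
		},
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	return client.Do(req)
}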
View File

@ -12,7 +12,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
hash_extend "github.com/OpenListTeam/OpenList/v4/pkg/utils/hash"
"github.com/go-resty/resty/v2"
@ -141,8 +140,7 @@ func (d *PikPak) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
}
_, err := d.request(fmt.Sprintf("https://api-drive.mypikpak.net/drive/v1/files/%s", file.GetID()),
http.MethodGet, func(req *resty.Request) {
req.SetContext(ctx).
SetQueryParams(queryParams)
req.SetQueryParams(queryParams)
}, &resp)
if err != nil {
return nil, err
@ -161,7 +159,7 @@ func (d *PikPak) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
func (d *PikPak) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
_, err := d.request("https://api-drive.mypikpak.net/drive/v1/files", http.MethodPost, func(req *resty.Request) {
req.SetContext(ctx).SetBody(base.Json{
req.SetBody(base.Json{
"kind": "drive#folder",
"parent_id": parentDir.GetID(),
"name": dirName,
@ -172,7 +170,7 @@ func (d *PikPak) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
func (d *PikPak) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
_, err := d.request("https://api-drive.mypikpak.net/drive/v1/files:batchMove", http.MethodPost, func(req *resty.Request) {
req.SetContext(ctx).SetBody(base.Json{
req.SetBody(base.Json{
"ids": []string{srcObj.GetID()},
"to": base.Json{
"parent_id": dstDir.GetID(),
@ -184,7 +182,7 @@ func (d *PikPak) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
func (d *PikPak) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
_, err := d.request("https://api-drive.mypikpak.net/drive/v1/files/"+srcObj.GetID(), http.MethodPatch, func(req *resty.Request) {
req.SetContext(ctx).SetBody(base.Json{
req.SetBody(base.Json{
"name": newName,
})
}, nil)
@ -193,7 +191,7 @@ func (d *PikPak) Rename(ctx context.Context, srcObj model.Obj, newName string) e
func (d *PikPak) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
_, err := d.request("https://api-drive.mypikpak.net/drive/v1/files:batchCopy", http.MethodPost, func(req *resty.Request) {
req.SetContext(ctx).SetBody(base.Json{
req.SetBody(base.Json{
"ids": []string{srcObj.GetID()},
"to": base.Json{
"parent_id": dstDir.GetID(),
@ -205,7 +203,7 @@ func (d *PikPak) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
func (d *PikPak) Remove(ctx context.Context, obj model.Obj) error {
_, err := d.request("https://api-drive.mypikpak.net/drive/v1/files:batchTrash", http.MethodPost, func(req *resty.Request) {
req.SetContext(ctx).SetBody(base.Json{
req.SetBody(base.Json{
"ids": []string{obj.GetID()},
})
}, nil)
@ -213,11 +211,15 @@ func (d *PikPak) Remove(ctx context.Context, obj model.Obj) error {
}
func (d *PikPak) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
sha1Str := stream.GetHash().GetHash(hash_extend.GCID)
hi := stream.GetHash()
sha1Str := hi.GetHash(hash_extend.GCID)
if len(sha1Str) < hash_extend.GCID.Width {
var err error
_, sha1Str, err = streamPkg.CacheFullAndHash(stream, &up, hash_extend.GCID, stream.GetSize())
tFile, err := stream.CacheFullInTempFile()
if err != nil {
return err
}
sha1Str, err = utils.HashFile(hash_extend.GCID, tFile, stream.GetSize())
if err != nil {
return err
}
@ -275,8 +277,7 @@ func (d *PikPak) OfflineDownload(ctx context.Context, fileUrl string, parentDir
var resp OfflineDownloadResp
_, err := d.request("https://api-drive.mypikpak.net/drive/v1/files", http.MethodPost, func(req *resty.Request) {
req.SetContext(ctx).
SetBody(requestBody)
req.SetBody(requestBody)
}, &resp)
if err != nil {

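Both sides of the PikPak Put change fall back to the same strategy when the stream lacks a usable GCID: spool the whole stream to disk, hash the copy, then upload from the cached file. A generic sketch of that fallback, with crypto/sha1 standing in for the proprietary GCID block hash:

package cache

import (
	"crypto/sha1"
	"encoding/hex"
	"io"
	"os"
)

// cacheAndHash spools r to a temp file while feeding the same bytes to a
// hasher, then rewinds and returns the reusable file plus the hex digest.
func cacheAndHash(r io.Reader) (*os.File, string, error) {
	f, err := os.CreateTemp("", "upload-*")
	if err != nil {
		return nil, "", err
	}
	h := sha1.New() // illustrative; the driver uses the GCID block hash
	if _, err := io.Copy(io.MultiWriter(f, h), r); err != nil {
		f.Close()
		os.Remove(f.Name())
		return nil, "", err
	}
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		f.Close()
		os.Remove(f.Name())
		return nil, "", err
	}
	return f, hex.EncodeToString(h.Sum(nil)), nil
}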
View File

@ -438,19 +438,20 @@ func (d *PikPak) UploadByOSS(ctx context.Context, params *S3Params, s model.File
}
func (d *PikPak) UploadByMultipart(ctx context.Context, params *S3Params, fileSize int64, s model.FileStreamer, up driver.UpdateProgress) error {
tmpF, err := s.CacheFullAndWriter(&up, nil)
if err != nil {
return err
}
var (
chunks []oss.FileChunk
parts []oss.UploadPart
imur oss.InitiateMultipartUploadResult
ossClient *oss.Client
bucket *oss.Bucket
err error
)
tmpF, err := s.CacheFullInTempFile()
if err != nil {
return err
}
if ossClient, err = oss.New(params.Endpoint, params.AccessKeyID, params.AccessKeySecret); err != nil {
return err
}

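UploadByMultipart then drives the Aliyun OSS SDK directly. A trimmed sketch of the happy path it walks (chunk splitting, retries, and abort-on-failure cleanup are omitted; bucket and key names are placeholders):

package pikpak

import (
	"io"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

// multipartUpload shows the InitiateMultipartUpload / UploadPart /
// CompleteMultipartUpload sequence the driver walks through.
func multipartUpload(endpoint, ak, sk, bucketName, key string, chunks []io.Reader, chunkSize int64) error {
	client, err := oss.New(endpoint, ak, sk)
	if err != nil {
		return err
	}
	bucket, err := client.Bucket(bucketName)
	if err != nil {
		return err
	}
	imur, err := bucket.InitiateMultipartUpload(key)
	if err != nil {
		return err
	}
	parts := make([]oss.UploadPart, 0, len(chunks))
	for i, chunk := range chunks {
		part, err := bucket.UploadPart(imur, chunk, chunkSize, i+1)
		if err != nil {
			return err // the real code also aborts the multipart upload here
		}
		parts = append(parts, part)
	}
	_, err = bucket.CompleteMultipartUpload(imur, parts)
	return err
}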
View File

@ -14,6 +14,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/go-resty/resty/v2"
)
@ -157,7 +158,9 @@ func (d *QuarkOpen) Put(ctx context.Context, dstDir model.Obj, stream model.File
}
if len(writers) > 0 {
_, err := stream.CacheFullAndWriter(&up, io.MultiWriter(writers...))
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, err := streamPkg.CacheFullInTempFileAndWriter(stream, cacheFileProgress, io.MultiWriter(writers...))
if err != nil {
return err
}

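The replacement code splits a single progress callback so caching reports 0-50% and the upload itself reports 50-100%. A plausible sketch of what a helper like model.UpdateProgressWithRange does; the real signature may differ:

package progress

// UpdateProgress reports overall completion as a 0-100 percentage.
type UpdateProgress func(percentage float64)

// withRange remaps a sub-task's 0-100 progress into the [start, end]
// slice of the parent progress bar.
func withRange(up UpdateProgress, start, end float64) UpdateProgress {
	return func(p float64) {
		up(start + (end-start)*p/100)
	}
}

With it, cacheFileProgress := withRange(up, 0, 50) and up = withRange(up, 50, 100) partition one bar between the two phases, exactly as the hunk does.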
View File

@ -8,15 +8,14 @@ import (
"encoding/hex"
"errors"
"fmt"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
"github.com/google/uuid"
"io"
"net/http"
"strconv"
"strings"
"time"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
"github.com/google/uuid"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
@ -245,8 +244,11 @@ func (d *QuarkOpen) generateProofCode(file model.FileStreamer, proofSeed string,
// read the data
buf := make([]byte, length)
n, err := io.ReadFull(reader, buf)
if n != int(length) {
return "", fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", length, n, err)
if errors.Is(err, io.ErrUnexpectedEOF) {
return "", fmt.Errorf("can't read data, expected=%d, got=%d", length, n)
}
if err != nil {
return "", fmt.Errorf("failed to read data: %w", err)
}
// Base64-encode

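The rewritten proof-code check leans on io.ReadFull's contract; a small self-contained restatement of that contract as code:

package proof

import (
	"fmt"
	"io"
)

// readExactly mirrors the check above: it demands exactly length bytes
// and surfaces io.ReadFull's tri-state contract in one error message.
func readExactly(reader io.Reader, length int64) ([]byte, error) {
	buf := make([]byte, length)
	n, err := io.ReadFull(reader, buf)
	// io.ReadFull guarantees: err == nil          => n == len(buf)
	//                         io.ErrUnexpectedEOF => 0 < n < len(buf)
	//                         io.EOF              => n == 0
	if err != nil {
		return nil, fmt.Errorf("read %d of %d bytes: %w", n, length, err)
	}
	return buf, nil
}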
View File

@ -13,6 +13,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
@ -143,7 +144,9 @@ func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.File
}
if len(writers) > 0 {
_, err := stream.CacheFullAndWriter(&up, io.MultiWriter(writers...))
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, err := streamPkg.CacheFullInTempFileAndWriter(stream, cacheFileProgress, io.MultiWriter(writers...))
if err != nil {
return err
}

View File

@ -1,9 +1,8 @@
package quark
import (
"time"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/model"
)
@ -160,7 +159,7 @@ type TranscodingResp struct {
Codec string `json:"codec"`
Channels int `json:"channels"`
} `json:"audio"`
UpdateTime int64 `json:"update_time"`
UpdateTime int `json:"update_time"`
URL string `json:"url"`
Resolution string `json:"resolution"`
HlsType string `json:"hls_type"`

View File

@ -3,7 +3,6 @@ package quark_uc_tv
import (
"context"
"fmt"
"net/http"
"strconv"
"time"
@ -97,7 +96,7 @@ func (d *QuarkUCTV) List(ctx context.Context, dir model.Obj, args model.ListArgs
pageSize := int64(100)
for {
var filesData FilesData
_, err := d.request(ctx, "/file", http.MethodGet, func(req *resty.Request) {
_, err := d.request(ctx, "/file", "GET", func(req *resty.Request) {
req.SetQueryParams(map[string]string{
"method": "list",
"parent_fid": dir.GetID(),

View File

@ -95,7 +95,7 @@ func (d *QuarkUCTV) getLoginCode(ctx context.Context) (string, error) {
QrData string `json:"qr_data"`
QueryToken string `json:"query_token"`
}
_, err := d.request(ctx, pathname, http.MethodGet, func(req *resty.Request) {
_, err := d.request(ctx, pathname, "GET", func(req *resty.Request) {
req.SetQueryParams(map[string]string{
"auth_type": "code",
"client_id": d.conf.clientID,
@ -123,7 +123,7 @@ func (d *QuarkUCTV) getCode(ctx context.Context) (string, error) {
CommonRsp
Code string `json:"code"`
}
_, err := d.request(ctx, pathname, http.MethodGet, func(req *resty.Request) {
_, err := d.request(ctx, pathname, "GET", func(req *resty.Request) {
req.SetQueryParams(map[string]string{
"client_id": d.conf.clientID,
"scope": "netdisk",
@ -138,7 +138,7 @@ func (d *QuarkUCTV) getCode(ctx context.Context) (string, error) {
func (d *QuarkUCTV) getRefreshTokenByTV(ctx context.Context, code string, isRefresh bool) error {
pathname := "/token"
_, _, reqID := d.generateReqSign(http.MethodPost, pathname, d.conf.signKey)
_, _, reqID := d.generateReqSign("POST", pathname, d.conf.signKey)
u := d.conf.codeApi + pathname
var resp RefreshTokenAuthResp
body := map[string]string{

View File

@ -38,7 +38,7 @@ func getCredentials(AccessKey, SecretKey string) (rst Credentials, err error) {
sign := hex.EncodeToString(hmacObj.Sum(nil))
Authorization := "TOKEN " + AccessKey + ":" + sign
req, err := http.NewRequest(http.MethodPost, "https://api.dogecloud.com"+apiPath, strings.NewReader(string(reqBody)))
req, err := http.NewRequest("POST", "https://api.dogecloud.com"+apiPath, strings.NewReader(string(reqBody)))
if err != nil {
return rst, err
}

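Only the request construction is visible in this hunk; the Authorization value it carries was built just above by HMAC-signing the request and hex-encoding the digest. A sketch of that scheme — the exact signed message layout (path plus body) is an assumption based on DogeCloud's public API docs, not taken from this diff:

package dogecloud

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/hex"
)

// signRequest reproduces the visible "TOKEN <AccessKey>:<sign>" scheme.
// The signed message layout (path + "\n" + body) is an assumption.
func signRequest(accessKey, secretKey, apiPath string, body []byte) string {
	mac := hmac.New(sha1.New, []byte(secretKey))
	mac.Write([]byte(apiPath + "\n"))
	mac.Write(body)
	return "TOKEN " + accessKey + ":" + hex.EncodeToString(mac.Sum(nil))
}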
View File

@ -63,20 +63,20 @@ func (d *SFTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*
if err != nil {
return nil, err
}
mFile := &stream.RateLimitFile{
File: remoteFile,
Limiter: stream.ServerDownloadLimit,
Ctx: ctx,
}
if !d.Config().OnlyLinkMFile {
if remoteFile != nil && !d.Config().OnlyLinkMFile {
return &model.Link{
RangeReader: stream.GetRangeReaderFromMFile(file.GetSize(), mFile),
RangeReader: &model.FileRangeReader{
RangeReaderIF: stream.RateLimitRangeReaderFunc(stream.GetRangeReaderFromMFile(file.GetSize(), remoteFile)),
},
SyncClosers: utils.NewSyncClosers(remoteFile),
}, nil
}
return &model.Link{
MFile: mFile,
SyncClosers: utils.NewSyncClosers(remoteFile),
MFile: &stream.RateLimitFile{
File: remoteFile,
Limiter: stream.ServerDownloadLimit,
Ctx: ctx,
},
}, nil
}

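Both variants of the SFTP Link (and the SMB one below) wrap the remote file in stream.RateLimitFile before returning it. A generic sketch of a token-bucket-limited reader built on golang.org/x/time/rate, assuming the real type behaves similarly (it also limits ReadAt and shares a server-wide limiter):

package ratelimit

import (
	"context"
	"io"

	"golang.org/x/time/rate"
)

// limitedReader blocks each Read until the limiter grants that many
// byte tokens, smoothing download bandwidth to the configured rate.
type limitedReader struct {
	r       io.Reader
	limiter *rate.Limiter
	ctx     context.Context
}

func (l *limitedReader) Read(p []byte) (int, error) {
	n, err := l.r.Read(p)
	if n > 0 {
		if werr := l.limiter.WaitN(l.ctx, n); werr != nil {
			return n, werr
		}
	}
	return n, err
}

A limiter from rate.NewLimiter(rate.Limit(bytesPerSecond), burst) then caps throughput; note WaitN fails when a single Read exceeds the burst, which production code handles by chunking the wait.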
View File

@ -13,8 +13,8 @@ import (
// do others that not defined in Driver interface
func (d *SFTP) initClient() error {
_, err, _ := singleflight.AnyGroup.Do(fmt.Sprintf("SFTP.initClient:%p", d), func() (any, error) {
return nil, d._initClient()
err, _, _ := singleflight.ErrorGroup.Do(fmt.Sprintf("SFTP.initClient:%p", d), func() (error, error) {
return d._initClient(), nil
})
return err
}

View File

@ -81,20 +81,19 @@ func (d *SMB) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*m
return nil, err
}
d.updateLastConnTime()
mFile := &stream.RateLimitFile{
File: remoteFile,
Limiter: stream.ServerDownloadLimit,
Ctx: ctx,
}
if !d.Config().OnlyLinkMFile {
if remoteFile != nil && !d.Config().OnlyLinkMFile {
return &model.Link{
RangeReader: stream.GetRangeReaderFromMFile(file.GetSize(), mFile),
SyncClosers: utils.NewSyncClosers(remoteFile),
RangeReader: &model.FileRangeReader{
RangeReaderIF: stream.RateLimitRangeReaderFunc(stream.GetRangeReaderFromMFile(file.GetSize(), remoteFile)),
},
}, nil
}
return &model.Link{
MFile: mFile,
SyncClosers: utils.NewSyncClosers(remoteFile),
MFile: &stream.RateLimitFile{
File: remoteFile,
Limiter: stream.ServerDownloadLimit,
Ctx: ctx,
},
}, nil
}

View File

@ -28,8 +28,8 @@ func (d *SMB) getLastConnTime() time.Time {
}
func (d *SMB) initFS() error {
_, err, _ := singleflight.AnyGroup.Do(fmt.Sprintf("SMB.initFS:%p", d), func() (any, error) {
return nil, d._initFS()
err, _, _ := singleflight.ErrorGroup.Do(fmt.Sprintf("SMB.initFS:%p", d), func() (error, error) {
return d._initFS(), nil
})
return err
}

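The SFTP and SMB init changes toggle between two flavors of an in-repo singleflight group; both mirror the golang.org/x/sync/singleflight shape:

package connpool

import "golang.org/x/sync/singleflight"

var group singleflight.Group

// initOnce collapses concurrent re-connect attempts for the same key
// into a single in-flight call; every waiter gets the same error.
func initOnce(key string, connect func() error) error {
	_, err, _ := group.Do(key, func() (interface{}, error) {
		return nil, connect()
	})
	return err
}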
View File

@ -3,17 +3,13 @@ package strm
import (
"context"
"errors"
"fmt"
stdpath "path"
"strings"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/fs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/sign"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/OpenList/v4/server/common"
)
type Strm struct {
@ -22,9 +18,6 @@ type Strm struct {
pathMap map[string][]string
autoFlatten bool
oneKey string
supportSuffix map[string]struct{}
downloadSuffix map[string]struct{}
}
func (d *Strm) Config() driver.Config {
@ -58,24 +51,12 @@ func (d *Strm) Init(ctx context.Context) error {
d.autoFlatten = false
}
d.supportSuffix = supportSuffix()
if d.FilterFileTypes != "" {
types := strings.Split(d.FilterFileTypes, ",")
for _, ext := range types {
ext = strings.ToLower(strings.TrimSpace(ext))
if ext != "" {
d.supportSuffix[ext] = struct{}{}
}
}
}
d.downloadSuffix = downloadSuffix()
if d.DownloadFileTypes != "" {
downloadTypes := strings.Split(d.DownloadFileTypes, ",")
for _, ext := range downloadTypes {
ext = strings.ToLower(strings.TrimSpace(ext))
if ext != "" {
d.downloadSuffix[ext] = struct{}{}
supportSuffix[ext] = struct{}{}
}
}
}
@ -84,8 +65,6 @@ func (d *Strm) Init(ctx context.Context) error {
func (d *Strm) Drop(ctx context.Context) error {
d.pathMap = nil
d.downloadSuffix = nil
d.supportSuffix = nil
return nil
}
@ -103,25 +82,10 @@ func (d *Strm) Get(ctx context.Context, path string) (model.Obj, error) {
return nil, errs.ObjectNotFound
}
for _, dst := range dsts {
reqPath := stdpath.Join(dst, sub)
obj, err := fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
if err != nil {
continue
obj, err := d.get(ctx, path, dst, sub)
if err == nil {
return obj, nil
}
// fs.Get succeeding means the path was not generated by strm, so return it directly
size := int64(0)
if !obj.IsDir() {
size = obj.GetSize()
path = reqPath // set the path to the real one so Link can read it directly
}
return &model.Object{
Path: path,
Name: obj.GetName(),
Size: size,
Modified: obj.ModTime(),
IsFolder: obj.IsDir(),
HashInfo: obj.GetHash(),
}, nil
}
return nil, errs.ObjectNotFound
}
@ -148,34 +112,34 @@ func (d *Strm) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]
}
func (d *Strm) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if file.GetID() == "strm" {
link := d.getLink(ctx, file.GetPath())
return &model.Link{
MFile: strings.NewReader(link),
}, nil
}
// ftp,s3
if common.GetApiUrl(ctx) == "" {
args.Redirect = false
}
reqPath := file.GetPath()
link, _, err := d.link(ctx, reqPath, args)
if err != nil {
return nil, err
}
link := d.getLink(ctx, file.GetPath())
return &model.Link{
MFile: strings.NewReader(link),
}, nil
}
if link == nil {
return &model.Link{
URL: fmt.Sprintf("%s/p%s?sign=%s",
common.GetApiUrl(ctx),
utils.EncodePath(reqPath, true),
sign.Sign(reqPath)),
}, nil
}
func (d *Strm) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
return errors.New("strm Driver cannot make dir")
}
resultLink := *link
resultLink.SyncClosers = utils.NewSyncClosers(link)
return &resultLink, nil
func (d *Strm) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
return errors.New("strm Driver cannot move file")
}
func (d *Strm) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
return errors.New("strm Driver cannot rename file")
}
func (d *Strm) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
return errors.New("strm Driver cannot copy file")
}
func (d *Strm) Remove(ctx context.Context, obj model.Obj) error {
return errors.New("strm Driver cannot remove file")
}
func (d *Strm) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
return errors.New("strm Driver cannot put file")
}
var _ driver.Driver = (*Strm)(nil)

View File

@ -6,12 +6,10 @@ import (
)
type Addition struct {
Paths string `json:"paths" required:"true" type:"text"`
SiteUrl string `json:"siteUrl" type:"text" required:"false" help:"The prefix URL of the strm file"`
FilterFileTypes string `json:"filterFileTypes" type:"text" default:"strm" required:"false" help:"Supports suffix name of strm file"`
DownloadFileTypes string `json:"downloadFileTypes" type:"text" default:"ass" required:"false" help:"Files need to download with strm (usually subtitles)"`
EncodePath bool `json:"encodePath" default:"true" required:"true" help:"encode the path in the strm file"`
LocalModel bool `json:"localModel" default:"false" help:"enable local mode"`
Paths string `json:"paths" required:"true" type:"text"`
SiteUrl string `json:"siteUrl" type:"text" required:"false" help:"The prefix URL of the strm file"`
FilterFileTypes string `json:"filterFileTypes" type:"text" default:"strm" required:"false" help:"Supports suffix name of strm file"`
EncodePath bool `json:"encodePath" default:"true" required:"true" help:"encode the path in the strm file"`
}
var config = driver.Config{

View File

@ -1,36 +1,22 @@
package strm
func supportSuffix() map[string]struct{} {
return map[string]struct{}{
// video
"mp4": {},
"mkv": {},
"flv": {},
"avi": {},
"wmv": {},
"ts": {},
"rmvb": {},
"webm": {},
// audio
"mp3": {},
"flac": {},
"aac": {},
"wav": {},
"ogg": {},
"m4a": {},
"wma": {},
"alac": {},
}
}
func downloadSuffix() map[string]struct{} {
return map[string]struct{}{
// strm
"strm": {},
// subtitles
"ass": {},
"srt": {},
"vtt": {},
"sub": {},
}
var supportSuffix = map[string]struct{}{
// video
"mp4": {},
"mkv": {},
"flv": {},
"avi": {},
"wmv": {},
"ts": {},
"rmvb": {},
"webm": {},
// audio
"mp3": {},
"flac": {},
"aac": {},
"wav": {},
"ogg": {},
"m4a": {},
"wma": {},
"alac": {},
}

View File

@ -9,7 +9,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/fs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/sign"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/OpenList/v4/server/common"
@ -52,6 +51,31 @@ func (d *Strm) getRootAndPath(path string) (string, string) {
return parts[0], parts[1]
}
func (d *Strm) get(ctx context.Context, path string, dst, sub string) (model.Obj, error) {
reqPath := stdpath.Join(dst, sub)
obj, err := fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
if err != nil {
return nil, err
}
size := int64(0)
if !obj.IsDir() {
if utils.Ext(obj.GetName()) == "strm" {
size = obj.GetSize()
} else {
file := stdpath.Join(reqPath, obj.GetName())
size = int64(len(d.getLink(ctx, file)))
}
}
return &model.Object{
Path: path,
Name: obj.GetName(),
Size: size,
Modified: obj.ModTime(),
IsFolder: obj.IsDir(),
HashInfo: obj.GetHash(),
}, nil
}
func (d *Strm) list(ctx context.Context, dst, sub string, args *fs.ListArgs) ([]model.Obj, error) {
reqPath := stdpath.Join(dst, sub)
objs, err := fs.List(ctx, reqPath, args)
@ -61,58 +85,48 @@ func (d *Strm) list(ctx context.Context, dst, sub string, args *fs.ListArgs) ([]
var validObjs []model.Obj
for _, obj := range objs {
id, name, path := "", obj.GetName(), ""
size := int64(0)
if !obj.IsDir() {
path = stdpath.Join(reqPath, obj.GetName())
ext := strings.ToLower(utils.Ext(name))
if _, ok := d.supportSuffix[ext]; ok {
id = "strm"
name = strings.TrimSuffix(name, ext) + "strm"
size = int64(len(d.getLink(ctx, path)))
} else if _, ok := d.downloadSuffix[ext]; ok {
size = obj.GetSize()
} else {
ext := strings.ToLower(utils.Ext(obj.GetName()))
if _, ok := supportSuffix[ext]; !ok {
continue
}
}
validObjs = append(validObjs, obj)
}
return utils.SliceConvert(validObjs, func(obj model.Obj) (model.Obj, error) {
name := obj.GetName()
size := int64(0)
if !obj.IsDir() {
ext := utils.Ext(name)
name = strings.TrimSuffix(name, ext) + "strm"
if ext == "strm" {
size = obj.GetSize()
} else {
file := stdpath.Join(reqPath, obj.GetName())
size = int64(len(d.getLink(ctx, file)))
}
}
objRes := model.Object{
ID: id,
Path: path,
Name: name,
Size: size,
Modified: obj.ModTime(),
IsFolder: obj.IsDir(),
Path: stdpath.Join(reqPath, obj.GetName()),
}
thumb, ok := model.GetThumb(obj)
if !ok {
validObjs = append(validObjs, &objRes)
continue
return &objRes, nil
}
validObjs = append(validObjs, &model.ObjThumb{
return &model.ObjThumb{
Object: objRes,
Thumbnail: model.Thumbnail{
Thumbnail: thumb,
},
})
}
return validObjs, nil
}, nil
})
}
func (d *Strm) getLink(ctx context.Context, path string) string {
finalPath := path
if d.EncodePath {
finalPath = utils.EncodePath(path, true)
}
if d.EnableSign {
signPath := sign.Sign(path)
finalPath = fmt.Sprintf("%s?sign=%s", finalPath, signPath)
}
if d.LocalModel {
return finalPath
}
apiUrl := d.SiteUrl
if len(apiUrl) > 0 {
apiUrl = strings.TrimSuffix(apiUrl, "/")
@ -120,25 +134,15 @@ func (d *Strm) getLink(ctx context.Context, path string) string {
apiUrl = common.GetApiUrl(ctx)
}
return fmt.Sprintf("%s/d%s",
apiUrl,
finalPath)
}
if d.EncodePath {
path = utils.EncodePath(path, true)
}
if !d.EnableSign {
return fmt.Sprintf("%s/d%s", apiUrl, path)
}
func (d *Strm) link(ctx context.Context, reqPath string, args model.LinkArgs) (*model.Link, model.Obj, error) {
storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
if err != nil {
return nil, nil, err
}
if !args.Redirect {
return op.Link(ctx, storage, reqActualPath, args)
}
obj, err := fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
if err != nil {
return nil, nil, err
}
if common.ShouldProxy(storage, stdpath.Base(reqPath)) {
return nil, obj, nil
}
return op.Link(ctx, storage, reqActualPath, args)
return fmt.Sprintf("%s/d%s?sign=%s",
apiUrl,
path,
sign.Sign(path))
}

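Across both versions, getLink assembles the same /d-prefixed download URL and differs only in when the sign query parameter is attached. Condensed, with the EncodePath step elided:

package strmutil

import (
	"fmt"
	"strings"
)

// buildStrmURL follows the format strings visible above:
// <apiUrl>/d<path> plus an optional ?sign=. The driver additionally
// runs path through utils.EncodePath when EncodePath is enabled.
func buildStrmURL(apiURL, path, sig string) string {
	u := fmt.Sprintf("%s/d%s", strings.TrimSuffix(apiURL, "/"), path)
	if sig != "" {
		u += "?sign=" + sig
	}
	return u
}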
View File

@ -132,7 +132,7 @@ func (d *Terabox) Remove(ctx context.Context, obj model.Obj) error {
func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
resp, err := base.RestyClient.R().
SetContext(ctx).
Get("https://" + d.url_domain_prefix + "-data.terabox.com/rest/2.0/pcs/file?method=locateupload")
Get("https://d.terabox.com/rest/2.0/pcs/file?method=locateupload")
if err != nil {
return err
}
@ -179,7 +179,7 @@ func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
}
// upload chunks
tempFile, err := stream.CacheFullAndWriter(&up, nil)
tempFile, err := stream.CacheFullInTempFile()
if err != nil {
return err
}

View File

@ -371,7 +371,9 @@ func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, file model.Fi
gcid := file.GetHash().GetHash(hash_extend.GCID)
var err error
if len(gcid) < hash_extend.GCID.Width {
_, gcid, err = stream.CacheFullAndHash(file, &up, hash_extend.GCID, file.GetSize())
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, gcid, err = stream.CacheFullInTempFileAndHash(file, cacheFileProgress, hash_extend.GCID, file.GetSize())
if err != nil {
return err
}

View File

@ -491,7 +491,9 @@ func (xc *XunLeiBrowserCommon) Put(ctx context.Context, dstDir model.Obj, stream
gcid := stream.GetHash().GetHash(hash_extend.GCID)
var err error
if len(gcid) < hash_extend.GCID.Width {
_, gcid, err = streamPkg.CacheFullAndHash(stream, &up, hash_extend.GCID, stream.GetSize())
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, gcid, err = streamPkg.CacheFullInTempFileAndHash(stream, cacheFileProgress, hash_extend.GCID, stream.GetSize())
if err != nil {
return err
}

View File

@ -2,11 +2,8 @@ package thunderx
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"strconv"
"strings"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
@ -372,7 +369,9 @@ func (xc *XunLeiXCommon) Put(ctx context.Context, dstDir model.Obj, file model.F
gcid := file.GetHash().GetHash(hash_extend.GCID)
var err error
if len(gcid) < hash_extend.GCID.Width {
_, gcid, err = stream.CacheFullAndHash(file, &up, hash_extend.GCID, file.GetSize())
cacheFileProgress := model.UpdateProgressWithRange(up, 0, 50)
up = model.UpdateProgressWithRange(up, 50, 100)
_, gcid, err = stream.CacheFullInTempFileAndHash(file, cacheFileProgress, hash_extend.GCID, file.GetSize())
if err != nil {
return err
}
@ -480,8 +479,7 @@ func (xc *XunLeiXCommon) Request(url string, method string, callback base.ReqCal
}
}, resp)
var errResp *ErrResp
ok := errors.As(err, &errResp)
errResp, ok := err.(*ErrResp)
if !ok {
return nil, err
}
@ -560,84 +558,3 @@ func (xc *XunLeiXCommon) IsLogin() bool {
_, err := xc.Request(XLUSER_API_URL+"/user/me", http.MethodGet, nil, nil)
return err == nil
}
// the offline-download endpoints are identical to PikPak's
func (xc *XunLeiXCommon) OfflineDownload(ctx context.Context, fileUrl string, parentDir model.Obj, fileName string) (*OfflineTask, error) {
requestBody := base.Json{
"kind": "drive#file",
"name": fileName,
"upload_type": "UPLOAD_TYPE_URL",
"url": base.Json{
"url": fileUrl,
},
"params": base.Json{},
"parent_id": parentDir.GetID(),
}
var resp OfflineDownloadResp // same shape as PikPak's
_, err := xc.Request(FILE_API_URL, http.MethodPost, func(req *resty.Request) {
req.SetContext(ctx).
SetBody(requestBody)
}, &resp)
if err != nil {
return nil, err
}
return &resp.Task, err
}
// fetch the list of offline download tasks
func (xc *XunLeiXCommon) OfflineList(ctx context.Context, nextPageToken string, phase []string) ([]OfflineTask, error) {
res := make([]OfflineTask, 0)
if len(phase) == 0 {
phase = []string{"PHASE_TYPE_RUNNING", "PHASE_TYPE_ERROR", "PHASE_TYPE_COMPLETE", "PHASE_TYPE_PENDING"}
}
params := map[string]string{
"type": "offline",
"thumbnail_size": "SIZE_SMALL",
"limit": "10000",
"page_token": nextPageToken,
"with": "reference_resource",
}
// build the phase filter parameter
if len(phase) > 0 {
filters := base.Json{
"phase": map[string]string{
"in": strings.Join(phase, ","),
},
}
filtersJSON, err := json.Marshal(filters)
if err != nil {
return nil, fmt.Errorf("failed to marshal filters: %w", err)
}
params["filters"] = string(filtersJSON)
}
var resp OfflineListResp
_, err := xc.Request(TASKS_API_URL, http.MethodGet, func(req *resty.Request) {
req.SetContext(ctx).
SetQueryParams(params)
}, &resp)
if err != nil {
return nil, fmt.Errorf("failed to get offline list: %w", err)
}
res = append(res, resp.Tasks...)
return res, nil
}
func (xc *XunLeiXCommon) DeleteOfflineTasks(ctx context.Context, taskIDs []string, deleteFiles bool) error {
params := map[string]string{
"task_ids": strings.Join(taskIDs, ","),
"delete_files": strconv.FormatBool(deleteFiles),
}
_, err := xc.Request(TASKS_API_URL, http.MethodDelete, func(req *resty.Request) {
req.SetContext(ctx).
SetQueryParams(params)
}, nil)
if err != nil {
return fmt.Errorf("failed to delete tasks %v: %w", taskIDs, err)
}
return nil
}

View File

@ -204,102 +204,3 @@ type UploadTaskResponse struct {
File Files `json:"file"`
}
// response to adding an offline download
type OfflineDownloadResp struct {
File *string `json:"file"`
Task OfflineTask `json:"task"`
UploadType string `json:"upload_type"`
URL struct {
Kind string `json:"kind"`
} `json:"url"`
}
// offline download task list
type OfflineListResp struct {
ExpiresIn int64 `json:"expires_in"`
NextPageToken string `json:"next_page_token"`
Tasks []OfflineTask `json:"tasks"`
}
// offlineTask
type OfflineTask struct {
Callback string `json:"callback"`
CreatedTime string `json:"created_time"`
FileID string `json:"file_id"`
FileName string `json:"file_name"`
FileSize string `json:"file_size"`
IconLink string `json:"icon_link"`
ID string `json:"id"`
Kind string `json:"kind"`
Message string `json:"message"`
Name string `json:"name"`
Params Params `json:"params"`
Phase string `json:"phase"` // PHASE_TYPE_RUNNING, PHASE_TYPE_ERROR, PHASE_TYPE_COMPLETE, PHASE_TYPE_PENDING
Progress int64 `json:"progress"`
ReferenceResource ReferenceResource `json:"reference_resource"`
Space string `json:"space"`
StatusSize int64 `json:"status_size"`
Statuses []string `json:"statuses"`
ThirdTaskID string `json:"third_task_id"`
Type string `json:"type"`
UpdatedTime string `json:"updated_time"`
UserID string `json:"user_id"`
}
type Params struct {
Age string `json:"age"`
MIMEType *string `json:"mime_type,omitempty"`
PredictType string `json:"predict_type"`
URL string `json:"url"`
}
type ReferenceResource struct {
Type string `json:"@type"`
Audit interface{} `json:"audit"`
Hash string `json:"hash"`
IconLink string `json:"icon_link"`
ID string `json:"id"`
Kind string `json:"kind"`
Medias []Media `json:"medias"`
MIMEType string `json:"mime_type"`
Name string `json:"name"`
Params map[string]interface{} `json:"params"`
ParentID string `json:"parent_id"`
Phase string `json:"phase"`
Size string `json:"size"`
Space string `json:"space"`
Starred bool `json:"starred"`
Tags []string `json:"tags"`
ThumbnailLink string `json:"thumbnail_link"`
}
type Media struct {
MediaId string `json:"media_id"`
MediaName string `json:"media_name"`
Video struct {
Height int `json:"height"`
Width int `json:"width"`
Duration int `json:"duration"`
BitRate int `json:"bit_rate"`
FrameRate int `json:"frame_rate"`
VideoCodec string `json:"video_codec"`
AudioCodec string `json:"audio_codec"`
VideoType string `json:"video_type"`
} `json:"video"`
Link struct {
Url string `json:"url"`
Token string `json:"token"`
Expire time.Time `json:"expire"`
} `json:"link"`
NeedMoreQuota bool `json:"need_more_quota"`
VipTypes []interface{} `json:"vip_types"`
RedirectLink string `json:"redirect_link"`
IconLink string `json:"icon_link"`
IsDefault bool `json:"is_default"`
Priority int `json:"priority"`
IsOrigin bool `json:"is_origin"`
ResolutionName string `json:"resolution_name"`
IsVisible bool `json:"is_visible"`
Category string `json:"category"`
}

View File

@ -19,7 +19,6 @@ import (
const (
API_URL = "https://api-pan.xunleix.com/drive/v1"
FILE_API_URL = API_URL + "/files"
TASKS_API_URL = API_URL + "/tasks"
XLUSER_API_URL = "https://xluser-ssl.xunleix.com/v1"
)

View File

@ -10,7 +10,7 @@ type Addition struct {
// driver.RootPath
// driver.RootID
// define other
UrlStructure string `json:"url_structure" type:"text" required:"true" default:"https://raw.githubusercontent.com/OpenListTeam/OpenList/main/README.md\nhttps://raw.githubusercontent.com/OpenListTeam/OpenList/main/README_cn.md\nfolder:\n CONTRIBUTING.md:1635:https://raw.githubusercontent.com/OpenListTeam/OpenList/main/CONTRIBUTING.md\n CODE_OF_CONDUCT.md:2093:https://raw.githubusercontent.com/OpenListTeam/OpenList/main/CODE_OF_CONDUCT.md" help:"structure:FolderName:\n [FileName:][FileSize:][Modified:]Url"`
UrlStructure string `json:"url_structure" type:"text" required:"true" default:"https://cdn.oplist.org/gh/OpenListTeam/OpenList/README.md\nhttps://cdn.oplist.org/gh/OpenListTeam/OpenList/README_cn.md\nfolder:\n CONTRIBUTING.md:1635:https://cdn.oplist.org/gh/OpenListTeam/OpenList/CONTRIBUTING.md\n CODE_OF_CONDUCT.md:2093:https://cdn.oplist.org/gh/OpenListTeam/OpenList/CODE_OF_CONDUCT.md" help:"structure:FolderName:\n [FileName:][FileSize:][Modified:]Url"`
HeadSize bool `json:"head_size" type:"bool" default:"false" help:"Use head method to get file size, but it may be failed."`
Writable bool `json:"writable" type:"bool" default:"false"`
}

View File

@ -54,6 +54,10 @@ func (f DummyMFile) ReadAt(p []byte, off int64) (n int, err error) {
return f.Reader.Read(p)
}
func (f DummyMFile) Close() error {
return nil
}
func (DummyMFile) Seek(offset int64, whence int) (int64, error) {
return offset, nil
}

Some files were not shown because too many files have changed in this diff.